diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index c36b13bf..d5564f33 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -2,7 +2,7 @@ name: Main (test, releases)
on:
# # Indicates I want to run this workflow on all branches, PR, and tags
push:
- branches: ["*"]
+ branches: ["master"]
tags: ["*"]
pull_request:
branches: [ "master" ]
diff --git a/.gitignore b/.gitignore
index 544851cf..dea93704 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,6 +4,7 @@
example.tf
terraform.tfplan
terraform.tfstate
+.terraform.lock*
terraform-provider-mysql
bin/
modules-dev/
diff --git a/.goreleaser.yml b/.goreleaser.yml
index 37ff8f20..63ad53a4 100644
--- a/.goreleaser.yml
+++ b/.goreleaser.yml
@@ -3,8 +3,10 @@
version: 2
before:
hooks:
- # this is just an example and not a requirement for provider building/publishing
- go mod tidy
+ # Disabled until auto-generated docs and manual docs operate peacefully
+ # - go generate ./...
+
builds:
- env:
# goreleaser does not work with CGO, it could also complicate
diff --git a/GNUmakefile b/GNUmakefile
index 8f9530be..0e68193b 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -6,10 +6,30 @@ TERRAFORM_VERSION=0.14.7
TERRAFORM_OS=$(shell uname -s | tr A-Z a-z)
TEST_USER=root
TEST_PASSWORD=my-secret-pw
-DATESTAMP=$(shell date "+%Y%m%d%H%M%S")
+DATESTAMP=$(shell date "+%Y%m%d")
SHA_SHORT=$(shell git describe --match=FORCE_NEVER_MATCH --always --abbrev=40 --dirty --abbrev)
MOST_RECENT_UPSTREAM_TAG=$(shell git for-each-ref refs/tags --sort=-taggerdate --format="%(refname)" | head -1 | grep -E -o "v\d+\.\d+\.\d+")
+OS_ARCH=linux_amd64
+# Set correct OS_ARCH on Mac
+UNAME := $(shell uname -s)
+ifeq ($(UNAME),Darwin)
+ HW := $(shell uname -m)
+ ifeq ($(HW),arm64)
+ ARCH=$(HW)
+ else
+ ARCH=amd64
+ endif
+ OS_ARCH=darwin_$(ARCH)
+endif
+
+HOSTNAME=registry.terraform.io
+NAMESPACE=zph
+NAME=mysql
+VERSION=9.9.9
+## on linux base os
+TERRAFORM_PLUGINS_DIRECTORY=$(HOME)/.terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${NAME}/${VERSION}/${OS_ARCH}
+
default: build
build: fmtcheck
@@ -121,6 +141,19 @@ endif
@$(MAKE) -C $(GOPATH)/src/$(WEBSITE_REPO) website-provider PROVIDER_PATH=$(shell pwd) PROVIDER_NAME=$(PKG_NAME)
+install:
+ mkdir -p ${TERRAFORM_PLUGINS_DIRECTORY}
+ go build -o ${TERRAFORM_PLUGINS_DIRECTORY}/terraform-provider-${NAME}
+ cd examples && rm -rf .terraform
+ cd examples && make init
+
+re-install:
+ rm -f examples/.terraform.lock.hcl
+ rm -f ${TERRAFORM_PLUGINS_DIRECTORY}/terraform-provider-${NAME}
+ go build -o ${TERRAFORM_PLUGINS_DIRECTORY}/terraform-provider-${NAME}
+ cd examples && rm -rf .terraform
+ cd examples && terraform init
+
format-tag:
@echo $(MOST_RECENT_UPSTREAM_TAG)-$(DATESTAMP)-$(SHA_SHORT)
@@ -128,5 +161,6 @@ tag:
@echo git tag -a $(MOST_RECENT_UPSTREAM_TAG)-$(DATESTAMP)-$(SHA_SHORT) -m $(MOST_RECENT_UPSTREAM_TAG)-$(DATESTAMP)-$(SHA_SHORT)
@git tag -a $(MOST_RECENT_UPSTREAM_TAG)-$(DATESTAMP)-$(SHA_SHORT) -m $(MOST_RECENT_UPSTREAM_TAG)-$(DATESTAMP)-$(SHA_SHORT)
-
-.PHONY: build test testacc vet fmt fmtcheck errcheck vendor-status test-compile website website-test tag
+release:
+ @goreleaser release --clean --verbose
+.PHONY: build test testacc vet fmt fmtcheck errcheck vendor-status test-compile website website-test tag format-tag install re-install release
diff --git a/README.md b/README.md
index 61851fc7..55d4d67d 100644
--- a/README.md
+++ b/README.md
@@ -13,7 +13,7 @@ fragmentation. We will update this readme if those design choices change.
zph fork will use release naming in the following form:
-v3.0.62-20240705125429-3c7af6a
+v3.0.62-20240705-3c7af6a
{petoju version}-{date}-{sha}
diff --git a/docs/data-sources/databases.md b/docs/data-sources/databases.md
new file mode 100644
index 00000000..f66a7cdd
--- /dev/null
+++ b/docs/data-sources/databases.md
@@ -0,0 +1,25 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "mysql_databases Data Source - terraform-provider-mysql"
+subcategory: ""
+description: |-
+
+---
+
+# mysql_databases (Data Source)
+
+
+
+
+
+
+## Schema
+
+### Optional
+
+- `pattern` (String)
+
+### Read-Only
+
+- `databases` (List of String)
+- `id` (String) The ID of this resource.
diff --git a/docs/data-sources/tables.md b/docs/data-sources/tables.md
new file mode 100644
index 00000000..fa0546ad
--- /dev/null
+++ b/docs/data-sources/tables.md
@@ -0,0 +1,29 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "mysql_tables Data Source - terraform-provider-mysql"
+subcategory: ""
+description: |-
+
+---
+
+# mysql_tables (Data Source)
+
+
+
+
+
+
+## Schema
+
+### Required
+
+- `database` (String)
+
+### Optional
+
+- `pattern` (String)
+
+### Read-Only
+
+- `id` (String) The ID of this resource.
+- `tables` (List of String)
diff --git a/website/docs/index.html.markdown b/docs/index.md
similarity index 100%
rename from website/docs/index.html.markdown
rename to docs/index.md
diff --git a/website/docs/r/database.html.markdown b/docs/resources/database.md
similarity index 100%
rename from website/docs/r/database.html.markdown
rename to docs/resources/database.md
diff --git a/website/docs/r/default_roles.html.markdown b/docs/resources/default_roles.md
similarity index 100%
rename from website/docs/r/default_roles.html.markdown
rename to docs/resources/default_roles.md
diff --git a/website/docs/r/global_variable.html.markdown b/docs/resources/global_variable.md
similarity index 100%
rename from website/docs/r/global_variable.html.markdown
rename to docs/resources/global_variable.md
diff --git a/website/docs/r/grant.html.markdown b/docs/resources/grant.md
similarity index 100%
rename from website/docs/r/grant.html.markdown
rename to docs/resources/grant.md
diff --git a/website/docs/r/rds_config.html.markdown b/docs/resources/rds_config.md
similarity index 100%
rename from website/docs/r/rds_config.html.markdown
rename to docs/resources/rds_config.md
diff --git a/website/docs/r/role.html.markdown b/docs/resources/role.md
similarity index 100%
rename from website/docs/r/role.html.markdown
rename to docs/resources/role.md
diff --git a/docs/resources/sql.md b/docs/resources/sql.md
new file mode 100644
index 00000000..3ae5d64d
--- /dev/null
+++ b/docs/resources/sql.md
@@ -0,0 +1,26 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "mysql_sql Resource - terraform-provider-mysql"
+subcategory: ""
+description: |-
+
+---
+
+# mysql_sql (Resource)
+
+
+
+
+
+
+## Schema
+
+### Required
+
+- `create_sql` (String)
+- `delete_sql` (String)
+- `name` (String)
+
+### Read-Only
+
+- `id` (String) The ID of this resource.
diff --git a/docs/resources/ti_config.md b/docs/resources/ti_config.md
new file mode 100644
index 00000000..abee6062
--- /dev/null
+++ b/docs/resources/ti_config.md
@@ -0,0 +1,30 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "mysql_ti_config Resource - terraform-provider-mysql"
+subcategory: ""
+description: |-
+
+---
+
+# mysql_ti_config (Resource)
+
+
+
+
+
+
+## Schema
+
+### Required
+
+- `name` (String)
+- `type` (String)
+- `value` (String)
+
+### Optional
+
+- `instance` (String)
+
+### Read-Only
+
+- `id` (String) The ID of this resource.
diff --git a/docs/resources/ti_resource_group.md b/docs/resources/ti_resource_group.md
new file mode 100644
index 00000000..d78a4c4d
--- /dev/null
+++ b/docs/resources/ti_resource_group.md
@@ -0,0 +1,31 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "mysql_ti_resource_group Resource - terraform-provider-mysql"
+subcategory: ""
+description: |-
+
+---
+
+# mysql_ti_resource_group (Resource)
+
+
+
+
+
+
+## Schema
+
+### Required
+
+- `name` (String)
+- `resource_units` (Number)
+
+### Optional
+
+- `burstable` (Boolean)
+- `priority` (String)
+- `query_limit` (String)
+
+### Read-Only
+
+- `id` (String) The ID of this resource.
diff --git a/docs/resources/ti_resource_group_user_assignment.md b/docs/resources/ti_resource_group_user_assignment.md
new file mode 100644
index 00000000..79bc3bcc
--- /dev/null
+++ b/docs/resources/ti_resource_group_user_assignment.md
@@ -0,0 +1,25 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "mysql_ti_resource_group_user_assignment Resource - terraform-provider-mysql"
+subcategory: ""
+description: |-
+
+---
+
+# mysql_ti_resource_group_user_assignment (Resource)
+
+
+
+
+
+
+## Schema
+
+### Required
+
+- `resource_group` (String)
+- `user` (String)
+
+### Read-Only
+
+- `id` (String) The ID of this resource.
diff --git a/website/docs/r/user.html.markdown b/docs/resources/user.md
similarity index 100%
rename from website/docs/r/user.html.markdown
rename to docs/resources/user.md
diff --git a/website/docs/r/user_password.html.markdown b/docs/resources/user_password.md
similarity index 100%
rename from website/docs/r/user_password.html.markdown
rename to docs/resources/user_password.md
diff --git a/examples/tidb/main.tf b/examples/tidb/main.tf
new file mode 100644
index 00000000..7a1b0427
--- /dev/null
+++ b/examples/tidb/main.tf
@@ -0,0 +1,17 @@
+terraform {
+ required_version = ">= 1.5.7"
+
+ required_providers {
+    mysql = {
+ source = "registry.terraform.io/zph/mysql"
+ version = "9.9.9"
+ }
+ }
+}
+
+provider "mysql" {
+ endpoint = "localhost:4000"
+ username = "root"
+ #alias = "tidb"
+ #password = "admin"
+}
diff --git a/examples/tidb/resource_groups/Makefile b/examples/tidb/resource_groups/Makefile
new file mode 100644
index 00000000..68677b63
--- /dev/null
+++ b/examples/tidb/resource_groups/Makefile
@@ -0,0 +1,6 @@
+re-install:
+ @cd ../../.. && \
+ make re-install && \
+ cd - && \
+ rm -rf .terraform* && \
+ terraform init
diff --git a/examples/tidb/resource_groups/main.tf b/examples/tidb/resource_groups/main.tf
new file mode 100644
index 00000000..e4a66334
--- /dev/null
+++ b/examples/tidb/resource_groups/main.tf
@@ -0,0 +1,40 @@
+terraform {
+ required_version = ">= 1.5.7"
+
+ required_providers {
+ mysql = {
+ source = "registry.terraform.io/zph/mysql"
+ version = "9.9.9"
+ }
+ }
+}
+
+provider "mysql" {
+ endpoint = "127.0.0.1:4000"
+
+ username = "root"
+}
+
+resource "mysql_ti_resource_group" "rg1" {
+ name = "rg1"
+ resource_units = 4000
+}
+
+resource "mysql_ti_resource_group" "rg2" {
+ name = "rg2"
+ resource_units = 2000
+ burstable = true
+ priority = "high"
+}
+
+resource "mysql_ti_resource_group_user_assignment" "rg1_user1" {
+ user = "user1"
+ resource_group = mysql_ti_resource_group.rg1.name
+ depends_on = [ mysql_ti_resource_group.rg1 ]
+}
+
+resource "mysql_ti_resource_group_user_assignment" "rg1_user2" {
+ user = "user2"
+ resource_group = mysql_ti_resource_group.rg1.name
+ depends_on = [ mysql_ti_resource_group.rg1 ]
+}
diff --git a/go.mod b/go.mod
index fccf733f..6ba864e1 100644
--- a/go.mod
+++ b/go.mod
@@ -8,6 +8,8 @@ require (
github.com/go-sql-driver/mysql v1.8.1
github.com/gofrs/uuid v4.4.0+incompatible
github.com/hashicorp/go-version v1.7.0
+ github.com/hashicorp/terraform-plugin-docs v0.19.4
+ github.com/hashicorp/terraform-plugin-log v0.9.0
github.com/hashicorp/terraform-plugin-sdk/v2 v2.34.0
github.com/tidwall/gjson v1.17.1
golang.org/x/net v0.26.0
@@ -22,9 +24,17 @@ require (
filippo.io/edwards25519 v1.1.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
+ github.com/BurntSushi/toml v1.2.1 // indirect
+ github.com/Kunde21/markdownfmt/v3 v3.1.0 // indirect
+ github.com/Masterminds/goutils v1.1.1 // indirect
+ github.com/Masterminds/semver/v3 v3.2.0 // indirect
+ github.com/Masterminds/sprig/v3 v3.2.3 // indirect
github.com/ProtonMail/go-crypto v1.1.0-alpha.2-proton // indirect
github.com/agext/levenshtein v1.2.3 // indirect
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
+ github.com/armon/go-radix v1.0.0 // indirect
+ github.com/bgentry/speakeasy v0.1.0 // indirect
+ github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect
github.com/cloudflare/circl v1.3.8 // indirect
github.com/fatih/color v1.17.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
@@ -38,6 +48,7 @@ require (
github.com/google/uuid v1.6.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.5 // indirect
+ github.com/hashicorp/cli v1.1.6 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-checkpoint v0.5.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -46,19 +57,21 @@ require (
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-plugin v1.6.1 // indirect
github.com/hashicorp/go-uuid v1.0.3 // indirect
- github.com/hashicorp/hc-install v0.6.4 // indirect
+ github.com/hashicorp/hc-install v0.7.0 // indirect
github.com/hashicorp/hcl/v2 v2.21.0 // indirect
github.com/hashicorp/logutils v1.0.0 // indirect
github.com/hashicorp/terraform-exec v0.21.0 // indirect
github.com/hashicorp/terraform-json v0.22.1 // indirect
github.com/hashicorp/terraform-plugin-go v0.23.0 // indirect
- github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect
github.com/hashicorp/terraform-registry-address v0.2.3 // indirect
github.com/hashicorp/terraform-svchost v0.1.1 // indirect
github.com/hashicorp/yamux v0.1.1 // indirect
+ github.com/huandu/xstrings v1.3.3 // indirect
+ github.com/imdario/mergo v0.3.15 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-testing-interface v1.14.1 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
@@ -66,18 +79,25 @@ require (
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/oklog/run v1.1.0 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
+ github.com/posener/complete v1.2.3 // indirect
+ github.com/shopspring/decimal v1.3.1 // indirect
+ github.com/spf13/cast v1.5.0 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
+ github.com/yuin/goldmark v1.7.1 // indirect
+ github.com/yuin/goldmark-meta v1.1.0 // indirect
github.com/zclconf/go-cty v1.14.4 // indirect
+ go.abhg.dev/goldmark/frontmatter v0.2.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect
go.opentelemetry.io/otel v1.27.0 // indirect
go.opentelemetry.io/otel/metric v1.27.0 // indirect
go.opentelemetry.io/otel/trace v1.27.0 // indirect
golang.org/x/crypto v0.24.0 // indirect
+ golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df // indirect
golang.org/x/mod v0.18.0 // indirect
golang.org/x/sync v0.7.0 // indirect
golang.org/x/sys v0.21.0 // indirect
@@ -88,6 +108,8 @@ require (
google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 // indirect
google.golang.org/grpc v1.64.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
+ gopkg.in/yaml.v2 v2.3.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
)
go 1.21.11
diff --git a/go.sum b/go.sum
index a04fb75b..eab84942 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,4 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14=
cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw=
cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s=
cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
@@ -21,6 +20,16 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v1.9.0/go.mod h1:mgrmMSgaLp9hmax6
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
+github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/Kunde21/markdownfmt/v3 v3.1.0 h1:KiZu9LKs+wFFBQKhrZJrFZwtLnCCWJahL+S+E/3VnM0=
+github.com/Kunde21/markdownfmt/v3 v3.1.0/go.mod h1:tPXN1RTyOzJwhfHoon9wUr4HGYmWgVxSQN6VBJDkrVc=
+github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
+github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
+github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
+github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA=
+github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/ProtonMail/go-crypto v1.1.0-alpha.2-proton h1:HKz85FwoXx86kVtTvFke7rgHvq/HoloSUvW5semjFWs=
@@ -30,6 +39,12 @@ github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki
github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec=
github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY=
github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
+github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I=
+github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA=
github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
@@ -55,6 +70,8 @@ github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4=
github.com/fatih/color v1.17.0/go.mod h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
+github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU=
@@ -108,6 +125,7 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -115,6 +133,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA=
github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E=
+github.com/hashicorp/cli v1.1.6 h1:CMOV+/LJfL1tXCOKrgAX0uRKnzjj/mpmqNXloRSy2K8=
+github.com/hashicorp/cli v1.1.6/go.mod h1:MPon5QYlgjjo0BSoAiN0ESeT5fRzDjVRp+uioJ0piz4=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@@ -127,6 +147,7 @@ github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUK
github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs=
github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=
github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-plugin v1.6.1 h1:P7MR2UP6gNKGPp+y7EZw2kOiq4IR9WiqLvp0XOsVdwI=
@@ -136,8 +157,8 @@ github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/C
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/hc-install v0.6.4 h1:QLqlM56/+SIIGvGcfFiwMY3z5WGXT066suo/v9Km8e0=
-github.com/hashicorp/hc-install v0.6.4/go.mod h1:05LWLy8TD842OtgcfBbOT0WMoInBMUSHjmDx10zuBIA=
+github.com/hashicorp/hc-install v0.7.0 h1:Uu9edVqjKQxxuD28mR5TikkKDd/p55S8vzPC1659aBk=
+github.com/hashicorp/hc-install v0.7.0/go.mod h1:ELmmzZlGnEcqoUMKUuykHaPCIR1sYLYX+KSggWSKZuA=
github.com/hashicorp/hcl/v2 v2.21.0 h1:lve4q/o/2rqwYOgUg3y3V2YPyD1/zkCLGjIV74Jit14=
github.com/hashicorp/hcl/v2 v2.21.0/go.mod h1:62ZYHrXgPoX8xBnzl8QzbWq4dyDsDtfCRgIq1rbJEvA=
github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
@@ -146,6 +167,8 @@ github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVW
github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg=
github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec=
github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A=
+github.com/hashicorp/terraform-plugin-docs v0.19.4 h1:G3Bgo7J22OMtegIgn8Cd/CaSeyEljqjH3G39w28JK4c=
+github.com/hashicorp/terraform-plugin-docs v0.19.4/go.mod h1:4pLASsatTmRynVzsjEhbXZ6s7xBlUw/2Kt0zfrq8HxA=
github.com/hashicorp/terraform-plugin-go v0.23.0 h1:AALVuU1gD1kPb48aPQUjug9Ir/125t+AAurhqphJ2Co=
github.com/hashicorp/terraform-plugin-go v0.23.0/go.mod h1:1E3Cr9h2vMlahWMbsSEcNrOCxovCZhOOIXjFHbjc/lQ=
github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0=
@@ -158,6 +181,11 @@ github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S
github.com/hashicorp/terraform-svchost v0.1.1/go.mod h1:mNsjQfZyf/Jhz35v6/0LWcv26+X7JPS+buii2c9/ctc=
github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE=
github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
+github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4=
+github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
+github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w=
@@ -202,8 +230,11 @@ github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27k
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/microsoft/go-mssqldb v1.7.2 h1:CHkFJiObW7ItKTJfHo1QX7QBBD1iV+mn1eOyRP3b/PA=
github.com/microsoft/go-mssqldb v1.7.2/go.mod h1:kOvZKUdrhhFQmxLZqbwUV0rHkNkZpthMITIb2Ko1IoA=
+github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU=
@@ -212,6 +243,7 @@ github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQ
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
@@ -222,16 +254,27 @@ github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmd
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo=
+github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
+github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
+github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A=
github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
+github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
@@ -255,10 +298,16 @@ github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/yuin/goldmark v1.7.1 h1:3bajkSilaCbjdKVsKdZjZCLBNPL9pYzrCakKaf4U49U=
+github.com/yuin/goldmark v1.7.1/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E=
+github.com/yuin/goldmark-meta v1.1.0 h1:pWw+JLHGZe8Rk0EGsMVssiNb/AaPMHfSRszZeUeiOUc=
+github.com/yuin/goldmark-meta v1.1.0/go.mod h1:U4spWENafuA7Zyg+Lj5RqK/MF+ovMYtBvXi1lBb2VP0=
github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8=
github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE=
github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940 h1:4r45xpDWB6ZMSMNJFMOjqrGHynW3DIBuR2H9j0ug+Mo=
github.com/zclconf/go-cty-debug v0.0.0-20240509010212-0d6042c53940/go.mod h1:CmBdvvj3nqzfzJ6nTCIwDTPZ56aVGvDrmztiO5g3qrM=
+go.abhg.dev/goldmark/frontmatter v0.2.0 h1:P8kPG0YkL12+aYk2yU3xHv4tcXzeVnN+gU0tJ5JnxRw=
+go.abhg.dev/goldmark/frontmatter v0.2.0/go.mod h1:XqrEkZuM57djk7zrlRUB02x8I5J0px76YjkOzhB4YlU=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A=
@@ -272,9 +321,12 @@ go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39S
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df h1:UA2aFVmmsIlefxMk29Dp2juaUSth8Pyn3Tq5Y5mJGME=
+golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@@ -290,6 +342,7 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -316,16 +369,19 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
@@ -381,6 +437,9 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/main.go b/main.go
index ec777fb3..a4394dc1 100644
--- a/main.go
+++ b/main.go
@@ -1,5 +1,6 @@
package main
+//go:generate go run github.com/hashicorp/terraform-plugin-docs/cmd/tfplugindocs
import (
"github.com/hashicorp/terraform-plugin-sdk/v2/plugin"
"github.com/petoju/terraform-provider-mysql/v3/mysql"
diff --git a/mysql/provider.go b/mysql/provider.go
index 54339c53..eea8871a 100644
--- a/mysql/provider.go
+++ b/mysql/provider.go
@@ -246,16 +246,18 @@ func Provider() *schema.Provider {
},
ResourcesMap: map[string]*schema.Resource{
- "mysql_database": resourceDatabase(),
- "mysql_global_variable": resourceGlobalVariable(),
- "mysql_grant": resourceGrant(),
- "mysql_role": resourceRole(),
- "mysql_sql": resourceSql(),
- "mysql_user_password": resourceUserPassword(),
- "mysql_user": resourceUser(),
- "mysql_ti_config": resourceTiConfigVariable(),
- "mysql_rds_config": resourceRDSConfig(),
- "mysql_default_roles": resourceDefaultRoles(),
+ "mysql_database": resourceDatabase(),
+ "mysql_global_variable": resourceGlobalVariable(),
+ "mysql_grant": resourceGrant(),
+ "mysql_role": resourceRole(),
+ "mysql_sql": resourceSql(),
+ "mysql_user_password": resourceUserPassword(),
+ "mysql_user": resourceUser(),
+ "mysql_ti_config": resourceTiConfigVariable(),
+ "mysql_ti_resource_group": resourceTiResourceGroup(),
+ "mysql_ti_resource_group_user_assignment": resourceTiResourceGroupUserAssignment(),
+ "mysql_rds_config": resourceRDSConfig(),
+ "mysql_default_roles": resourceDefaultRoles(),
},
ConfigureContextFunc: providerConfigure,
diff --git a/mysql/resource_ti_resource_group.go b/mysql/resource_ti_resource_group.go
new file mode 100644
index 00000000..b7783b98
--- /dev/null
+++ b/mysql/resource_ti_resource_group.go
@@ -0,0 +1,243 @@
+package mysql
+
+import (
+ "context"
+ "database/sql"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+)
+
+type ResourceGroup struct {
+ Name string
+ ResourceUnits int
+ Priority string
+ Burstable bool
+ QueryLimit string
+}
+
+var CreateResourceGroupSQLPrefix = "CREATE RESOURCE GROUP IF NOT EXISTS"
+var UpdateResourceGroupSQLPrefix = "ALTER RESOURCE GROUP"
+
+func (rg *ResourceGroup) buildSQLQuery(prefix string) string {
+ var query []string
+ baseQuery := fmt.Sprintf("%s %s RU_PER_SEC = %d", prefix, rg.Name, rg.ResourceUnits)
+ query = append(query, baseQuery)
+
+ query = append(query, fmt.Sprintf(`PRIORITY = %s`, rg.Priority))
+
+ if rg.QueryLimit != DefaultResourceGroup.QueryLimit {
+ query = append(query, fmt.Sprintf(`QUERY_LIMIT=%s`, rg.QueryLimit))
+
+ }
+
+ query = append(query, fmt.Sprintf(`BURSTABLE = %t`, rg.Burstable))
+ query = append(query, ";")
+
+ ctx := context.TODO()
+ tflog.SetField(ctx, "sql", query)
+ tflog.Debug(ctx, `buildSQLQuery`)
+ return strings.Join(query, " ")
+}
+
+var DefaultResourceGroup = ResourceGroup{
+ Name: "tfDefault",
+ Priority: "medium",
+ Burstable: false,
+ QueryLimit: "()",
+}
+
+func resourceTiResourceGroup() *schema.Resource {
+ return &schema.Resource{
+ CreateContext: CreateResourceGroup,
+ ReadContext: ReadResourceGroup,
+ UpdateContext: UpdateResourceGroup,
+ DeleteContext: DeleteResourceGroup,
+ Importer: &schema.ResourceImporter{
+ StateContext: schema.ImportStatePassthroughContext,
+ },
+ Schema: map[string]*schema.Schema{
+ "name": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ // TODO: allow a centralized way to check if there's capacity remaining to use
+ "resource_units": {
+ Type: schema.TypeInt,
+ Required: true,
+ },
+ "priority": {
+ Type: schema.TypeString,
+ Default: DefaultResourceGroup.Priority,
+ ForceNew: false,
+ ValidateFunc: validation.StringInSlice([]string{"high", "medium", "low"}, false),
+ Optional: true,
+ },
+ "burstable": {
+ Type: schema.TypeBool,
+ Default: DefaultResourceGroup.Burstable,
+ ForceNew: false,
+ Optional: true,
+ },
+ /*
+ QUERY_LIMIT=(EXEC_ELAPSED='60s', ACTION=KILL, WATCH=EXACT DURATION='10m')
+ */
+ "query_limit": {
+ Type: schema.TypeString,
+ Default: DefaultResourceGroup.QueryLimit,
+ ForceNew: false,
+ Optional: true,
+ },
+ },
+ }
+}
+
+func CreateResourceGroup(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+ db, err := getDatabaseFromMeta(ctx, meta)
+ if err != nil {
+ return diag.FromErr(err)
+ }
+
+ rg := NewResourceGroupFromResourceData(d)
+
+ var warnLevel, warnMessage string
+ var warnCode int = 0
+
+ query := rg.buildSQLQuery(CreateResourceGroupSQLPrefix)
+ tflog.SetField(ctx, "query", query)
+ tflog.Debug(ctx, "SQL")
+
+ _, err = db.ExecContext(ctx, query)
+ if err != nil {
+ return diag.Errorf("error creating resource group (%s): %s", rg.Name, err)
+ }
+
+ db.QueryRowContext(ctx, "SHOW WARNINGS").Scan(&warnLevel, &warnCode, &warnMessage)
+ if warnCode != 0 {
+ return diag.Errorf("error setting value: %+v Error: %s", rg, warnMessage)
+ }
+
+ d.SetId(rg.Name)
+
+ return nil
+}
+
+func UpdateResourceGroup(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+ db, err := getDatabaseFromMeta(ctx, meta)
+ if err != nil {
+ return diag.FromErr(err)
+ }
+
+ rg := NewResourceGroupFromResourceData(d)
+
+ var warnLevel, warnMessage string
+ var warnCode int = 0
+
+ query := rg.buildSQLQuery(UpdateResourceGroupSQLPrefix)
+
+ tflog.SetField(ctx, "query", query)
+ tflog.Debug(ctx, "SQL")
+
+ _, err = db.ExecContext(ctx, query)
+ if err != nil {
+ return diag.Errorf("error altering resource group (%s): %s", rg.Name, err)
+ }
+
+ db.QueryRowContext(ctx, "SHOW WARNINGS").Scan(&warnLevel, &warnCode, &warnMessage)
+ if warnCode != 0 {
+ return diag.Errorf("error setting value: %s -> %d Error: %s", rg.Name, rg.ResourceUnits, warnMessage)
+ }
+
+ d.SetId(rg.Name)
+
+ return nil
+}
+
+func ReadResourceGroup(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+ db, err := getDatabaseFromMeta(ctx, meta)
+ if err != nil {
+ return diag.FromErr(err)
+ }
+
+ rg, err := getResourceGroupFromDB(db, d.Id())
+ if err != nil {
+ d.SetId("")
+ return diag.Errorf("error during get resource group (%s): %s", d.Id(), err)
+ }
+
+ if err != nil {
+ d.SetId("")
+ return diag.Errorf(`error converting burstable value from tidb %e`, err)
+ }
+
+ setResourceGroupOnResourceData(rg, d)
+ return nil
+}
+
+func DeleteResourceGroup(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+ name := d.Get("name").(string)
+
+ db, err := getDatabaseFromMeta(ctx, meta)
+ if err != nil {
+ return diag.FromErr(err)
+ }
+ // TODO: check for users assigned as safety? and assert zero?
+ deleteQuery := fmt.Sprintf("DROP RESOURCE GROUP IF EXISTS %s", name)
+ _, err = db.Exec(deleteQuery)
+ if err != nil && !errors.Is(err, sql.ErrNoRows) {
+ return diag.Errorf("error during drop resource group (%s): %s", d.Id(), err)
+ }
+
+ d.SetId("")
+ return nil
+}
+
+func getResourceGroupFromDB(db *sql.DB, name string) (ResourceGroup, error) {
+ rg := ResourceGroup{Name: name}
+
+	/*
+		Coerce types on the SQL side into convenient Go types:
+		BURSTABLE is a varchar(3), so we coerce it to BOOLEAN;
+		QUERY_LIMIT is nullable in the DB, but we coerce it to the standard "empty" string value of "()";
+		PRIORITY is lowercased to reduce configuration variability.
+	*/
+ query := `SELECT NAME, RU_PER_SEC, LOWER(PRIORITY), BURSTABLE = 'YES' as BURSTABLE, IFNULL(QUERY_LIMIT,"()") FROM information_schema.resource_groups WHERE NAME = ?`
+
+ ctx := context.Background()
+ tflog.SetField(ctx, "query", query)
+ tflog.Debug(ctx, "getResourceGroupFromDB")
+
+ err := db.QueryRow(query, name).Scan(&rg.Name, &rg.ResourceUnits, &rg.Priority, &rg.Burstable, &rg.QueryLimit)
+ if errors.Is(err, sql.ErrNoRows) {
+ return ResourceGroup{}, fmt.Errorf("resource group doesn't exist (%s): %s", name, err)
+ } else if err != nil {
+ return ResourceGroup{}, fmt.Errorf("error during get resource group (%s): %s", name, err)
+ }
+
+ return rg, nil
+}
+
+func NewResourceGroupFromResourceData(d *schema.ResourceData) ResourceGroup {
+ return ResourceGroup{
+ Name: d.Get("name").(string),
+ ResourceUnits: d.Get("resource_units").(int),
+ Priority: strings.ToUpper(d.Get("priority").(string)),
+ Burstable: d.Get("burstable").(bool),
+ QueryLimit: d.Get("query_limit").(string),
+ }
+}
+
+func setResourceGroupOnResourceData(rg ResourceGroup, d *schema.ResourceData) {
+ d.Set("name", rg.Name)
+ d.Set("resource_units", rg.ResourceUnits)
+ d.Set("priority", rg.Priority)
+ d.Set("burstable", rg.Burstable)
+ d.Set("query_limit", rg.QueryLimit)
+}
diff --git a/mysql/resource_ti_resource_group_test.go b/mysql/resource_ti_resource_group_test.go
new file mode 100644
index 00000000..cd4907d9
--- /dev/null
+++ b/mysql/resource_ti_resource_group_test.go
@@ -0,0 +1,237 @@
+package mysql
+
+/*
+
+import (
+ "context"
+ "database/sql"
+ "errors"
+ "fmt"
+ "log"
+ "regexp"
+ "testing"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
+)
+
+func TestTIDBResourceGroup_basic(t *testing.T) {
+ varName := "rg100"
+ varResourceUnits := 100
+ varBurstable := true
+ varPriority := "low"
+ resourceName := "mysql_ti_resource_group.test"
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() {
+ testAccPreCheck(t)
+ testAccPreCheckSkipNotTiDB(t)
+ // TODO: skip if not TiDB version X (7.5.2?)
+ },
+ ProviderFactories: testAccProviderFactories,
+ CheckDestroy: testAccResourceGroupCheckDestroy(varName),
+ Steps: []resource.TestStep{
+ {
+ Config: testAccResourceGroupConfigBasic(varName, varResourceUnits),
+ Check: resource.ComposeTestCheckFunc(
+ testAccResourceGroupExists(varName),
+ resource.TestCheckResourceAttr(resourceName, "name", varName),
+ ),
+ },
+ {
+ Config: testAccConfigVarConfigWithInstanceAndType(varName, varValue, varType, ""),
+ Check: resource.ComposeTestCheckFunc(
+ testAccConfigVarExists(varName, varValue, varType),
+ resource.TestCheckResourceAttr(resourceName, "name", varName),
+ ),
+ },
+ {
+ Config: testAccConfigVarConfigWithInstanceAndType(varName, varValue, varType, varInstance),
+ Check: resource.ComposeTestCheckFunc(
+ testAccConfigVarExists(varName, varValue, varType),
+ resource.TestCheckResourceAttr(resourceName, "name", varName),
+ ),
+ },
+ {
+ Config: testAccConfigVarConfigWithInstanceAndType(varName, varValue, varType, varInstance),
+ ExpectError: regexp.MustCompile("variable 'log.level' not found"),
+ Check: resource.ComposeTestCheckFunc(
+ testAccConfigVarExists(varName, varValue, "badType"),
+ resource.TestCheckResourceAttr(resourceName, "name", varName),
+ ),
+ },
+ {
+ Config: testAccConfigVarConfigWithInstanceAndType("varName", varValue, varType, varInstance),
+ ExpectError: regexp.MustCompile(".*Error: bad request to:*"),
+ Check: resource.ComposeTestCheckFunc(
+ testAccConfigVarExists(varName, varValue, varType),
+ resource.TestCheckResourceAttr(resourceName, "name", varName),
+ ),
+ },
+ {
+ Config: testAccConfigVarConfigWithInstanceAndType(varName, varValue, "varType", varInstance),
+ ExpectError: regexp.MustCompile(".*Error: expected type to be one of.*"),
+ Check: resource.ComposeTestCheckFunc(
+ testAccConfigVarExists(varName, varValue, varType),
+ resource.TestCheckResourceAttr(resourceName, "name", varName),
+ ),
+ },
+ {
+ Config: testAccConfigVarConfigWithInstanceAndType(varName, "varValue'varValue", varType, varInstance),
+ ExpectError: regexp.MustCompile(".*Error: \"value\" is badly formatted.*"),
+ Check: resource.ComposeTestCheckFunc(
+ testAccConfigVarExists(varName, varValue, varType),
+ resource.TestCheckResourceAttr(resourceName, "name", varName),
+ ),
+ },
+ },
+ })
+}
+
+func TestTiKvConfigVar_basic(t *testing.T) {
+ varName := "split.qps-threshold"
+ varValue := "1000"
+ varType := "tikv"
+ varInstance := getGetInstance(varType, t)
+ resourceName := "mysql_ti_config.test"
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() {
+ testAccPreCheck(t)
+ testAccPreCheckSkipNotTiDB(t)
+ },
+ ProviderFactories: testAccProviderFactories,
+ CheckDestroy: testAccConfigVarCheckDestroy(varName, varType),
+ Steps: []resource.TestStep{
+ {
+ Config: testAccConfigVarConfigBasic(varName, varValue, varType),
+ Check: resource.ComposeTestCheckFunc(
+ testAccConfigVarExists(varName, varValue, varType),
+ resource.TestCheckResourceAttr(resourceName, "name", varName),
+ ),
+ },
+ {
+ Config: testAccConfigVarConfigWithInstanceAndType(varName, varValue, varType, varInstance),
+ Check: resource.ComposeTestCheckFunc(
+ testAccConfigVarExists(varName, varValue, varType),
+ resource.TestCheckResourceAttr(resourceName, "name", varName),
+ ),
+ },
+ {
+ Config: testAccConfigVarConfigWithInstanceAndType(varName, "varValue", varType, varInstance),
+ ExpectError: regexp.MustCompile(".*Error: error setting value*"),
+ Check: resource.ComposeTestCheckFunc(
+ testAccConfigVarExists(varName, "varValue", varType),
+ resource.TestCheckResourceAttr(resourceName, "name", varName),
+ ),
+ },
+ },
+ })
+}
+
+func testAccResourceGroupExists(varName string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ ctx := context.Background()
+ db, err := connectToMySQL(ctx, testAccProvider.Meta().(*MySQLConfiguration))
+ if err != nil {
+ return err
+ }
+
+ getResourceGroup(varName, t)
+ resName, resValue, err := testAccGetConfigVar(varName, varType, db)
+
+ if err != nil {
+ return err
+ }
+
+ if resValue == varValue {
+ return nil
+ }
+
+ return fmt.Errorf("variable '%s' not found. resName: %s, resValue: %s, err: %s", varName, resName, resValue, err)
+ }
+}
+
+type ResourceGroup struct {
+ Name string
+ ResourceUnits int
+ Priority string
+ Burstable bool
+ Users []string
+}
+
+func NewResourceGroup(name string) ResourceGroup {
+ return ResourceGroup{
+ Name: name,
+ ResourceUnits: 2000,
+ Priority: "medium",
+ Burstable: false,
+ }
+}
+
+func getResourceGroup(name string) (ResourceGroup, error) {
+ rg := NewResourceGroup(name)
+
+ ctx := context.Background()
+ db, err := connectToMySQL(ctx, testAccProvider.Meta().(*MySQLConfiguration))
+ if err != nil {
+ return ResourceGroup{}, err
+ }
+ query := fmt.Sprintf(`SELECT NAME, RU_PER_SEC, PRIORITY, BURSTABLE FROM information_schema.resource_groups WHERE NAME="%s";`, rg.Name)
+
+ log.Printf("[DEBUG] SQL: %s\n", query)
+
+ err = db.QueryRow(query).Scan(rg.Name, rg.ResourceUnits, rg.Priority, rg.Burstable)
+ if err != nil && !errors.Is(err, sql.ErrNoRows) {
+ return ResourceGroup{}, fmt.Errorf("error during get resource group (%s): %s", d.Id(), err)
+ }
+
+ return rg, nil
+}
+
+func testAccGetResourceGroup(varName string, db *sql.DB) (string, string, error) {
+ var resType, resInstance, resName, resValue string
+
+ configQuery := "SHOW CONFIG WHERE name = ? AND type = ?"
+
+ stmt, err := db.Prepare(configQuery)
+
+ if err != nil {
+ return "nil", "nil", err
+ }
+
+ err = stmt.QueryRow(varName, varType).Scan(&resType, &resInstance, &resName, &resValue)
+
+ if err != nil && !errors.Is(err, sql.ErrNoRows) {
+ return "nil", "nil", err
+ }
+
+ return resName, resValue, nil
+}
+
+func testAccResourceGroupCheckDestroy(varName string) resource.TestCheckFunc {
+ return func(s *terraform.State) error {
+ return nil
+ }
+}
+
+func testAccResourceGroupConfigBasic(varName string, varResourceUnits int) string {
+ return fmt.Sprintf(`
+resource "mysql_ti_resource_group" "test" {
+ name = "%s"
+ resource_units = "%s"
+}
+`, varName, varResourceUnits)
+}
+
+func testAccResourceGroupConfigFull(varName string, varResourceUnits int, varPriority string, varBurstable bool, users []string) string {
+ return fmt.Sprintf(`
+resource "mysql_ti_resource_group" "test" {
+ name = "%s"
+ resource_units = "%s"
+ priority = "%s"
+ burstable = %s
+}
+`, varName, varResourceUnits, varPriority, varBurstable)
+}
+*/
diff --git a/mysql/resource_ti_resource_group_user_assignment.go b/mysql/resource_ti_resource_group_user_assignment.go
new file mode 100644
index 00000000..df703b28
--- /dev/null
+++ b/mysql/resource_ti_resource_group_user_assignment.go
@@ -0,0 +1,129 @@
+package mysql
+
+import (
+ "context"
+ "database/sql"
+ "errors"
+ "fmt"
+ "log"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+)
+
+func resourceTiResourceGroupUserAssignment() *schema.Resource {
+ return &schema.Resource{
+ CreateContext: CreateOrUpdateResourceGroupUser,
+ ReadContext: ReadResourceGroupUser,
+ UpdateContext: CreateOrUpdateResourceGroupUser,
+ DeleteContext: DeleteResourceGroupUser,
+ Importer: &schema.ResourceImporter{
+ StateContext: schema.ImportStatePassthroughContext,
+ },
+ Schema: map[string]*schema.Schema{
+ "user": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ },
+ "resource_group": {
+ Type: schema.TypeString,
+ Required: true,
+ },
+ },
+ }
+}
+
+func CreateOrUpdateResourceGroupUser(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+ db, err := getDatabaseFromMeta(ctx, meta)
+ if err != nil {
+ return diag.FromErr(err)
+ }
+
+ // TODO: should this be the d.Id()?
+ user := d.Get("user").(string)
+ resourceGroup := d.Get("resource_group").(string)
+
+ var warnLevel, warnMessage string
+ var warnCode int = 0
+
+ _, _, err = readUserFromDB(db, user)
+ if err != nil {
+ d.SetId("")
+ return diag.Errorf(`must create user first before assigning to resource group | getting user %s | error %s`, user, err)
+ }
+
+ sql := fmt.Sprintf("ALTER USER `%s` RESOURCE GROUP `%s`", user, resourceGroup)
+ log.Printf("[DEBUG] SQL: %s\n", sql)
+
+ _, err = db.ExecContext(ctx, sql)
+ if err != nil {
+ d.SetId("")
+ return diag.Errorf("error attaching user (%s) to resource group (%s): %s", user, resourceGroup, err)
+ }
+
+ // TODO: relevant?
+ db.QueryRowContext(ctx, "SHOW WARNINGS").Scan(&warnLevel, &warnCode, &warnMessage)
+ if warnCode != 0 {
+ d.SetId("")
+ return diag.Errorf("error setting value: %s -> %s Error: %s", user, resourceGroup, warnMessage)
+ }
+
+ d.SetId(user)
+ return nil
+}
+
+func ReadResourceGroupUser(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+ var user, resourceGroup string
+
+ db, err := getDatabaseFromMeta(ctx, meta)
+ if err != nil {
+ return diag.FromErr(err)
+ }
+
+ user, resourceGroup, err = readUserFromDB(db, d.Id())
+ if err != nil {
+ d.SetId("")
+ return diag.Errorf(`error getting user %s`, err)
+ }
+
+ d.Set("user", user)
+ d.Set("resourceGroup", resourceGroup)
+
+ return nil
+}
+
+func DeleteResourceGroupUser(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+ user := d.Get("user").(string)
+	// TODO: should we re-assert that it's part of the expected resourceGroup first? Likely unnecessary, because plan should read it beforehand
+
+ db, err := getDatabaseFromMeta(ctx, meta)
+ if err != nil {
+ return diag.FromErr(err)
+ }
+ deleteQuery := fmt.Sprintf("ALTER USER `%s` RESOURCE GROUP `default`", user)
+ _, err = db.Exec(deleteQuery)
+ if err != nil && !errors.Is(err, sql.ErrNoRows) {
+ return diag.Errorf("error during drop resource group (%s): %s", d.Id(), err)
+ }
+
+ d.SetId("")
+ return nil
+}
+
+func readUserFromDB(db *sql.DB, name string) (string, string, error) {
+ selectUsersQuery := `SELECT USER, IFNULL(JSON_EXTRACT(User_attributes, "$.resource_group"), "") as resource_group FROM mysql.user WHERE USER = ?`
+ row := db.QueryRow(selectUsersQuery, name)
+
+ var user, resourceGroup string
+
+ err := row.Scan(&user, &resourceGroup)
+ if errors.Is(err, sql.ErrNoRows) {
+ return "", "", sql.ErrNoRows
+ } else if err != nil {
+ return "", "", fmt.Errorf(`error fetching user %e`, err)
+ }
+
+ return user, resourceGroup, nil
+}
diff --git a/tools.go b/tools.go
new file mode 100644
index 00000000..6b99d67e
--- /dev/null
+++ b/tools.go
@@ -0,0 +1,12 @@
+//go:build tools
+
+/*
+Used for keeping tfplugindocs in go.mod when running go mod tidy,
+and used by the go:generate directive.
+Recommended approach per: https://github.com/golang/go/issues/25922#issuecomment-413898264
+*/
+package main
+
+import (
+ _ "github.com/hashicorp/terraform-plugin-docs"
+)
diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md
deleted file mode 100644
index 7ef5fc0d..00000000
--- a/vendor/cloud.google.com/go/auth/CHANGES.md
+++ /dev/null
@@ -1,149 +0,0 @@
-# Changelog
-
-## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.5.0...auth/v0.5.1) (2024-05-31)
-
-
-### Bug Fixes
-
-* **auth:** Pass through client to 2LO and 3LO flows ([#10290](https://github.com/googleapis/google-cloud-go/issues/10290)) ([685784e](https://github.com/googleapis/google-cloud-go/commit/685784ea84358c15e9214bdecb307d37aa3b6d2f))
-
-## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.2...auth/v0.5.0) (2024-05-28)
-
-
-### Features
-
-* **auth:** Adds X509 workload certificate provider ([#10233](https://github.com/googleapis/google-cloud-go/issues/10233)) ([17a9db7](https://github.com/googleapis/google-cloud-go/commit/17a9db73af35e3d1a7a25ac4fd1377a103de6150))
-
-## [0.4.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.1...auth/v0.4.2) (2024-05-16)
-
-
-### Bug Fixes
-
-* **auth:** Enable client certificates by default only for GDU ([#10151](https://github.com/googleapis/google-cloud-go/issues/10151)) ([7c52978](https://github.com/googleapis/google-cloud-go/commit/7c529786275a39b7e00525f7d5e7be0d963e9e15))
-* **auth:** Handle non-Transport DefaultTransport ([#10162](https://github.com/googleapis/google-cloud-go/issues/10162)) ([fa3bfdb](https://github.com/googleapis/google-cloud-go/commit/fa3bfdb23aaa45b34394a8b61e753b3587506782)), refs [#10159](https://github.com/googleapis/google-cloud-go/issues/10159)
-* **auth:** Have refresh time match docs ([#10147](https://github.com/googleapis/google-cloud-go/issues/10147)) ([bcb5568](https://github.com/googleapis/google-cloud-go/commit/bcb5568c07a54dd3d2e869d15f502b0741a609e8))
-* **auth:** Update compute token fetching error with named prefix ([#10180](https://github.com/googleapis/google-cloud-go/issues/10180)) ([4573504](https://github.com/googleapis/google-cloud-go/commit/4573504828d2928bebedc875d87650ba227829ea))
-
-## [0.4.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.4.0...auth/v0.4.1) (2024-05-09)
-
-
-### Bug Fixes
-
-* **auth:** Don't try to detect default creds it opt configured ([#10143](https://github.com/googleapis/google-cloud-go/issues/10143)) ([804632e](https://github.com/googleapis/google-cloud-go/commit/804632e7c5b0b85ff522f7951114485e256eb5bc))
-
-## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.3.0...auth/v0.4.0) (2024-05-07)
-
-
-### Features
-
-* **auth:** Enable client certificates by default ([#10102](https://github.com/googleapis/google-cloud-go/issues/10102)) ([9013e52](https://github.com/googleapis/google-cloud-go/commit/9013e5200a6ec0f178ed91acb255481ffb073a2c))
-
-
-### Bug Fixes
-
-* **auth:** Get s2a logic up to date ([#10093](https://github.com/googleapis/google-cloud-go/issues/10093)) ([4fe9ae4](https://github.com/googleapis/google-cloud-go/commit/4fe9ae4b7101af2a5221d6d6b2e77b479305bb06))
-
-## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.2...auth/v0.3.0) (2024-04-23)
-
-
-### Features
-
-* **auth/httptransport:** Add ability to customize transport ([#10023](https://github.com/googleapis/google-cloud-go/issues/10023)) ([72c7f6b](https://github.com/googleapis/google-cloud-go/commit/72c7f6bbec3136cc7a62788fc7186bc33ef6c3b3)), refs [#9812](https://github.com/googleapis/google-cloud-go/issues/9812) [#9814](https://github.com/googleapis/google-cloud-go/issues/9814)
-
-
-### Bug Fixes
-
-* **auth/credentials:** Error on bad file name if explicitly set ([#10018](https://github.com/googleapis/google-cloud-go/issues/10018)) ([55beaa9](https://github.com/googleapis/google-cloud-go/commit/55beaa993aaf052d8be39766afc6777c3c2a0bdd)), refs [#9809](https://github.com/googleapis/google-cloud-go/issues/9809)
-
-## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.1...auth/v0.2.2) (2024-04-19)
-
-
-### Bug Fixes
-
-* **auth:** Add internal opt to skip validation on transports ([#9999](https://github.com/googleapis/google-cloud-go/issues/9999)) ([9e20ef8](https://github.com/googleapis/google-cloud-go/commit/9e20ef89f6287d6bd03b8697d5898dc43b4a77cf)), refs [#9823](https://github.com/googleapis/google-cloud-go/issues/9823)
-* **auth:** Set secure flag for gRPC conn pools ([#10002](https://github.com/googleapis/google-cloud-go/issues/10002)) ([14e3956](https://github.com/googleapis/google-cloud-go/commit/14e3956dfd736399731b5ee8d9b178ae085cf7ba)), refs [#9833](https://github.com/googleapis/google-cloud-go/issues/9833)
-
-## [0.2.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.2.0...auth/v0.2.1) (2024-04-18)
-
-
-### Bug Fixes
-
-* **auth:** Default gRPC token type to Bearer if not set ([#9800](https://github.com/googleapis/google-cloud-go/issues/9800)) ([5284066](https://github.com/googleapis/google-cloud-go/commit/5284066670b6fe65d79089cfe0199c9660f87fc7))
-
-## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.1.1...auth/v0.2.0) (2024-04-15)
-
-### Breaking Changes
-
-In the below mentioned commits there were a few large breaking changes since the
-last release of the module.
-
-1. The `Credentials` type has been moved to the root of the module as it is
- becoming the core abstraction for the whole module.
-2. Because of the above mentioned change many functions that previously
- returned a `TokenProvider` now return `Credentials`. Similarly, these
- functions have been renamed to be more specific.
-3. Most places that used to take an optional `TokenProvider` now accept
- `Credentials`. You can make a `Credentials` from a `TokenProvider` using the
- constructor found in the `auth` package.
-4. The `detect` package has been renamed to `credentials`. With this change some
- function signatures were also updated for better readability.
-5. Derivative auth flows like `impersonate` and `downscope` have been moved to
- be under the new `credentials` package.
-
-Although these changes are disruptive we think that they are for the best of the
-long-term health of the module. We do not expect any more large breaking changes
-like these in future revisions, even before 1.0.0. This version will be the
-first version of the auth library that our client libraries start to use and
-depend on.
-
-### Features
-
-* **auth/credentials/externalaccount:** Add default TokenURL ([#9700](https://github.com/googleapis/google-cloud-go/issues/9700)) ([81830e6](https://github.com/googleapis/google-cloud-go/commit/81830e6848ceefd055aa4d08f933d1154455a0f6))
-* **auth:** Add downscope.Options.UniverseDomain ([#9634](https://github.com/googleapis/google-cloud-go/issues/9634)) ([52cf7d7](https://github.com/googleapis/google-cloud-go/commit/52cf7d780853594291c4e34302d618299d1f5a1d))
-* **auth:** Add universe domain to grpctransport and httptransport ([#9663](https://github.com/googleapis/google-cloud-go/issues/9663)) ([67d353b](https://github.com/googleapis/google-cloud-go/commit/67d353beefe3b607c08c891876fbd95ab89e5fe3)), refs [#9670](https://github.com/googleapis/google-cloud-go/issues/9670)
-* **auth:** Add UniverseDomain to DetectOptions ([#9536](https://github.com/googleapis/google-cloud-go/issues/9536)) ([3618d3f](https://github.com/googleapis/google-cloud-go/commit/3618d3f7061615c0e189f376c75abc201203b501))
-* **auth:** Make package externalaccount public ([#9633](https://github.com/googleapis/google-cloud-go/issues/9633)) ([a0978d8](https://github.com/googleapis/google-cloud-go/commit/a0978d8e96968399940ebd7d092539772bf9caac))
-* **auth:** Move credentials to base auth package ([#9590](https://github.com/googleapis/google-cloud-go/issues/9590)) ([1a04baf](https://github.com/googleapis/google-cloud-go/commit/1a04bafa83c27342b9308d785645e1e5423ea10d))
-* **auth:** Refactor public sigs to use Credentials ([#9603](https://github.com/googleapis/google-cloud-go/issues/9603)) ([69cb240](https://github.com/googleapis/google-cloud-go/commit/69cb240c530b1f7173a9af2555c19e9a1beb56c5))
-
-
-### Bug Fixes
-
-* **auth/oauth2adapt:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
-* **auth:** Fix uint32 conversion ([9221c7f](https://github.com/googleapis/google-cloud-go/commit/9221c7fa12cef9d5fb7ddc92f41f1d6204971c7b))
-* **auth:** Port sts expires fix ([#9618](https://github.com/googleapis/google-cloud-go/issues/9618)) ([7bec97b](https://github.com/googleapis/google-cloud-go/commit/7bec97b2f51ed3ac4f9b88bf100d301da3f5d1bd))
-* **auth:** Read universe_domain from all credentials files ([#9632](https://github.com/googleapis/google-cloud-go/issues/9632)) ([16efbb5](https://github.com/googleapis/google-cloud-go/commit/16efbb52e39ea4a319e5ee1e95c0e0305b6d9824))
-* **auth:** Remove content-type header from idms get requests ([#9508](https://github.com/googleapis/google-cloud-go/issues/9508)) ([8589f41](https://github.com/googleapis/google-cloud-go/commit/8589f41599d265d7c3d46a3d86c9fab2329cbdd9))
-* **auth:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
-
-## [0.1.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.1.0...auth/v0.1.1) (2024-03-10)
-
-
-### Bug Fixes
-
-* **auth/impersonate:** Properly send default detect params ([#9529](https://github.com/googleapis/google-cloud-go/issues/9529)) ([5b6b8be](https://github.com/googleapis/google-cloud-go/commit/5b6b8bef577f82707e51f5cc5d258d5bdf90218f)), refs [#9136](https://github.com/googleapis/google-cloud-go/issues/9136)
-* **auth:** Update grpc-go to v1.56.3 ([343cea8](https://github.com/googleapis/google-cloud-go/commit/343cea8c43b1e31ae21ad50ad31d3b0b60143f8c))
-* **auth:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7))
-
-## 0.1.0 (2023-10-18)
-
-
-### Features
-
-* **auth:** Add base auth package ([#8465](https://github.com/googleapis/google-cloud-go/issues/8465)) ([6a45f26](https://github.com/googleapis/google-cloud-go/commit/6a45f26b809b64edae21f312c18d4205f96b180e))
-* **auth:** Add cert support to httptransport ([#8569](https://github.com/googleapis/google-cloud-go/issues/8569)) ([37e3435](https://github.com/googleapis/google-cloud-go/commit/37e3435f8e98595eafab481bdfcb31a4c56fa993))
-* **auth:** Add Credentials.UniverseDomain() ([#8654](https://github.com/googleapis/google-cloud-go/issues/8654)) ([af0aa1e](https://github.com/googleapis/google-cloud-go/commit/af0aa1ed8015bc8fe0dd87a7549ae029107cbdb8))
-* **auth:** Add detect package ([#8491](https://github.com/googleapis/google-cloud-go/issues/8491)) ([d977419](https://github.com/googleapis/google-cloud-go/commit/d977419a3269f6acc193df77a2136a6eb4b4add7))
-* **auth:** Add downscope package ([#8532](https://github.com/googleapis/google-cloud-go/issues/8532)) ([dda9bff](https://github.com/googleapis/google-cloud-go/commit/dda9bff8ec70e6d104901b4105d13dcaa4e2404c))
-* **auth:** Add grpctransport package ([#8625](https://github.com/googleapis/google-cloud-go/issues/8625)) ([69a8347](https://github.com/googleapis/google-cloud-go/commit/69a83470bdcc7ed10c6c36d1abc3b7cfdb8a0ee5))
-* **auth:** Add httptransport package ([#8567](https://github.com/googleapis/google-cloud-go/issues/8567)) ([6898597](https://github.com/googleapis/google-cloud-go/commit/6898597d2ea95d630fcd00fd15c58c75ea843bff))
-* **auth:** Add idtoken package ([#8580](https://github.com/googleapis/google-cloud-go/issues/8580)) ([a79e693](https://github.com/googleapis/google-cloud-go/commit/a79e693e97e4e3e1c6742099af3dbc58866d88fe))
-* **auth:** Add impersonate package ([#8578](https://github.com/googleapis/google-cloud-go/issues/8578)) ([e29ba0c](https://github.com/googleapis/google-cloud-go/commit/e29ba0cb7bd3888ab9e808087027dc5a32474c04))
-* **auth:** Add support for external accounts in detect ([#8508](https://github.com/googleapis/google-cloud-go/issues/8508)) ([62210d5](https://github.com/googleapis/google-cloud-go/commit/62210d5d3e56e8e9f35db8e6ac0defec19582507))
-* **auth:** Port external account changes ([#8697](https://github.com/googleapis/google-cloud-go/issues/8697)) ([5823db5](https://github.com/googleapis/google-cloud-go/commit/5823db5d633069999b58b9131a7f9cd77e82c899))
-
-
-### Bug Fixes
-
-* **auth/oauth2adapt:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
-* **auth:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
diff --git a/vendor/cloud.google.com/go/auth/LICENSE b/vendor/cloud.google.com/go/auth/LICENSE
deleted file mode 100644
index d6456956..00000000
--- a/vendor/cloud.google.com/go/auth/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/cloud.google.com/go/auth/README.md b/vendor/cloud.google.com/go/auth/README.md
deleted file mode 100644
index 36de276a..00000000
--- a/vendor/cloud.google.com/go/auth/README.md
+++ /dev/null
@@ -1,4 +0,0 @@
-# auth
-
-This module is currently EXPERIMENTAL and under active development. It is not
-yet intended to be used.
diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go
deleted file mode 100644
index d579e482..00000000
--- a/vendor/cloud.google.com/go/auth/auth.go
+++ /dev/null
@@ -1,476 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package auth
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "net/url"
- "strings"
- "sync"
- "time"
-
- "cloud.google.com/go/auth/internal"
- "cloud.google.com/go/auth/internal/jwt"
-)
-
-const (
- // Parameter keys for AuthCodeURL method to support PKCE.
- codeChallengeKey = "code_challenge"
- codeChallengeMethodKey = "code_challenge_method"
-
- // Parameter key for Exchange method to support PKCE.
- codeVerifierKey = "code_verifier"
-
- // 3 minutes and 45 seconds before expiration. The shortest MDS cache is 4 minutes,
- // so we give it 15 seconds to refresh it's cache before attempting to refresh a token.
- defaultExpiryDelta = 225 * time.Second
-
- universeDomainDefault = "googleapis.com"
-)
-
-var (
- defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
- defaultHeader = &jwt.Header{Algorithm: jwt.HeaderAlgRSA256, Type: jwt.HeaderType}
-
- // for testing
- timeNow = time.Now
-)
-
-// TokenProvider specifies an interface for anything that can return a token.
-type TokenProvider interface {
- // Token returns a Token or an error.
- // The Token returned must be safe to use
- // concurrently.
- // The returned Token must not be modified.
- // The context provided must be sent along to any requests that are made in
- // the implementing code.
- Token(context.Context) (*Token, error)
-}
-
-// Token holds the credential token used to authorized requests. All fields are
-// considered read-only.
-type Token struct {
- // Value is the token used to authorize requests. It is usually an access
- // token but may be other types of tokens such as ID tokens in some flows.
- Value string
- // Type is the type of token Value is. If uninitialized, it should be
- // assumed to be a "Bearer" token.
- Type string
- // Expiry is the time the token is set to expire.
- Expiry time.Time
- // Metadata may include, but is not limited to, the body of the token
- // response returned by the server.
- Metadata map[string]interface{} // TODO(codyoss): maybe make a method to flatten metadata to avoid []string for url.Values
-}
-
-// IsValid reports that a [Token] is non-nil, has a [Token.Value], and has not
-// expired. A token is considered expired if [Token.Expiry] has passed or will
-// pass in the next 10 seconds.
-func (t *Token) IsValid() bool {
- return t.isValidWithEarlyExpiry(defaultExpiryDelta)
-}
-
-func (t *Token) isValidWithEarlyExpiry(earlyExpiry time.Duration) bool {
- if t == nil || t.Value == "" {
- return false
- }
- if t.Expiry.IsZero() {
- return true
- }
- return !t.Expiry.Round(0).Add(-earlyExpiry).Before(timeNow())
-}
-
-// Credentials holds Google credentials, including
-// [Application Default Credentials](https://developers.google.com/accounts/docs/application-default-credentials).
-type Credentials struct {
- json []byte
- projectID CredentialsPropertyProvider
- quotaProjectID CredentialsPropertyProvider
- // universeDomain is the default service domain for a given Cloud universe.
- universeDomain CredentialsPropertyProvider
-
- TokenProvider
-}
-
-// JSON returns the bytes associated with the the file used to source
-// credentials if one was used.
-func (c *Credentials) JSON() []byte {
- return c.json
-}
-
-// ProjectID returns the associated project ID from the underlying file or
-// environment.
-func (c *Credentials) ProjectID(ctx context.Context) (string, error) {
- if c.projectID == nil {
- return internal.GetProjectID(c.json, ""), nil
- }
- v, err := c.projectID.GetProperty(ctx)
- if err != nil {
- return "", err
- }
- return internal.GetProjectID(c.json, v), nil
-}
-
-// QuotaProjectID returns the associated quota project ID from the underlying
-// file or environment.
-func (c *Credentials) QuotaProjectID(ctx context.Context) (string, error) {
- if c.quotaProjectID == nil {
- return internal.GetQuotaProject(c.json, ""), nil
- }
- v, err := c.quotaProjectID.GetProperty(ctx)
- if err != nil {
- return "", err
- }
- return internal.GetQuotaProject(c.json, v), nil
-}
-
-// UniverseDomain returns the default service domain for a given Cloud universe.
-// The default value is "googleapis.com".
-func (c *Credentials) UniverseDomain(ctx context.Context) (string, error) {
- if c.universeDomain == nil {
- return universeDomainDefault, nil
- }
- v, err := c.universeDomain.GetProperty(ctx)
- if err != nil {
- return "", err
- }
- if v == "" {
- return universeDomainDefault, nil
- }
- return v, err
-}
-
-// CredentialsPropertyProvider provides an implementation to fetch a property
-// value for [Credentials].
-type CredentialsPropertyProvider interface {
- GetProperty(context.Context) (string, error)
-}
-
-// CredentialsPropertyFunc is a type adapter to allow the use of ordinary
-// functions as a [CredentialsPropertyProvider].
-type CredentialsPropertyFunc func(context.Context) (string, error)
-
-// GetProperty loads the properly value provided the given context.
-func (p CredentialsPropertyFunc) GetProperty(ctx context.Context) (string, error) {
- return p(ctx)
-}
-
-// CredentialsOptions are used to configure [Credentials].
-type CredentialsOptions struct {
- // TokenProvider is a means of sourcing a token for the credentials. Required.
- TokenProvider TokenProvider
- // JSON is the raw contents of the credentials file if sourced from a file.
- JSON []byte
- // ProjectIDProvider resolves the project ID associated with the
- // credentials.
- ProjectIDProvider CredentialsPropertyProvider
- // QuotaProjectIDProvider resolves the quota project ID associated with the
- // credentials.
- QuotaProjectIDProvider CredentialsPropertyProvider
- // UniverseDomainProvider resolves the universe domain with the credentials.
- UniverseDomainProvider CredentialsPropertyProvider
-}
-
-// NewCredentials returns new [Credentials] from the provided options. Most users
-// will want to build this object a function from the
-// [cloud.google.com/go/auth/credentials] package.
-func NewCredentials(opts *CredentialsOptions) *Credentials {
- creds := &Credentials{
- TokenProvider: opts.TokenProvider,
- json: opts.JSON,
- projectID: opts.ProjectIDProvider,
- quotaProjectID: opts.QuotaProjectIDProvider,
- universeDomain: opts.UniverseDomainProvider,
- }
-
- return creds
-}
-
-// CachedTokenProviderOptions provided options for configuring a
-// CachedTokenProvider.
-type CachedTokenProviderOptions struct {
- // DisableAutoRefresh makes the TokenProvider always return the same token,
- // even if it is expired.
- DisableAutoRefresh bool
- // ExpireEarly configures the amount of time before a token expires, that it
- // should be refreshed. If unset, the default value is 10 seconds.
- ExpireEarly time.Duration
-}
-
-func (ctpo *CachedTokenProviderOptions) autoRefresh() bool {
- if ctpo == nil {
- return true
- }
- return !ctpo.DisableAutoRefresh
-}
-
-func (ctpo *CachedTokenProviderOptions) expireEarly() time.Duration {
- if ctpo == nil {
- return defaultExpiryDelta
- }
- return ctpo.ExpireEarly
-}
-
-// NewCachedTokenProvider wraps a [TokenProvider] to cache the tokens returned
-// by the underlying provider. By default it will refresh tokens ten seconds
-// before they expire, but this time can be configured with the optional
-// options.
-func NewCachedTokenProvider(tp TokenProvider, opts *CachedTokenProviderOptions) TokenProvider {
- if ctp, ok := tp.(*cachedTokenProvider); ok {
- return ctp
- }
- return &cachedTokenProvider{
- tp: tp,
- autoRefresh: opts.autoRefresh(),
- expireEarly: opts.expireEarly(),
- }
-}
-
-type cachedTokenProvider struct {
- tp TokenProvider
- autoRefresh bool
- expireEarly time.Duration
-
- mu sync.Mutex
- cachedToken *Token
-}
-
-func (c *cachedTokenProvider) Token(ctx context.Context) (*Token, error) {
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.cachedToken.IsValid() || !c.autoRefresh {
- return c.cachedToken, nil
- }
- t, err := c.tp.Token(ctx)
- if err != nil {
- return nil, err
- }
- c.cachedToken = t
- return t, nil
-}
-
-// Error is a error associated with retrieving a [Token]. It can hold useful
-// additional details for debugging.
-type Error struct {
- // Response is the HTTP response associated with error. The body will always
- // be already closed and consumed.
- Response *http.Response
- // Body is the HTTP response body.
- Body []byte
- // Err is the underlying wrapped error.
- Err error
-
- // code returned in the token response
- code string
- // description returned in the token response
- description string
- // uri returned in the token response
- uri string
-}
-
-func (e *Error) Error() string {
- if e.code != "" {
- s := fmt.Sprintf("auth: %q", e.code)
- if e.description != "" {
- s += fmt.Sprintf(" %q", e.description)
- }
- if e.uri != "" {
- s += fmt.Sprintf(" %q", e.uri)
- }
- return s
- }
- return fmt.Sprintf("auth: cannot fetch token: %v\nResponse: %s", e.Response.StatusCode, e.Body)
-}
-
-// Temporary returns true if the error is considered temporary and may be able
-// to be retried.
-func (e *Error) Temporary() bool {
- if e.Response == nil {
- return false
- }
- sc := e.Response.StatusCode
- return sc == http.StatusInternalServerError || sc == http.StatusServiceUnavailable || sc == http.StatusRequestTimeout || sc == http.StatusTooManyRequests
-}
-
-func (e *Error) Unwrap() error {
- return e.Err
-}
-
-// Style describes how the token endpoint wants to receive the ClientID and
-// ClientSecret.
-type Style int
-
-const (
- // StyleUnknown means the value has not been initiated. Sending this in
- // a request will cause the token exchange to fail.
- StyleUnknown Style = iota
- // StyleInParams sends client info in the body of a POST request.
- StyleInParams
- // StyleInHeader sends client info using Basic Authorization header.
- StyleInHeader
-)
-
-// Options2LO is the configuration settings for doing a 2-legged JWT OAuth2 flow.
-type Options2LO struct {
- // Email is the OAuth2 client ID. This value is set as the "iss" in the
- // JWT.
- Email string
- // PrivateKey contains the contents of an RSA private key or the
- // contents of a PEM file that contains a private key. It is used to sign
- // the JWT created.
- PrivateKey []byte
- // TokenURL is th URL the JWT is sent to. Required.
- TokenURL string
- // PrivateKeyID is the ID of the key used to sign the JWT. It is used as the
- // "kid" in the JWT header. Optional.
- PrivateKeyID string
- // Subject is the used for to impersonate a user. It is used as the "sub" in
- // the JWT.m Optional.
- Subject string
- // Scopes specifies requested permissions for the token. Optional.
- Scopes []string
- // Expires specifies the lifetime of the token. Optional.
- Expires time.Duration
- // Audience specifies the "aud" in the JWT. Optional.
- Audience string
- // PrivateClaims allows specifying any custom claims for the JWT. Optional.
- PrivateClaims map[string]interface{}
-
- // Client is the client to be used to make the underlying token requests.
- // Optional.
- Client *http.Client
- // UseIDToken requests that the token returned be an ID token if one is
- // returned from the server. Optional.
- UseIDToken bool
-}
-
-func (o *Options2LO) client() *http.Client {
- if o.Client != nil {
- return o.Client
- }
- return internal.CloneDefaultClient()
-}
-
-func (o *Options2LO) validate() error {
- if o == nil {
- return errors.New("auth: options must be provided")
- }
- if o.Email == "" {
- return errors.New("auth: email must be provided")
- }
- if len(o.PrivateKey) == 0 {
- return errors.New("auth: private key must be provided")
- }
- if o.TokenURL == "" {
- return errors.New("auth: token URL must be provided")
- }
- return nil
-}
-
-// New2LOTokenProvider returns a [TokenProvider] from the provided options.
-func New2LOTokenProvider(opts *Options2LO) (TokenProvider, error) {
- if err := opts.validate(); err != nil {
- return nil, err
- }
- return tokenProvider2LO{opts: opts, Client: opts.client()}, nil
-}
-
-type tokenProvider2LO struct {
- opts *Options2LO
- Client *http.Client
-}
-
-func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) {
- pk, err := internal.ParseKey(tp.opts.PrivateKey)
- if err != nil {
- return nil, err
- }
- claimSet := &jwt.Claims{
- Iss: tp.opts.Email,
- Scope: strings.Join(tp.opts.Scopes, " "),
- Aud: tp.opts.TokenURL,
- AdditionalClaims: tp.opts.PrivateClaims,
- Sub: tp.opts.Subject,
- }
- if t := tp.opts.Expires; t > 0 {
- claimSet.Exp = time.Now().Add(t).Unix()
- }
- if aud := tp.opts.Audience; aud != "" {
- claimSet.Aud = aud
- }
- h := *defaultHeader
- h.KeyID = tp.opts.PrivateKeyID
- payload, err := jwt.EncodeJWS(&h, claimSet, pk)
- if err != nil {
- return nil, err
- }
- v := url.Values{}
- v.Set("grant_type", defaultGrantType)
- v.Set("assertion", payload)
- resp, err := tp.Client.PostForm(tp.opts.TokenURL, v)
- if err != nil {
- return nil, fmt.Errorf("auth: cannot fetch token: %w", err)
- }
- defer resp.Body.Close()
- body, err := internal.ReadAll(resp.Body)
- if err != nil {
- return nil, fmt.Errorf("auth: cannot fetch token: %w", err)
- }
- if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
- return nil, &Error{
- Response: resp,
- Body: body,
- }
- }
- // tokenRes is the JSON response body.
- var tokenRes struct {
- AccessToken string `json:"access_token"`
- TokenType string `json:"token_type"`
- IDToken string `json:"id_token"`
- ExpiresIn int64 `json:"expires_in"`
- }
- if err := json.Unmarshal(body, &tokenRes); err != nil {
- return nil, fmt.Errorf("auth: cannot fetch token: %w", err)
- }
- token := &Token{
- Value: tokenRes.AccessToken,
- Type: tokenRes.TokenType,
- }
- token.Metadata = make(map[string]interface{})
- json.Unmarshal(body, &token.Metadata) // no error checks for optional fields
-
- if secs := tokenRes.ExpiresIn; secs > 0 {
- token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
- }
- if v := tokenRes.IDToken; v != "" {
- // decode returned id token to get expiry
- claimSet, err := jwt.DecodeJWS(v)
- if err != nil {
- return nil, fmt.Errorf("auth: error decoding JWT token: %w", err)
- }
- token.Expiry = time.Unix(claimSet.Exp, 0)
- }
- if tp.opts.UseIDToken {
- if tokenRes.IDToken == "" {
- return nil, fmt.Errorf("auth: response doesn't have JWT token")
- }
- token.Value = tokenRes.IDToken
- }
- return token, nil
-}
diff --git a/vendor/cloud.google.com/go/auth/credentials/compute.go b/vendor/cloud.google.com/go/auth/credentials/compute.go
deleted file mode 100644
index f3ec8882..00000000
--- a/vendor/cloud.google.com/go/auth/credentials/compute.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package credentials
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "net/url"
- "strings"
- "time"
-
- "cloud.google.com/go/auth"
- "cloud.google.com/go/compute/metadata"
-)
-
-var (
- computeTokenMetadata = map[string]interface{}{
- "auth.google.tokenSource": "compute-metadata",
- "auth.google.serviceAccount": "default",
- }
- computeTokenURI = "instance/service-accounts/default/token"
-)
-
-// computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that
-// uses the metadata service to retrieve tokens.
-func computeTokenProvider(earlyExpiry time.Duration, scope ...string) auth.TokenProvider {
- return auth.NewCachedTokenProvider(computeProvider{scopes: scope}, &auth.CachedTokenProviderOptions{
- ExpireEarly: earlyExpiry,
- })
-}
-
-// computeProvider fetches tokens from the google cloud metadata service.
-type computeProvider struct {
- scopes []string
-}
-
-type metadataTokenResp struct {
- AccessToken string `json:"access_token"`
- ExpiresInSec int `json:"expires_in"`
- TokenType string `json:"token_type"`
-}
-
-func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) {
- tokenURI, err := url.Parse(computeTokenURI)
- if err != nil {
- return nil, err
- }
- if len(cs.scopes) > 0 {
- v := url.Values{}
- v.Set("scopes", strings.Join(cs.scopes, ","))
- tokenURI.RawQuery = v.Encode()
- }
- tokenJSON, err := metadata.GetWithContext(ctx, tokenURI.String())
- if err != nil {
- return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
- }
- var res metadataTokenResp
- if err := json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res); err != nil {
- return nil, fmt.Errorf("credentials: invalid token JSON from metadata: %w", err)
- }
- if res.ExpiresInSec == 0 || res.AccessToken == "" {
- return nil, errors.New("credentials: incomplete token received from metadata")
- }
- return &auth.Token{
- Value: res.AccessToken,
- Type: res.TokenType,
- Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
- Metadata: computeTokenMetadata,
- }, nil
-
-}
diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go
deleted file mode 100644
index cb3f44f5..00000000
--- a/vendor/cloud.google.com/go/auth/credentials/detect.go
+++ /dev/null
@@ -1,252 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package credentials
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "os"
- "time"
-
- "cloud.google.com/go/auth"
- "cloud.google.com/go/auth/internal"
- "cloud.google.com/go/auth/internal/credsfile"
- "cloud.google.com/go/compute/metadata"
-)
-
-const (
- // jwtTokenURL is Google's OAuth 2.0 token URL to use with the JWT(2LO) flow.
- jwtTokenURL = "https://oauth2.googleapis.com/token"
-
- // Google's OAuth 2.0 default endpoints.
- googleAuthURL = "https://accounts.google.com/o/oauth2/auth"
- googleTokenURL = "https://oauth2.googleapis.com/token"
-
- // Help on default credentials
- adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc"
-)
-
-var (
- // for testing
- allowOnGCECheck = true
-)
-
-// OnGCE reports whether this process is running in Google Cloud.
-func OnGCE() bool {
- // TODO(codyoss): once all libs use this auth lib move metadata check here
- return allowOnGCECheck && metadata.OnGCE()
-}
-
-// DetectDefault searches for "Application Default Credentials" and returns
-// a credential based on the [DetectOptions] provided.
-//
-// It looks for credentials in the following places, preferring the first
-// location found:
-//
-// - A JSON file whose path is specified by the GOOGLE_APPLICATION_CREDENTIALS
-// environment variable. For workload identity federation, refer to
-// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation
-// on how to generate the JSON configuration file for on-prem/non-Google
-// cloud platforms.
-// - A JSON file in a location known to the gcloud command-line tool. On
-// Windows, this is %APPDATA%/gcloud/application_default_credentials.json. On
-// other systems, $HOME/.config/gcloud/application_default_credentials.json.
-// - On Google Compute Engine, Google App Engine standard second generation
-// runtimes, and Google App Engine flexible environment, it fetches
-// credentials from the metadata server.
-func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) {
- if err := opts.validate(); err != nil {
- return nil, err
- }
- if opts.CredentialsJSON != nil {
- return readCredentialsFileJSON(opts.CredentialsJSON, opts)
- }
- if opts.CredentialsFile != "" {
- return readCredentialsFile(opts.CredentialsFile, opts)
- }
- if filename := os.Getenv(credsfile.GoogleAppCredsEnvVar); filename != "" {
- if creds, err := readCredentialsFile(filename, opts); err == nil {
- return creds, err
- }
- }
-
- fileName := credsfile.GetWellKnownFileName()
- if b, err := os.ReadFile(fileName); err == nil {
- return readCredentialsFileJSON(b, opts)
- }
-
- if OnGCE() {
- return auth.NewCredentials(&auth.CredentialsOptions{
- TokenProvider: computeTokenProvider(opts.EarlyTokenRefresh, opts.Scopes...),
- ProjectIDProvider: auth.CredentialsPropertyFunc(func(context.Context) (string, error) {
- return metadata.ProjectID()
- }),
- UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{},
- }), nil
- }
-
- return nil, fmt.Errorf("credentials: could not find default credentials. See %v for more information", adcSetupURL)
-}
-
-// DetectOptions provides configuration for [DetectDefault].
-type DetectOptions struct {
- // Scopes that credentials tokens should have. Example:
- // https://www.googleapis.com/auth/cloud-platform. Required if Audience is
- // not provided.
- Scopes []string
- // Audience that credentials tokens should have. Only applicable for 2LO
- // flows with service accounts. If specified, scopes should not be provided.
- Audience string
- // Subject is the user email used for [domain wide delegation](https://developers.google.com/identity/protocols/oauth2/service-account#delegatingauthority).
- // Optional.
- Subject string
- // EarlyTokenRefresh configures how early before a token expires that it
- // should be refreshed.
- EarlyTokenRefresh time.Duration
- // AuthHandlerOptions configures an authorization handler and other options
- // for 3LO flows. It is required, and only used, for client credential
- // flows.
- AuthHandlerOptions *auth.AuthorizationHandlerOptions
- // TokenURL allows to set the token endpoint for user credential flows. If
- // unset the default value is: https://oauth2.googleapis.com/token.
- // Optional.
- TokenURL string
- // STSAudience is the audience sent to when retrieving an STS token.
- // Currently this only used for GDCH auth flow, for which it is required.
- STSAudience string
- // CredentialsFile overrides detection logic and sources a credential file
- // from the provided filepath. If provided, CredentialsJSON must not be.
- // Optional.
- CredentialsFile string
- // CredentialsJSON overrides detection logic and uses the JSON bytes as the
- // source for the credential. If provided, CredentialsFile must not be.
- // Optional.
- CredentialsJSON []byte
- // UseSelfSignedJWT directs service account based credentials to create a
- // self-signed JWT with the private key found in the file, skipping any
- // network requests that would normally be made. Optional.
- UseSelfSignedJWT bool
- // Client configures the underlying client used to make network requests
- // when fetching tokens. Optional.
- Client *http.Client
- // UniverseDomain is the default service domain for a given Cloud universe.
- // The default value is "googleapis.com". This option is ignored for
- // authentication flows that do not support universe domain. Optional.
- UniverseDomain string
-}
-
-func (o *DetectOptions) validate() error {
- if o == nil {
- return errors.New("credentials: options must be provided")
- }
- if len(o.Scopes) > 0 && o.Audience != "" {
- return errors.New("credentials: both scopes and audience were provided")
- }
- if len(o.CredentialsJSON) > 0 && o.CredentialsFile != "" {
- return errors.New("credentials: both credentials file and JSON were provided")
- }
- return nil
-}
-
-func (o *DetectOptions) tokenURL() string {
- if o.TokenURL != "" {
- return o.TokenURL
- }
- return googleTokenURL
-}
-
-func (o *DetectOptions) scopes() []string {
- scopes := make([]string, len(o.Scopes))
- copy(scopes, o.Scopes)
- return scopes
-}
-
-func (o *DetectOptions) client() *http.Client {
- if o.Client != nil {
- return o.Client
- }
- return internal.CloneDefaultClient()
-}
-
-func readCredentialsFile(filename string, opts *DetectOptions) (*auth.Credentials, error) {
- b, err := os.ReadFile(filename)
- if err != nil {
- return nil, err
- }
- return readCredentialsFileJSON(b, opts)
-}
-
-func readCredentialsFileJSON(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
- // attempt to parse jsonData as a Google Developers Console client_credentials.json.
- config := clientCredConfigFromJSON(b, opts)
- if config != nil {
- if config.AuthHandlerOpts == nil {
- return nil, errors.New("credentials: auth handler must be specified for this credential filetype")
- }
- tp, err := auth.New3LOTokenProvider(config)
- if err != nil {
- return nil, err
- }
- return auth.NewCredentials(&auth.CredentialsOptions{
- TokenProvider: tp,
- JSON: b,
- }), nil
- }
- return fileCredentials(b, opts)
-}
-
-func clientCredConfigFromJSON(b []byte, opts *DetectOptions) *auth.Options3LO {
- var creds credsfile.ClientCredentialsFile
- var c *credsfile.Config3LO
- if err := json.Unmarshal(b, &creds); err != nil {
- return nil
- }
- switch {
- case creds.Web != nil:
- c = creds.Web
- case creds.Installed != nil:
- c = creds.Installed
- default:
- return nil
- }
- if len(c.RedirectURIs) < 1 {
- return nil
- }
- var handleOpts *auth.AuthorizationHandlerOptions
- if opts.AuthHandlerOptions != nil {
- handleOpts = &auth.AuthorizationHandlerOptions{
- Handler: opts.AuthHandlerOptions.Handler,
- State: opts.AuthHandlerOptions.State,
- PKCEOpts: opts.AuthHandlerOptions.PKCEOpts,
- }
- }
- return &auth.Options3LO{
- ClientID: c.ClientID,
- ClientSecret: c.ClientSecret,
- RedirectURL: c.RedirectURIs[0],
- Scopes: opts.scopes(),
- AuthURL: c.AuthURI,
- TokenURL: c.TokenURI,
- Client: opts.client(),
- EarlyTokenExpiry: opts.EarlyTokenRefresh,
- AuthHandlerOpts: handleOpts,
- // TODO(codyoss): refactor this out. We need to add in auto-detection
- // for this use case.
- AuthStyle: auth.StyleInParams,
- }
-}
diff --git a/vendor/cloud.google.com/go/auth/credentials/doc.go b/vendor/cloud.google.com/go/auth/credentials/doc.go
deleted file mode 100644
index 1dbb2866..00000000
--- a/vendor/cloud.google.com/go/auth/credentials/doc.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package credentials provides support for making OAuth2 authorized and
-// authenticated HTTP requests to Google APIs. It supports the Web server flow,
-// client-side credentials, service accounts, Google Compute Engine service
-// accounts, Google App Engine service accounts and workload identity federation
-// from non-Google cloud platforms.
-//
-// A brief overview of the package follows. For more information, please read
-// https://developers.google.com/accounts/docs/OAuth2
-// and
-// https://developers.google.com/accounts/docs/application-default-credentials.
-// For more information on using workload identity federation, refer to
-// https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation.
-//
-// # Credentials
-//
-// The [cloud.google.com/go/auth.Credentials] type represents Google
-// credentials, including Application Default Credentials.
-//
-// Use [DetectDefault] to obtain Application Default Credentials.
-//
-// Application Default Credentials support workload identity federation to
-// access Google Cloud resources from non-Google Cloud platforms including Amazon
-// Web Services (AWS), Microsoft Azure or any identity provider that supports
-// OpenID Connect (OIDC). Workload identity federation is recommended for
-// non-Google Cloud environments as it avoids the need to download, manage, and
-// store service account private keys locally.
-//
-// # Workforce Identity Federation
-//
-// For more information on this feature see [cloud.google.com/go/auth/credentials/externalaccount].
-package credentials
diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go
deleted file mode 100644
index fe935573..00000000
--- a/vendor/cloud.google.com/go/auth/credentials/filetypes.go
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package credentials
-
-import (
- "errors"
- "fmt"
-
- "cloud.google.com/go/auth"
- "cloud.google.com/go/auth/credentials/internal/externalaccount"
- "cloud.google.com/go/auth/credentials/internal/externalaccountuser"
- "cloud.google.com/go/auth/credentials/internal/gdch"
- "cloud.google.com/go/auth/credentials/internal/impersonate"
- internalauth "cloud.google.com/go/auth/internal"
- "cloud.google.com/go/auth/internal/credsfile"
-)
-
-func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
- fileType, err := credsfile.ParseFileType(b)
- if err != nil {
- return nil, err
- }
-
- var projectID, quotaProjectID, universeDomain string
- var tp auth.TokenProvider
- switch fileType {
- case credsfile.ServiceAccountKey:
- f, err := credsfile.ParseServiceAccount(b)
- if err != nil {
- return nil, err
- }
- tp, err = handleServiceAccount(f, opts)
- if err != nil {
- return nil, err
- }
- projectID = f.ProjectID
- universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
- case credsfile.UserCredentialsKey:
- f, err := credsfile.ParseUserCredentials(b)
- if err != nil {
- return nil, err
- }
- tp, err = handleUserCredential(f, opts)
- if err != nil {
- return nil, err
- }
- quotaProjectID = f.QuotaProjectID
- universeDomain = f.UniverseDomain
- case credsfile.ExternalAccountKey:
- f, err := credsfile.ParseExternalAccount(b)
- if err != nil {
- return nil, err
- }
- tp, err = handleExternalAccount(f, opts)
- if err != nil {
- return nil, err
- }
- quotaProjectID = f.QuotaProjectID
- universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
- case credsfile.ExternalAccountAuthorizedUserKey:
- f, err := credsfile.ParseExternalAccountAuthorizedUser(b)
- if err != nil {
- return nil, err
- }
- tp, err = handleExternalAccountAuthorizedUser(f, opts)
- if err != nil {
- return nil, err
- }
- quotaProjectID = f.QuotaProjectID
- universeDomain = f.UniverseDomain
- case credsfile.ImpersonatedServiceAccountKey:
- f, err := credsfile.ParseImpersonatedServiceAccount(b)
- if err != nil {
- return nil, err
- }
- tp, err = handleImpersonatedServiceAccount(f, opts)
- if err != nil {
- return nil, err
- }
- universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
- case credsfile.GDCHServiceAccountKey:
- f, err := credsfile.ParseGDCHServiceAccount(b)
- if err != nil {
- return nil, err
- }
- tp, err = handleGDCHServiceAccount(f, opts)
- if err != nil {
- return nil, err
- }
- projectID = f.Project
- universeDomain = f.UniverseDomain
- default:
- return nil, fmt.Errorf("credentials: unsupported filetype %q", fileType)
- }
- return auth.NewCredentials(&auth.CredentialsOptions{
- TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{
- ExpireEarly: opts.EarlyTokenRefresh,
- }),
- JSON: b,
- ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID),
- QuotaProjectIDProvider: internalauth.StaticCredentialsProperty(quotaProjectID),
- UniverseDomainProvider: internalauth.StaticCredentialsProperty(universeDomain),
- }), nil
-}
-
-// resolveUniverseDomain returns optsUniverseDomain if non-empty, in order to
-// support configuring universe-specific credentials in code. Auth flows
-// unsupported for universe domain should not use this func, but should instead
-// simply set the file universe domain on the credentials.
-func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string {
- if optsUniverseDomain != "" {
- return optsUniverseDomain
- }
- return fileUniverseDomain
-}
-
-func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
- if opts.UseSelfSignedJWT {
- return configureSelfSignedJWT(f, opts)
- }
- opts2LO := &auth.Options2LO{
- Email: f.ClientEmail,
- PrivateKey: []byte(f.PrivateKey),
- PrivateKeyID: f.PrivateKeyID,
- Scopes: opts.scopes(),
- TokenURL: f.TokenURL,
- Subject: opts.Subject,
- Client: opts.client(),
- }
- if opts2LO.TokenURL == "" {
- opts2LO.TokenURL = jwtTokenURL
- }
- return auth.New2LOTokenProvider(opts2LO)
-}
-
-func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions) (auth.TokenProvider, error) {
- opts3LO := &auth.Options3LO{
- ClientID: f.ClientID,
- ClientSecret: f.ClientSecret,
- Scopes: opts.scopes(),
- AuthURL: googleAuthURL,
- TokenURL: opts.tokenURL(),
- AuthStyle: auth.StyleInParams,
- EarlyTokenExpiry: opts.EarlyTokenRefresh,
- RefreshToken: f.RefreshToken,
- Client: opts.client(),
- }
- return auth.New3LOTokenProvider(opts3LO)
-}
-
-func handleExternalAccount(f *credsfile.ExternalAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
- externalOpts := &externalaccount.Options{
- Audience: f.Audience,
- SubjectTokenType: f.SubjectTokenType,
- TokenURL: f.TokenURL,
- TokenInfoURL: f.TokenInfoURL,
- ServiceAccountImpersonationURL: f.ServiceAccountImpersonationURL,
- ClientSecret: f.ClientSecret,
- ClientID: f.ClientID,
- CredentialSource: f.CredentialSource,
- QuotaProjectID: f.QuotaProjectID,
- Scopes: opts.scopes(),
- WorkforcePoolUserProject: f.WorkforcePoolUserProject,
- Client: opts.client(),
- }
- if f.ServiceAccountImpersonation != nil {
- externalOpts.ServiceAccountImpersonationLifetimeSeconds = f.ServiceAccountImpersonation.TokenLifetimeSeconds
- }
- return externalaccount.NewTokenProvider(externalOpts)
-}
-
-func handleExternalAccountAuthorizedUser(f *credsfile.ExternalAccountAuthorizedUserFile, opts *DetectOptions) (auth.TokenProvider, error) {
- externalOpts := &externalaccountuser.Options{
- Audience: f.Audience,
- RefreshToken: f.RefreshToken,
- TokenURL: f.TokenURL,
- TokenInfoURL: f.TokenInfoURL,
- ClientID: f.ClientID,
- ClientSecret: f.ClientSecret,
- Scopes: opts.scopes(),
- Client: opts.client(),
- }
- return externalaccountuser.NewTokenProvider(externalOpts)
-}
-
-func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
- if f.ServiceAccountImpersonationURL == "" || f.CredSource == nil {
- return nil, errors.New("missing 'source_credentials' field or 'service_account_impersonation_url' in credentials")
- }
-
- tp, err := fileCredentials(f.CredSource, opts)
- if err != nil {
- return nil, err
- }
- return impersonate.NewTokenProvider(&impersonate.Options{
- URL: f.ServiceAccountImpersonationURL,
- Scopes: opts.scopes(),
- Tp: tp,
- Delegates: f.Delegates,
- Client: opts.client(),
- })
-}
-
-func handleGDCHServiceAccount(f *credsfile.GDCHServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
- return gdch.NewTokenProvider(f, &gdch.Options{
- STSAudience: opts.STSAudience,
- Client: opts.client(),
- })
-}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
deleted file mode 100644
index d9e1dcdd..00000000
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
+++ /dev/null
@@ -1,547 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package externalaccount
-
-import (
- "bytes"
- "context"
- "crypto/hmac"
- "crypto/sha256"
- "encoding/hex"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "net/url"
- "os"
- "path"
- "sort"
- "strings"
- "time"
-
- "cloud.google.com/go/auth/internal"
-)
-
-var (
- // getenv aliases os.Getenv for testing
- getenv = os.Getenv
-)
-
-const (
- // AWS Signature Version 4 signing algorithm identifier.
- awsAlgorithm = "AWS4-HMAC-SHA256"
-
- // The termination string for the AWS credential scope value as defined in
- // https://docs.aws.amazon.com/general/latest/gr/sigv4-create-string-to-sign.html
- awsRequestType = "aws4_request"
-
- // The AWS authorization header name for the security session token if available.
- awsSecurityTokenHeader = "x-amz-security-token"
-
- // The name of the header containing the session token for metadata endpoint calls
- awsIMDSv2SessionTokenHeader = "X-aws-ec2-metadata-token"
-
- awsIMDSv2SessionTTLHeader = "X-aws-ec2-metadata-token-ttl-seconds"
-
- awsIMDSv2SessionTTL = "300"
-
- // The AWS authorization header name for the auto-generated date.
- awsDateHeader = "x-amz-date"
-
- defaultRegionalCredentialVerificationURL = "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15"
-
- // Supported AWS configuration environment variables.
- awsAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID"
- awsDefaultRegionEnvVar = "AWS_DEFAULT_REGION"
- awsRegionEnvVar = "AWS_REGION"
- awsSecretAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY"
- awsSessionTokenEnvVar = "AWS_SESSION_TOKEN"
-
- awsTimeFormatLong = "20060102T150405Z"
- awsTimeFormatShort = "20060102"
- awsProviderType = "aws"
-)
-
-type awsSubjectProvider struct {
- EnvironmentID string
- RegionURL string
- RegionalCredVerificationURL string
- CredVerificationURL string
- IMDSv2SessionTokenURL string
- TargetResource string
- requestSigner *awsRequestSigner
- region string
- securityCredentialsProvider AwsSecurityCredentialsProvider
- reqOpts *RequestOptions
-
- Client *http.Client
-}
-
-func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) {
- // Set Defaults
- if sp.RegionalCredVerificationURL == "" {
- sp.RegionalCredVerificationURL = defaultRegionalCredentialVerificationURL
- }
- if sp.requestSigner == nil {
- headers := make(map[string]string)
- if sp.shouldUseMetadataServer() {
- awsSessionToken, err := sp.getAWSSessionToken(ctx)
- if err != nil {
- return "", err
- }
-
- if awsSessionToken != "" {
- headers[awsIMDSv2SessionTokenHeader] = awsSessionToken
- }
- }
-
- awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers)
- if err != nil {
- return "", err
- }
- if sp.region, err = sp.getRegion(ctx, headers); err != nil {
- return "", err
- }
- sp.requestSigner = &awsRequestSigner{
- RegionName: sp.region,
- AwsSecurityCredentials: awsSecurityCredentials,
- }
- }
-
- // Generate the signed request to AWS STS GetCallerIdentity API.
- // Use the required regional endpoint. Otherwise, the request will fail.
- req, err := http.NewRequest("POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil)
- if err != nil {
- return "", err
- }
- // The full, canonical resource name of the workload identity pool
- // provider, with or without the HTTPS prefix.
- // Including this header as part of the signature is recommended to
- // ensure data integrity.
- if sp.TargetResource != "" {
- req.Header.Set("x-goog-cloud-target-resource", sp.TargetResource)
- }
- sp.requestSigner.signRequest(req)
-
- /*
- The GCP STS endpoint expects the headers to be formatted as:
- # [
- # {key: 'x-amz-date', value: '...'},
- # {key: 'Authorization', value: '...'},
- # ...
- # ]
- # And then serialized as:
- # quote(json.dumps({
- # url: '...',
- # method: 'POST',
- # headers: [{key: 'x-amz-date', value: '...'}, ...]
- # }))
- */
-
- awsSignedReq := awsRequest{
- URL: req.URL.String(),
- Method: "POST",
- }
- for headerKey, headerList := range req.Header {
- for _, headerValue := range headerList {
- awsSignedReq.Headers = append(awsSignedReq.Headers, awsRequestHeader{
- Key: headerKey,
- Value: headerValue,
- })
- }
- }
- sort.Slice(awsSignedReq.Headers, func(i, j int) bool {
- headerCompare := strings.Compare(awsSignedReq.Headers[i].Key, awsSignedReq.Headers[j].Key)
- if headerCompare == 0 {
- return strings.Compare(awsSignedReq.Headers[i].Value, awsSignedReq.Headers[j].Value) < 0
- }
- return headerCompare < 0
- })
-
- result, err := json.Marshal(awsSignedReq)
- if err != nil {
- return "", err
- }
- return url.QueryEscape(string(result)), nil
-}
-
-func (sp *awsSubjectProvider) providerType() string {
- if sp.securityCredentialsProvider != nil {
- return programmaticProviderType
- }
- return awsProviderType
-}
-
-func (sp *awsSubjectProvider) getAWSSessionToken(ctx context.Context) (string, error) {
- if sp.IMDSv2SessionTokenURL == "" {
- return "", nil
- }
- req, err := http.NewRequestWithContext(ctx, "PUT", sp.IMDSv2SessionTokenURL, nil)
- if err != nil {
- return "", err
- }
- req.Header.Set(awsIMDSv2SessionTTLHeader, awsIMDSv2SessionTTL)
-
- resp, err := sp.Client.Do(req)
- if err != nil {
- return "", err
- }
- defer resp.Body.Close()
-
- respBody, err := internal.ReadAll(resp.Body)
- if err != nil {
- return "", err
- }
- if resp.StatusCode != http.StatusOK {
- return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", respBody)
- }
- return string(respBody), nil
-}
-
-func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string]string) (string, error) {
- if sp.securityCredentialsProvider != nil {
- return sp.securityCredentialsProvider.AwsRegion(ctx, sp.reqOpts)
- }
- if canRetrieveRegionFromEnvironment() {
- if envAwsRegion := getenv(awsRegionEnvVar); envAwsRegion != "" {
- return envAwsRegion, nil
- }
- return getenv(awsDefaultRegionEnvVar), nil
- }
-
- if sp.RegionURL == "" {
- return "", errors.New("credentials: unable to determine AWS region")
- }
-
- req, err := http.NewRequestWithContext(ctx, "GET", sp.RegionURL, nil)
- if err != nil {
- return "", err
- }
-
- for name, value := range headers {
- req.Header.Add(name, value)
- }
-
- resp, err := sp.Client.Do(req)
- if err != nil {
- return "", err
- }
- defer resp.Body.Close()
-
- respBody, err := internal.ReadAll(resp.Body)
- if err != nil {
- return "", err
- }
-
- if resp.StatusCode != http.StatusOK {
- return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", respBody)
- }
-
- // This endpoint will return the region in format: us-east-2b.
- // Only the us-east-2 part should be used.
- bodyLen := len(respBody)
- if bodyLen == 0 {
- return "", nil
- }
- return string(respBody[:bodyLen-1]), nil
-}
-
-func (sp *awsSubjectProvider) getSecurityCredentials(ctx context.Context, headers map[string]string) (result *AwsSecurityCredentials, err error) {
- if sp.securityCredentialsProvider != nil {
- return sp.securityCredentialsProvider.AwsSecurityCredentials(ctx, sp.reqOpts)
- }
- if canRetrieveSecurityCredentialFromEnvironment() {
- return &AwsSecurityCredentials{
- AccessKeyID: getenv(awsAccessKeyIDEnvVar),
- SecretAccessKey: getenv(awsSecretAccessKeyEnvVar),
- SessionToken: getenv(awsSessionTokenEnvVar),
- }, nil
- }
-
- roleName, err := sp.getMetadataRoleName(ctx, headers)
- if err != nil {
- return
- }
- credentials, err := sp.getMetadataSecurityCredentials(ctx, roleName, headers)
- if err != nil {
- return
- }
-
- if credentials.AccessKeyID == "" {
- return result, errors.New("credentials: missing AccessKeyId credential")
- }
- if credentials.SecretAccessKey == "" {
- return result, errors.New("credentials: missing SecretAccessKey credential")
- }
-
- return credentials, nil
-}
-
-func (sp *awsSubjectProvider) getMetadataSecurityCredentials(ctx context.Context, roleName string, headers map[string]string) (*AwsSecurityCredentials, error) {
- var result *AwsSecurityCredentials
-
- req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/%s", sp.CredVerificationURL, roleName), nil)
- if err != nil {
- return result, err
- }
- for name, value := range headers {
- req.Header.Add(name, value)
- }
-
- resp, err := sp.Client.Do(req)
- if err != nil {
- return result, err
- }
- defer resp.Body.Close()
-
- respBody, err := internal.ReadAll(resp.Body)
- if err != nil {
- return result, err
- }
- if resp.StatusCode != http.StatusOK {
- return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", respBody)
- }
- err = json.Unmarshal(respBody, &result)
- return result, err
-}
-
-func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers map[string]string) (string, error) {
- if sp.CredVerificationURL == "" {
- return "", errors.New("credentials: unable to determine the AWS metadata server security credentials endpoint")
- }
- req, err := http.NewRequestWithContext(ctx, "GET", sp.CredVerificationURL, nil)
- if err != nil {
- return "", err
- }
- for name, value := range headers {
- req.Header.Add(name, value)
- }
-
- resp, err := sp.Client.Do(req)
- if err != nil {
- return "", err
- }
- defer resp.Body.Close()
-
- respBody, err := internal.ReadAll(resp.Body)
- if err != nil {
- return "", err
- }
- if resp.StatusCode != http.StatusOK {
- return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", respBody)
- }
- return string(respBody), nil
-}
-
-// awsRequestSigner is a utility class to sign http requests using a AWS V4 signature.
-type awsRequestSigner struct {
- RegionName string
- AwsSecurityCredentials *AwsSecurityCredentials
-}
-
-// signRequest adds the appropriate headers to an http.Request
-// or returns an error if something prevented this.
-func (rs *awsRequestSigner) signRequest(req *http.Request) error {
- // req is assumed non-nil
- signedRequest := cloneRequest(req)
- timestamp := Now()
- signedRequest.Header.Set("host", requestHost(req))
- if rs.AwsSecurityCredentials.SessionToken != "" {
- signedRequest.Header.Set(awsSecurityTokenHeader, rs.AwsSecurityCredentials.SessionToken)
- }
- if signedRequest.Header.Get("date") == "" {
- signedRequest.Header.Set(awsDateHeader, timestamp.Format(awsTimeFormatLong))
- }
- authorizationCode, err := rs.generateAuthentication(signedRequest, timestamp)
- if err != nil {
- return err
- }
- signedRequest.Header.Set("Authorization", authorizationCode)
- req.Header = signedRequest.Header
- return nil
-}
-
-func (rs *awsRequestSigner) generateAuthentication(req *http.Request, timestamp time.Time) (string, error) {
- canonicalHeaderColumns, canonicalHeaderData := canonicalHeaders(req)
- dateStamp := timestamp.Format(awsTimeFormatShort)
- serviceName := ""
-
- if splitHost := strings.Split(requestHost(req), "."); len(splitHost) > 0 {
- serviceName = splitHost[0]
- }
- credentialScope := strings.Join([]string{dateStamp, rs.RegionName, serviceName, awsRequestType}, "/")
- requestString, err := canonicalRequest(req, canonicalHeaderColumns, canonicalHeaderData)
- if err != nil {
- return "", err
- }
- requestHash, err := getSha256([]byte(requestString))
- if err != nil {
- return "", err
- }
-
- stringToSign := strings.Join([]string{awsAlgorithm, timestamp.Format(awsTimeFormatLong), credentialScope, requestHash}, "\n")
- signingKey := []byte("AWS4" + rs.AwsSecurityCredentials.SecretAccessKey)
- for _, signingInput := range []string{
- dateStamp, rs.RegionName, serviceName, awsRequestType, stringToSign,
- } {
- signingKey, err = getHmacSha256(signingKey, []byte(signingInput))
- if err != nil {
- return "", err
- }
- }
-
- return fmt.Sprintf("%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", awsAlgorithm, rs.AwsSecurityCredentials.AccessKeyID, credentialScope, canonicalHeaderColumns, hex.EncodeToString(signingKey)), nil
-}
-
-func getSha256(input []byte) (string, error) {
- hash := sha256.New()
- if _, err := hash.Write(input); err != nil {
- return "", err
- }
- return hex.EncodeToString(hash.Sum(nil)), nil
-}
-
-func getHmacSha256(key, input []byte) ([]byte, error) {
- hash := hmac.New(sha256.New, key)
- if _, err := hash.Write(input); err != nil {
- return nil, err
- }
- return hash.Sum(nil), nil
-}
-
-func cloneRequest(r *http.Request) *http.Request {
- r2 := new(http.Request)
- *r2 = *r
- if r.Header != nil {
- r2.Header = make(http.Header, len(r.Header))
-
- // Find total number of values.
- headerCount := 0
- for _, headerValues := range r.Header {
- headerCount += len(headerValues)
- }
- copiedHeaders := make([]string, headerCount) // shared backing array for headers' values
-
- for headerKey, headerValues := range r.Header {
- headerCount = copy(copiedHeaders, headerValues)
- r2.Header[headerKey] = copiedHeaders[:headerCount:headerCount]
- copiedHeaders = copiedHeaders[headerCount:]
- }
- }
- return r2
-}
-
-func canonicalPath(req *http.Request) string {
- result := req.URL.EscapedPath()
- if result == "" {
- return "/"
- }
- return path.Clean(result)
-}
-
-func canonicalQuery(req *http.Request) string {
- queryValues := req.URL.Query()
- for queryKey := range queryValues {
- sort.Strings(queryValues[queryKey])
- }
- return queryValues.Encode()
-}
-
-func canonicalHeaders(req *http.Request) (string, string) {
- // Header keys need to be sorted alphabetically.
- var headers []string
- lowerCaseHeaders := make(http.Header)
- for k, v := range req.Header {
- k := strings.ToLower(k)
- if _, ok := lowerCaseHeaders[k]; ok {
- // include additional values
- lowerCaseHeaders[k] = append(lowerCaseHeaders[k], v...)
- } else {
- headers = append(headers, k)
- lowerCaseHeaders[k] = v
- }
- }
- sort.Strings(headers)
-
- var fullHeaders bytes.Buffer
- for _, header := range headers {
- headerValue := strings.Join(lowerCaseHeaders[header], ",")
- fullHeaders.WriteString(header)
- fullHeaders.WriteRune(':')
- fullHeaders.WriteString(headerValue)
- fullHeaders.WriteRune('\n')
- }
-
- return strings.Join(headers, ";"), fullHeaders.String()
-}
-
-func requestDataHash(req *http.Request) (string, error) {
- var requestData []byte
- if req.Body != nil {
- requestBody, err := req.GetBody()
- if err != nil {
- return "", err
- }
- defer requestBody.Close()
-
- requestData, err = internal.ReadAll(requestBody)
- if err != nil {
- return "", err
- }
- }
-
- return getSha256(requestData)
-}
-
-func requestHost(req *http.Request) string {
- if req.Host != "" {
- return req.Host
- }
- return req.URL.Host
-}
-
-func canonicalRequest(req *http.Request, canonicalHeaderColumns, canonicalHeaderData string) (string, error) {
- dataHash, err := requestDataHash(req)
- if err != nil {
- return "", err
- }
- return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s", req.Method, canonicalPath(req), canonicalQuery(req), canonicalHeaderData, canonicalHeaderColumns, dataHash), nil
-}
-
-type awsRequestHeader struct {
- Key string `json:"key"`
- Value string `json:"value"`
-}
-
-type awsRequest struct {
- URL string `json:"url"`
- Method string `json:"method"`
- Headers []awsRequestHeader `json:"headers"`
-}
-
-// The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION. Only one is
-// required.
-func canRetrieveRegionFromEnvironment() bool {
- return getenv(awsRegionEnvVar) != "" || getenv(awsDefaultRegionEnvVar) != ""
-}
-
-// Check if both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are available.
-func canRetrieveSecurityCredentialFromEnvironment() bool {
- return getenv(awsAccessKeyIDEnvVar) != "" && getenv(awsSecretAccessKeyEnvVar) != ""
-}
-
-func (sp *awsSubjectProvider) shouldUseMetadataServer() bool {
- return sp.securityCredentialsProvider == nil && (!canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment())
-}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go
deleted file mode 100644
index d5765c47..00000000
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/executable_provider.go
+++ /dev/null
@@ -1,284 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package externalaccount
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "os"
- "os/exec"
- "regexp"
- "strings"
- "time"
-
- "cloud.google.com/go/auth/internal"
-)
-
-const (
- executableSupportedMaxVersion = 1
- executableDefaultTimeout = 30 * time.Second
- executableSource = "response"
- executableProviderType = "executable"
- outputFileSource = "output file"
-
- allowExecutablesEnvVar = "GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES"
-
- jwtTokenType = "urn:ietf:params:oauth:token-type:jwt"
- idTokenType = "urn:ietf:params:oauth:token-type:id_token"
- saml2TokenType = "urn:ietf:params:oauth:token-type:saml2"
-)
-
-var (
- serviceAccountImpersonationRE = regexp.MustCompile(`https://iamcredentials..+/v1/projects/-/serviceAccounts/(.*@.*):generateAccessToken`)
-)
-
-type nonCacheableError struct {
- message string
-}
-
-func (nce nonCacheableError) Error() string {
- return nce.message
-}
-
-// environment is a contract for testing
-type environment interface {
- existingEnv() []string
- getenv(string) string
- run(ctx context.Context, command string, env []string) ([]byte, error)
- now() time.Time
-}
-
-type runtimeEnvironment struct{}
-
-func (r runtimeEnvironment) existingEnv() []string {
- return os.Environ()
-}
-func (r runtimeEnvironment) getenv(key string) string {
- return os.Getenv(key)
-}
-func (r runtimeEnvironment) now() time.Time {
- return time.Now().UTC()
-}
-
-func (r runtimeEnvironment) run(ctx context.Context, command string, env []string) ([]byte, error) {
- splitCommand := strings.Fields(command)
- cmd := exec.CommandContext(ctx, splitCommand[0], splitCommand[1:]...)
- cmd.Env = env
-
- var stdout, stderr bytes.Buffer
- cmd.Stdout = &stdout
- cmd.Stderr = &stderr
-
- if err := cmd.Run(); err != nil {
- if ctx.Err() == context.DeadlineExceeded {
- return nil, context.DeadlineExceeded
- }
- if exitError, ok := err.(*exec.ExitError); ok {
- return nil, exitCodeError(exitError)
- }
- return nil, executableError(err)
- }
-
- bytesStdout := bytes.TrimSpace(stdout.Bytes())
- if len(bytesStdout) > 0 {
- return bytesStdout, nil
- }
- return bytes.TrimSpace(stderr.Bytes()), nil
-}
-
-type executableSubjectProvider struct {
- Command string
- Timeout time.Duration
- OutputFile string
- client *http.Client
- opts *Options
- env environment
-}
-
-type executableResponse struct {
- Version int `json:"version,omitempty"`
- Success *bool `json:"success,omitempty"`
- TokenType string `json:"token_type,omitempty"`
- ExpirationTime int64 `json:"expiration_time,omitempty"`
- IDToken string `json:"id_token,omitempty"`
- SamlResponse string `json:"saml_response,omitempty"`
- Code string `json:"code,omitempty"`
- Message string `json:"message,omitempty"`
-}
-
-func (sp *executableSubjectProvider) parseSubjectTokenFromSource(response []byte, source string, now int64) (string, error) {
- var result executableResponse
- if err := json.Unmarshal(response, &result); err != nil {
- return "", jsonParsingError(source, string(response))
- }
- // Validate
- if result.Version == 0 {
- return "", missingFieldError(source, "version")
- }
- if result.Success == nil {
- return "", missingFieldError(source, "success")
- }
- if !*result.Success {
- if result.Code == "" || result.Message == "" {
- return "", malformedFailureError()
- }
- return "", userDefinedError(result.Code, result.Message)
- }
- if result.Version > executableSupportedMaxVersion || result.Version < 0 {
- return "", unsupportedVersionError(source, result.Version)
- }
- if result.ExpirationTime == 0 && sp.OutputFile != "" {
- return "", missingFieldError(source, "expiration_time")
- }
- if result.TokenType == "" {
- return "", missingFieldError(source, "token_type")
- }
- if result.ExpirationTime != 0 && result.ExpirationTime < now {
- return "", tokenExpiredError()
- }
-
- switch result.TokenType {
- case jwtTokenType, idTokenType:
- if result.IDToken == "" {
- return "", missingFieldError(source, "id_token")
- }
- return result.IDToken, nil
- case saml2TokenType:
- if result.SamlResponse == "" {
- return "", missingFieldError(source, "saml_response")
- }
- return result.SamlResponse, nil
- default:
- return "", tokenTypeError(source)
- }
-}
-
-func (sp *executableSubjectProvider) subjectToken(ctx context.Context) (string, error) {
- if token, err := sp.getTokenFromOutputFile(); token != "" || err != nil {
- return token, err
- }
- return sp.getTokenFromExecutableCommand(ctx)
-}
-
-func (sp *executableSubjectProvider) providerType() string {
- return executableProviderType
-}
-
-func (sp *executableSubjectProvider) getTokenFromOutputFile() (token string, err error) {
- if sp.OutputFile == "" {
- // This ExecutableCredentialSource doesn't use an OutputFile.
- return "", nil
- }
-
- file, err := os.Open(sp.OutputFile)
- if err != nil {
- // No OutputFile found. Hasn't been created yet, so skip it.
- return "", nil
- }
- defer file.Close()
-
- data, err := internal.ReadAll(file)
- if err != nil || len(data) == 0 {
- // Cachefile exists, but no data found. Get new credential.
- return "", nil
- }
-
- token, err = sp.parseSubjectTokenFromSource(data, outputFileSource, sp.env.now().Unix())
- if err != nil {
- if _, ok := err.(nonCacheableError); ok {
- // If the cached token is expired we need a new token,
- // and if the cache contains a failure, we need to try again.
- return "", nil
- }
-
- // There was an error in the cached token, and the developer should be aware of it.
- return "", err
- }
- // Token parsing succeeded. Use found token.
- return token, nil
-}
-
-func (sp *executableSubjectProvider) executableEnvironment() []string {
- result := sp.env.existingEnv()
- result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_AUDIENCE=%v", sp.opts.Audience))
- result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_TOKEN_TYPE=%v", sp.opts.SubjectTokenType))
- result = append(result, "GOOGLE_EXTERNAL_ACCOUNT_INTERACTIVE=0")
- if sp.opts.ServiceAccountImpersonationURL != "" {
- matches := serviceAccountImpersonationRE.FindStringSubmatch(sp.opts.ServiceAccountImpersonationURL)
- if matches != nil {
- result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_IMPERSONATED_EMAIL=%v", matches[1]))
- }
- }
- if sp.OutputFile != "" {
- result = append(result, fmt.Sprintf("GOOGLE_EXTERNAL_ACCOUNT_OUTPUT_FILE=%v", sp.OutputFile))
- }
- return result
-}
-
-func (sp *executableSubjectProvider) getTokenFromExecutableCommand(ctx context.Context) (string, error) {
- // For security reasons, we need our consumers to set this environment variable to allow executables to be run.
- if sp.env.getenv(allowExecutablesEnvVar) != "1" {
- return "", errors.New("credentials: executables need to be explicitly allowed (set GOOGLE_EXTERNAL_ACCOUNT_ALLOW_EXECUTABLES to '1') to run")
- }
-
- ctx, cancel := context.WithDeadline(ctx, sp.env.now().Add(sp.Timeout))
- defer cancel()
-
- output, err := sp.env.run(ctx, sp.Command, sp.executableEnvironment())
- if err != nil {
- return "", err
- }
- return sp.parseSubjectTokenFromSource(output, executableSource, sp.env.now().Unix())
-}
-
-func missingFieldError(source, field string) error {
- return fmt.Errorf("credentials: %q missing %q field", source, field)
-}
-
-func jsonParsingError(source, data string) error {
- return fmt.Errorf("credentials: unable to parse %q: %v", source, data)
-}
-
-func malformedFailureError() error {
- return nonCacheableError{"credentials: response must include `error` and `message` fields when unsuccessful"}
-}
-
-func userDefinedError(code, message string) error {
- return nonCacheableError{fmt.Sprintf("credentials: response contains unsuccessful response: (%v) %v", code, message)}
-}
-
-func unsupportedVersionError(source string, version int) error {
- return fmt.Errorf("credentials: %v contains unsupported version: %v", source, version)
-}
-
-func tokenExpiredError() error {
- return nonCacheableError{"credentials: the token returned by the executable is expired"}
-}
-
-func tokenTypeError(source string) error {
- return fmt.Errorf("credentials: %v contains unsupported token type", source)
-}
-
-func exitCodeError(err *exec.ExitError) error {
- return fmt.Errorf("credentials: executable command failed with exit code %v: %w", err.ExitCode(), err)
-}
-
-func executableError(err error) error {
- return fmt.Errorf("credentials: executable command failed: %w", err)
-}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
deleted file mode 100644
index b19c6ede..00000000
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
+++ /dev/null
@@ -1,367 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package externalaccount
-
-import (
- "context"
- "errors"
- "fmt"
- "net/http"
- "regexp"
- "strconv"
- "strings"
- "time"
-
- "cloud.google.com/go/auth"
- "cloud.google.com/go/auth/credentials/internal/impersonate"
- "cloud.google.com/go/auth/credentials/internal/stsexchange"
- "cloud.google.com/go/auth/internal/credsfile"
-)
-
-const (
- timeoutMinimum = 5 * time.Second
- timeoutMaximum = 120 * time.Second
-
- universeDomainPlaceholder = "UNIVERSE_DOMAIN"
- defaultTokenURL = "https://sts.UNIVERSE_DOMAIN/v1/token"
- defaultUniverseDomain = "googleapis.com"
-)
-
-var (
- // Now aliases time.Now for testing
- Now = func() time.Time {
- return time.Now().UTC()
- }
- validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`)
-)
-
-// Options stores the configuration for fetching tokens with external credentials.
-type Options struct {
- // Audience is the Secure Token Service (STS) audience which contains the resource name for the workload
- // identity pool or the workforce pool and the provider identifier in that pool.
- Audience string
- // SubjectTokenType is the STS token type based on the Oauth2.0 token exchange spec
- // e.g. `urn:ietf:params:oauth:token-type:jwt`.
- SubjectTokenType string
- // TokenURL is the STS token exchange endpoint.
- TokenURL string
- // TokenInfoURL is the token_info endpoint used to retrieve the account related information (
- // user attributes like account identifier, eg. email, username, uid, etc). This is
- // needed for gCloud session account identification.
- TokenInfoURL string
- // ServiceAccountImpersonationURL is the URL for the service account impersonation request. This is only
- // required for workload identity pools when APIs to be accessed have not integrated with UberMint.
- ServiceAccountImpersonationURL string
- // ServiceAccountImpersonationLifetimeSeconds is the number of seconds the service account impersonation
- // token will be valid for.
- ServiceAccountImpersonationLifetimeSeconds int
- // ClientSecret is currently only required if token_info endpoint also
- // needs to be called with the generated GCP access token. When provided, STS will be
- // called with additional basic authentication using client_id as username and client_secret as password.
- ClientSecret string
- // ClientID is only required in conjunction with ClientSecret, as described above.
- ClientID string
- // CredentialSource contains the necessary information to retrieve the token itself, as well
- // as some environmental information.
- CredentialSource *credsfile.CredentialSource
- // QuotaProjectID is injected by gCloud. If the value is non-empty, the Auth libraries
- // will set the x-goog-user-project which overrides the project associated with the credentials.
- QuotaProjectID string
- // Scopes contains the desired scopes for the returned access token.
- Scopes []string
- // WorkforcePoolUserProject should be set when it is a workforce pool and
- // not a workload identity pool. The underlying principal must still have
- // serviceusage.services.use IAM permission to use the project for
- // billing/quota. Optional.
- WorkforcePoolUserProject string
- // UniverseDomain is the default service domain for a given Cloud universe.
- // This value will be used in the default STS token URL. The default value
- // is "googleapis.com". It will not be used if TokenURL is set. Optional.
- UniverseDomain string
- // SubjectTokenProvider is an optional token provider for OIDC/SAML
- // credentials. One of SubjectTokenProvider, AWSSecurityCredentialProvider
- // or CredentialSource must be provided. Optional.
- SubjectTokenProvider SubjectTokenProvider
- // AwsSecurityCredentialsProvider is an AWS Security Credential provider
- // for AWS credentials. One of SubjectTokenProvider,
- // AWSSecurityCredentialProvider or CredentialSource must be provided. Optional.
- AwsSecurityCredentialsProvider AwsSecurityCredentialsProvider
- // Client for token request.
- Client *http.Client
-}
-
-// SubjectTokenProvider can be used to supply a subject token to exchange for a
-// GCP access token.
-type SubjectTokenProvider interface {
- // SubjectToken should return a valid subject token or an error.
- // The external account token provider does not cache the returned subject
- // token, so caching logic should be implemented in the provider to prevent
- // multiple requests for the same subject token.
- SubjectToken(ctx context.Context, opts *RequestOptions) (string, error)
-}
-
-// RequestOptions contains information about the requested subject token or AWS
-// security credentials from the Google external account credential.
-type RequestOptions struct {
- // Audience is the requested audience for the external account credential.
- Audience string
- // Subject token type is the requested subject token type for the external
- // account credential. Expected values include:
- // “urn:ietf:params:oauth:token-type:jwt”
- // “urn:ietf:params:oauth:token-type:id-token”
- // “urn:ietf:params:oauth:token-type:saml2”
- // “urn:ietf:params:aws:token-type:aws4_request”
- SubjectTokenType string
-}
-
-// AwsSecurityCredentialsProvider can be used to supply AwsSecurityCredentials
-// and an AWS Region to exchange for a GCP access token.
-type AwsSecurityCredentialsProvider interface {
- // AwsRegion should return the AWS region or an error.
- AwsRegion(ctx context.Context, opts *RequestOptions) (string, error)
- // GetAwsSecurityCredentials should return a valid set of
- // AwsSecurityCredentials or an error. The external account token provider
- // does not cache the returned security credentials, so caching logic should
- // be implemented in the provider to prevent multiple requests for the
- // same security credentials.
- AwsSecurityCredentials(ctx context.Context, opts *RequestOptions) (*AwsSecurityCredentials, error)
-}
-
-// AwsSecurityCredentials models AWS security credentials.
-type AwsSecurityCredentials struct {
- // AccessKeyId is the AWS Access Key ID - Required.
- AccessKeyID string `json:"AccessKeyID"`
- // SecretAccessKey is the AWS Secret Access Key - Required.
- SecretAccessKey string `json:"SecretAccessKey"`
- // SessionToken is the AWS Session token. This should be provided for
- // temporary AWS security credentials - Optional.
- SessionToken string `json:"Token"`
-}
-
-func (o *Options) validate() error {
- if o.Audience == "" {
- return fmt.Errorf("externalaccount: Audience must be set")
- }
- if o.SubjectTokenType == "" {
- return fmt.Errorf("externalaccount: Subject token type must be set")
- }
- if o.WorkforcePoolUserProject != "" {
- if valid := validWorkforceAudiencePattern.MatchString(o.Audience); !valid {
- return fmt.Errorf("externalaccount: workforce_pool_user_project should not be set for non-workforce pool credentials")
- }
- }
- count := 0
- if o.CredentialSource != nil {
- count++
- }
- if o.SubjectTokenProvider != nil {
- count++
- }
- if o.AwsSecurityCredentialsProvider != nil {
- count++
- }
- if count == 0 {
- return fmt.Errorf("externalaccount: one of CredentialSource, SubjectTokenProvider, or AwsSecurityCredentialsProvider must be set")
- }
- if count > 1 {
- return fmt.Errorf("externalaccount: only one of CredentialSource, SubjectTokenProvider, or AwsSecurityCredentialsProvider must be set")
- }
- return nil
-}
-
-// resolveTokenURL sets the default STS token endpoint with the configured
-// universe domain.
-func (o *Options) resolveTokenURL() {
- if o.TokenURL != "" {
- return
- } else if o.UniverseDomain != "" {
- o.TokenURL = strings.Replace(defaultTokenURL, universeDomainPlaceholder, o.UniverseDomain, 1)
- } else {
- o.TokenURL = strings.Replace(defaultTokenURL, universeDomainPlaceholder, defaultUniverseDomain, 1)
- }
-}
-
-// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider]
-// configured with the provided options.
-func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
- if err := opts.validate(); err != nil {
- return nil, err
- }
- opts.resolveTokenURL()
- stp, err := newSubjectTokenProvider(opts)
- if err != nil {
- return nil, err
- }
- tp := &tokenProvider{
- client: opts.Client,
- opts: opts,
- stp: stp,
- }
- if opts.ServiceAccountImpersonationURL == "" {
- return auth.NewCachedTokenProvider(tp, nil), nil
- }
-
- scopes := make([]string, len(opts.Scopes))
- copy(scopes, opts.Scopes)
- // needed for impersonation
- tp.opts.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"}
- imp, err := impersonate.NewTokenProvider(&impersonate.Options{
- Client: opts.Client,
- URL: opts.ServiceAccountImpersonationURL,
- Scopes: scopes,
- Tp: auth.NewCachedTokenProvider(tp, nil),
- TokenLifetimeSeconds: opts.ServiceAccountImpersonationLifetimeSeconds,
- })
- if err != nil {
- return nil, err
- }
- return auth.NewCachedTokenProvider(imp, nil), nil
-}
-
-type subjectTokenProvider interface {
- subjectToken(ctx context.Context) (string, error)
- providerType() string
-}
-
-// tokenProvider is the provider that handles external credentials. It is used to retrieve Tokens.
-type tokenProvider struct {
- client *http.Client
- opts *Options
- stp subjectTokenProvider
-}
-
-func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
- subjectToken, err := tp.stp.subjectToken(ctx)
- if err != nil {
- return nil, err
- }
-
- stsRequest := &stsexchange.TokenRequest{
- GrantType: stsexchange.GrantType,
- Audience: tp.opts.Audience,
- Scope: tp.opts.Scopes,
- RequestedTokenType: stsexchange.TokenType,
- SubjectToken: subjectToken,
- SubjectTokenType: tp.opts.SubjectTokenType,
- }
- header := make(http.Header)
- header.Set("Content-Type", "application/x-www-form-urlencoded")
- header.Add("x-goog-api-client", getGoogHeaderValue(tp.opts, tp.stp))
- clientAuth := stsexchange.ClientAuthentication{
- AuthStyle: auth.StyleInHeader,
- ClientID: tp.opts.ClientID,
- ClientSecret: tp.opts.ClientSecret,
- }
- var options map[string]interface{}
- // Do not pass workforce_pool_user_project when client authentication is used.
- // The client ID is sufficient for determining the user project.
- if tp.opts.WorkforcePoolUserProject != "" && tp.opts.ClientID == "" {
- options = map[string]interface{}{
- "userProject": tp.opts.WorkforcePoolUserProject,
- }
- }
- stsResp, err := stsexchange.ExchangeToken(ctx, &stsexchange.Options{
- Client: tp.client,
- Endpoint: tp.opts.TokenURL,
- Request: stsRequest,
- Authentication: clientAuth,
- Headers: header,
- ExtraOpts: options,
- })
- if err != nil {
- return nil, err
- }
-
- tok := &auth.Token{
- Value: stsResp.AccessToken,
- Type: stsResp.TokenType,
- }
- // The RFC8693 doesn't define the explicit 0 of "expires_in" field behavior.
- if stsResp.ExpiresIn <= 0 {
- return nil, fmt.Errorf("credentials: got invalid expiry from security token service")
- }
- tok.Expiry = Now().Add(time.Duration(stsResp.ExpiresIn) * time.Second)
- return tok, nil
-}
-
-// newSubjectTokenProvider determines the type of credsfile.CredentialSource needed to create a
-// subjectTokenProvider
-func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) {
- reqOpts := &RequestOptions{Audience: o.Audience, SubjectTokenType: o.SubjectTokenType}
- if o.AwsSecurityCredentialsProvider != nil {
- return &awsSubjectProvider{
- securityCredentialsProvider: o.AwsSecurityCredentialsProvider,
- TargetResource: o.Audience,
- reqOpts: reqOpts,
- }, nil
- } else if o.SubjectTokenProvider != nil {
- return &programmaticProvider{stp: o.SubjectTokenProvider, opts: reqOpts}, nil
- } else if len(o.CredentialSource.EnvironmentID) > 3 && o.CredentialSource.EnvironmentID[:3] == "aws" {
- if awsVersion, err := strconv.Atoi(o.CredentialSource.EnvironmentID[3:]); err == nil {
- if awsVersion != 1 {
- return nil, fmt.Errorf("credentials: aws version '%d' is not supported in the current build", awsVersion)
- }
-
- awsProvider := &awsSubjectProvider{
- EnvironmentID: o.CredentialSource.EnvironmentID,
- RegionURL: o.CredentialSource.RegionURL,
- RegionalCredVerificationURL: o.CredentialSource.RegionalCredVerificationURL,
- CredVerificationURL: o.CredentialSource.URL,
- TargetResource: o.Audience,
- Client: o.Client,
- }
- if o.CredentialSource.IMDSv2SessionTokenURL != "" {
- awsProvider.IMDSv2SessionTokenURL = o.CredentialSource.IMDSv2SessionTokenURL
- }
-
- return awsProvider, nil
- }
- } else if o.CredentialSource.File != "" {
- return &fileSubjectProvider{File: o.CredentialSource.File, Format: o.CredentialSource.Format}, nil
- } else if o.CredentialSource.URL != "" {
- return &urlSubjectProvider{URL: o.CredentialSource.URL, Headers: o.CredentialSource.Headers, Format: o.CredentialSource.Format, Client: o.Client}, nil
- } else if o.CredentialSource.Executable != nil {
- ec := o.CredentialSource.Executable
- if ec.Command == "" {
- return nil, errors.New("credentials: missing `command` field — executable command must be provided")
- }
-
- execProvider := &executableSubjectProvider{}
- execProvider.Command = ec.Command
- if ec.TimeoutMillis == 0 {
- execProvider.Timeout = executableDefaultTimeout
- } else {
- execProvider.Timeout = time.Duration(ec.TimeoutMillis) * time.Millisecond
- if execProvider.Timeout < timeoutMinimum || execProvider.Timeout > timeoutMaximum {
- return nil, fmt.Errorf("credentials: invalid `timeout_millis` field — executable timeout must be between %v and %v seconds", timeoutMinimum.Seconds(), timeoutMaximum.Seconds())
- }
- }
- execProvider.OutputFile = ec.OutputFile
- execProvider.client = o.Client
- execProvider.opts = o
- execProvider.env = runtimeEnvironment{}
- return execProvider, nil
- }
- return nil, errors.New("credentials: unable to parse credential source")
-}
-
-func getGoogHeaderValue(conf *Options, p subjectTokenProvider) string {
- return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t",
- goVersion(),
- "unknown",
- p.providerType(),
- conf.ServiceAccountImpersonationURL != "",
- conf.ServiceAccountImpersonationLifetimeSeconds != 0)
-}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go
deleted file mode 100644
index 8186939f..00000000
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/file_provider.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package externalaccount
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "os"
-
- "cloud.google.com/go/auth/internal"
- "cloud.google.com/go/auth/internal/credsfile"
-)
-
-const (
- fileProviderType = "file"
-)
-
-type fileSubjectProvider struct {
- File string
- Format *credsfile.Format
-}
-
-func (sp *fileSubjectProvider) subjectToken(context.Context) (string, error) {
- tokenFile, err := os.Open(sp.File)
- if err != nil {
- return "", fmt.Errorf("credentials: failed to open credential file %q: %w", sp.File, err)
- }
- defer tokenFile.Close()
- tokenBytes, err := internal.ReadAll(tokenFile)
- if err != nil {
- return "", fmt.Errorf("credentials: failed to read credential file: %w", err)
- }
- tokenBytes = bytes.TrimSpace(tokenBytes)
-
- if sp.Format == nil {
- return string(tokenBytes), nil
- }
- switch sp.Format.Type {
- case fileTypeJSON:
- jsonData := make(map[string]interface{})
- err = json.Unmarshal(tokenBytes, &jsonData)
- if err != nil {
- return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err)
- }
- val, ok := jsonData[sp.Format.SubjectTokenFieldName]
- if !ok {
- return "", errors.New("credentials: provided subject_token_field_name not found in credentials")
- }
- token, ok := val.(string)
- if !ok {
- return "", errors.New("credentials: improperly formatted subject token")
- }
- return token, nil
- case fileTypeText:
- return string(tokenBytes), nil
- default:
- return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type)
- }
-}
-
-func (sp *fileSubjectProvider) providerType() string {
- return fileProviderType
-}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go
deleted file mode 100644
index 8e4b4379..00000000
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/info.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package externalaccount
-
-import (
- "runtime"
- "strings"
- "unicode"
-)
-
-var (
- // version is a package internal global variable for testing purposes.
- version = runtime.Version
-)
-
-// versionUnknown is only used when the runtime version cannot be determined.
-const versionUnknown = "UNKNOWN"
-
-// goVersion returns a Go runtime version derived from the runtime environment
-// that is modified to be suitable for reporting in a header, meaning it has no
-// whitespace. If it is unable to determine the Go runtime version, it returns
-// versionUnknown.
-func goVersion() string {
- const develPrefix = "devel +"
-
- s := version()
- if strings.HasPrefix(s, develPrefix) {
- s = s[len(develPrefix):]
- if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
- s = s[:p]
- }
- return s
- } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
- s = s[:p]
- }
-
- notSemverRune := func(r rune) bool {
- return !strings.ContainsRune("0123456789.", r)
- }
-
- if strings.HasPrefix(s, "go1") {
- s = s[2:]
- var prerelease string
- if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
- s, prerelease = s[:p], s[p:]
- }
- if strings.HasSuffix(s, ".") {
- s += "0"
- } else if strings.Count(s, ".") < 2 {
- s += ".0"
- }
- if prerelease != "" {
- // Some release candidates already have a dash in them.
- if !strings.HasPrefix(prerelease, "-") {
- prerelease = "-" + prerelease
- }
- s += prerelease
- }
- return s
- }
- return versionUnknown
-}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go
deleted file mode 100644
index be3c8735..00000000
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/programmatic_provider.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package externalaccount
-
-import "context"
-
-type programmaticProvider struct {
- opts *RequestOptions
- stp SubjectTokenProvider
-}
-
-func (pp *programmaticProvider) providerType() string {
- return programmaticProviderType
-}
-
-func (pp *programmaticProvider) subjectToken(ctx context.Context) (string, error) {
- return pp.stp.SubjectToken(ctx, pp.opts)
-}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go
deleted file mode 100644
index 22b8af1c..00000000
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package externalaccount
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
-
- "cloud.google.com/go/auth/internal"
- "cloud.google.com/go/auth/internal/credsfile"
-)
-
-const (
- fileTypeText = "text"
- fileTypeJSON = "json"
- urlProviderType = "url"
- programmaticProviderType = "programmatic"
-)
-
-type urlSubjectProvider struct {
- URL string
- Headers map[string]string
- Format *credsfile.Format
- Client *http.Client
-}
-
-func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) {
- req, err := http.NewRequestWithContext(ctx, "GET", sp.URL, nil)
- if err != nil {
- return "", fmt.Errorf("credentials: HTTP request for URL-sourced credential failed: %w", err)
- }
-
- for key, val := range sp.Headers {
- req.Header.Add(key, val)
- }
- resp, err := sp.Client.Do(req)
- if err != nil {
- return "", fmt.Errorf("credentials: invalid response when retrieving subject token: %w", err)
- }
- defer resp.Body.Close()
-
- respBody, err := internal.ReadAll(resp.Body)
- if err != nil {
- return "", fmt.Errorf("credentials: invalid body in subject token URL query: %w", err)
- }
- if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
- return "", fmt.Errorf("credentials: status code %d: %s", c, respBody)
- }
-
- if sp.Format == nil {
- return string(respBody), nil
- }
- switch sp.Format.Type {
- case "json":
- jsonData := make(map[string]interface{})
- err = json.Unmarshal(respBody, &jsonData)
- if err != nil {
- return "", fmt.Errorf("credentials: failed to unmarshal subject token file: %w", err)
- }
- val, ok := jsonData[sp.Format.SubjectTokenFieldName]
- if !ok {
- return "", errors.New("credentials: provided subject_token_field_name not found in credentials")
- }
- token, ok := val.(string)
- if !ok {
- return "", errors.New("credentials: improperly formatted subject token")
- }
- return token, nil
- case fileTypeText:
- return string(respBody), nil
- default:
- return "", errors.New("credentials: invalid credential_source file format type: " + sp.Format.Type)
- }
-}
-
-func (sp *urlSubjectProvider) providerType() string {
- return urlProviderType
-}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go
deleted file mode 100644
index 0d788547..00000000
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package externalaccountuser
-
-import (
- "context"
- "errors"
- "net/http"
- "time"
-
- "cloud.google.com/go/auth"
- "cloud.google.com/go/auth/credentials/internal/stsexchange"
- "cloud.google.com/go/auth/internal"
-)
-
-// Options stores the configuration for fetching tokens with external authorized
-// user credentials.
-type Options struct {
- // Audience is the Secure Token Service (STS) audience which contains the
- // resource name for the workforce pool and the provider identifier in that
- // pool.
- Audience string
- // RefreshToken is the OAuth 2.0 refresh token.
- RefreshToken string
- // TokenURL is the STS token exchange endpoint for refresh.
- TokenURL string
- // TokenInfoURL is the STS endpoint URL for token introspection. Optional.
- TokenInfoURL string
- // ClientID is only required in conjunction with ClientSecret, as described
- // below.
- ClientID string
- // ClientSecret is currently only required if token_info endpoint also needs
- // to be called with the generated a cloud access token. When provided, STS
- // will be called with additional basic authentication using client_id as
- // username and client_secret as password.
- ClientSecret string
- // Scopes contains the desired scopes for the returned access token.
- Scopes []string
-
- // Client for token request.
- Client *http.Client
-}
-
-func (c *Options) validate() bool {
- return c.ClientID != "" && c.ClientSecret != "" && c.RefreshToken != "" && c.TokenURL != ""
-}
-
-// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider]
-// configured with the provided options.
-func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
- if !opts.validate() {
- return nil, errors.New("credentials: invalid external_account_authorized_user configuration")
- }
-
- tp := &tokenProvider{
- o: opts,
- }
- return auth.NewCachedTokenProvider(tp, nil), nil
-}
-
-type tokenProvider struct {
- o *Options
-}
-
-func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
- opts := tp.o
-
- clientAuth := stsexchange.ClientAuthentication{
- AuthStyle: auth.StyleInHeader,
- ClientID: opts.ClientID,
- ClientSecret: opts.ClientSecret,
- }
- headers := make(http.Header)
- headers.Set("Content-Type", "application/x-www-form-urlencoded")
- stsResponse, err := stsexchange.RefreshAccessToken(ctx, &stsexchange.Options{
- Client: opts.Client,
- Endpoint: opts.TokenURL,
- RefreshToken: opts.RefreshToken,
- Authentication: clientAuth,
- Headers: headers,
- })
- if err != nil {
- return nil, err
- }
- if stsResponse.ExpiresIn < 0 {
- return nil, errors.New("credentials: invalid expiry from security token service")
- }
-
- // guarded by the wrapping with CachedTokenProvider
- if stsResponse.RefreshToken != "" {
- opts.RefreshToken = stsResponse.RefreshToken
- }
- return &auth.Token{
- Value: stsResponse.AccessToken,
- Expiry: time.Now().UTC().Add(time.Duration(stsResponse.ExpiresIn) * time.Second),
- Type: internal.TokenTypeBearer,
- }, nil
-}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go
deleted file mode 100644
index 467edb90..00000000
--- a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package gdch
-
-import (
- "context"
- "crypto/rsa"
- "crypto/tls"
- "crypto/x509"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "net/url"
- "os"
- "time"
-
- "cloud.google.com/go/auth"
- "cloud.google.com/go/auth/internal"
- "cloud.google.com/go/auth/internal/credsfile"
- "cloud.google.com/go/auth/internal/jwt"
-)
-
-const (
- // GrantType is the grant type for the token request.
- GrantType = "urn:ietf:params:oauth:token-type:token-exchange"
- requestTokenType = "urn:ietf:params:oauth:token-type:access_token"
- subjectTokenType = "urn:k8s:params:oauth:token-type:serviceaccount"
-)
-
-var (
- gdchSupportFormatVersions map[string]bool = map[string]bool{
- "1": true,
- }
-)
-
-// Options for [NewTokenProvider].
-type Options struct {
- STSAudience string
- Client *http.Client
-}
-
-// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] from a
-// GDCH cred file.
-func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.TokenProvider, error) {
- if !gdchSupportFormatVersions[f.FormatVersion] {
- return nil, fmt.Errorf("credentials: unsupported gdch_service_account format %q", f.FormatVersion)
- }
- if o.STSAudience == "" {
- return nil, errors.New("credentials: STSAudience must be set for the GDCH auth flows")
- }
- pk, err := internal.ParseKey([]byte(f.PrivateKey))
- if err != nil {
- return nil, err
- }
- certPool, err := loadCertPool(f.CertPath)
- if err != nil {
- return nil, err
- }
-
- tp := gdchProvider{
- serviceIdentity: fmt.Sprintf("system:serviceaccount:%s:%s", f.Project, f.Name),
- tokenURL: f.TokenURL,
- aud: o.STSAudience,
- pk: pk,
- pkID: f.PrivateKeyID,
- certPool: certPool,
- client: o.Client,
- }
- return tp, nil
-}
-
-func loadCertPool(path string) (*x509.CertPool, error) {
- pool := x509.NewCertPool()
- pem, err := os.ReadFile(path)
- if err != nil {
- return nil, fmt.Errorf("credentials: failed to read certificate: %w", err)
- }
- pool.AppendCertsFromPEM(pem)
- return pool, nil
-}
-
-type gdchProvider struct {
- serviceIdentity string
- tokenURL string
- aud string
- pk *rsa.PrivateKey
- pkID string
- certPool *x509.CertPool
-
- client *http.Client
-}
-
-func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) {
- addCertToTransport(g.client, g.certPool)
- iat := time.Now()
- exp := iat.Add(time.Hour)
- claims := jwt.Claims{
- Iss: g.serviceIdentity,
- Sub: g.serviceIdentity,
- Aud: g.tokenURL,
- Iat: iat.Unix(),
- Exp: exp.Unix(),
- }
- h := jwt.Header{
- Algorithm: jwt.HeaderAlgRSA256,
- Type: jwt.HeaderType,
- KeyID: string(g.pkID),
- }
- payload, err := jwt.EncodeJWS(&h, &claims, g.pk)
- if err != nil {
- return nil, err
- }
- v := url.Values{}
- v.Set("grant_type", GrantType)
- v.Set("audience", g.aud)
- v.Set("requested_token_type", requestTokenType)
- v.Set("subject_token", payload)
- v.Set("subject_token_type", subjectTokenType)
- resp, err := g.client.PostForm(g.tokenURL, v)
- if err != nil {
- return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
- }
- defer resp.Body.Close()
- body, err := internal.ReadAll(resp.Body)
- if err != nil {
- return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
- }
- if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices {
- return nil, &auth.Error{
- Response: resp,
- Body: body,
- }
- }
-
- var tokenRes struct {
- AccessToken string `json:"access_token"`
- TokenType string `json:"token_type"`
- ExpiresIn int64 `json:"expires_in"` // relative seconds from now
- }
- if err := json.Unmarshal(body, &tokenRes); err != nil {
- return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
- }
- token := &auth.Token{
- Value: tokenRes.AccessToken,
- Type: tokenRes.TokenType,
- }
- raw := make(map[string]interface{})
- json.Unmarshal(body, &raw) // no error checks for optional fields
- token.Metadata = raw
-
- if secs := tokenRes.ExpiresIn; secs > 0 {
- token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
- }
- return token, nil
-}
-
-// addCertToTransport makes a best effort attempt at adding in the cert info to
-// the client. It tries to keep all configured transport settings if the
-// underlying transport is an http.Transport. Or else it overwrites the
-// transport with defaults adding in the certs.
-func addCertToTransport(hc *http.Client, certPool *x509.CertPool) {
- trans, ok := hc.Transport.(*http.Transport)
- if !ok {
- trans = http.DefaultTransport.(*http.Transport).Clone()
- }
- trans.TLSClientConfig = &tls.Config{
- RootCAs: certPool,
- }
-}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
deleted file mode 100644
index 3ceab873..00000000
--- a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package impersonate
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "time"
-
- "cloud.google.com/go/auth"
- "cloud.google.com/go/auth/internal"
-)
-
-const (
- defaultTokenLifetime = "3600s"
- authHeaderKey = "Authorization"
-)
-
-// generateAccesstokenReq is used for service account impersonation
-type generateAccessTokenReq struct {
- Delegates []string `json:"delegates,omitempty"`
- Lifetime string `json:"lifetime,omitempty"`
- Scope []string `json:"scope,omitempty"`
-}
-
-type impersonateTokenResponse struct {
- AccessToken string `json:"accessToken"`
- ExpireTime string `json:"expireTime"`
-}
-
-// NewTokenProvider uses a source credential, stored in Ts, to request an access token to the provided URL.
-// Scopes can be defined when the access token is requested.
-func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
- if err := opts.validate(); err != nil {
- return nil, err
- }
- return opts, nil
-}
-
-// Options for [NewTokenProvider].
-type Options struct {
- // Tp is the source credential used to generate a token on the
- // impersonated service account. Required.
- Tp auth.TokenProvider
-
- // URL is the endpoint to call to generate a token
- // on behalf of the service account. Required.
- URL string
- // Scopes that the impersonated credential should have. Required.
- Scopes []string
- // Delegates are the service account email addresses in a delegation chain.
- // Each service account must be granted roles/iam.serviceAccountTokenCreator
- // on the next service account in the chain. Optional.
- Delegates []string
- // TokenLifetimeSeconds is the number of seconds the impersonation token will
- // be valid for. Defaults to 1 hour if unset. Optional.
- TokenLifetimeSeconds int
- // Client configures the underlying client used to make network requests
- // when fetching tokens. Required.
- Client *http.Client
-}
-
-func (o *Options) validate() error {
- if o.Tp == nil {
- return errors.New("credentials: missing required 'source_credentials' field in impersonated credentials")
- }
- if o.URL == "" {
- return errors.New("credentials: missing required 'service_account_impersonation_url' field in impersonated credentials")
- }
- return nil
-}
-
-// Token performs the exchange to get a temporary service account token to allow access to GCP.
-func (o *Options) Token(ctx context.Context) (*auth.Token, error) {
- lifetime := defaultTokenLifetime
- if o.TokenLifetimeSeconds != 0 {
- lifetime = fmt.Sprintf("%ds", o.TokenLifetimeSeconds)
- }
- reqBody := generateAccessTokenReq{
- Lifetime: lifetime,
- Scope: o.Scopes,
- Delegates: o.Delegates,
- }
- b, err := json.Marshal(reqBody)
- if err != nil {
- return nil, fmt.Errorf("credentials: unable to marshal request: %w", err)
- }
- req, err := http.NewRequestWithContext(ctx, "POST", o.URL, bytes.NewReader(b))
- if err != nil {
- return nil, fmt.Errorf("credentials: unable to create impersonation request: %w", err)
- }
- req.Header.Set("Content-Type", "application/json")
- if err := setAuthHeader(ctx, o.Tp, req); err != nil {
- return nil, err
- }
- resp, err := o.Client.Do(req)
- if err != nil {
- return nil, fmt.Errorf("credentials: unable to generate access token: %w", err)
- }
- defer resp.Body.Close()
- body, err := internal.ReadAll(resp.Body)
- if err != nil {
- return nil, fmt.Errorf("credentials: unable to read body: %w", err)
- }
- if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
- return nil, fmt.Errorf("credentials: status code %d: %s", c, body)
- }
-
- var accessTokenResp impersonateTokenResponse
- if err := json.Unmarshal(body, &accessTokenResp); err != nil {
- return nil, fmt.Errorf("credentials: unable to parse response: %w", err)
- }
- expiry, err := time.Parse(time.RFC3339, accessTokenResp.ExpireTime)
- if err != nil {
- return nil, fmt.Errorf("credentials: unable to parse expiry: %w", err)
- }
- return &auth.Token{
- Value: accessTokenResp.AccessToken,
- Expiry: expiry,
- Type: internal.TokenTypeBearer,
- }, nil
-}
-
-func setAuthHeader(ctx context.Context, tp auth.TokenProvider, r *http.Request) error {
- t, err := tp.Token(ctx)
- if err != nil {
- return err
- }
- typ := t.Type
- if typ == "" {
- typ = internal.TokenTypeBearer
- }
- r.Header.Set(authHeaderKey, typ+" "+t.Value)
- return nil
-}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go
deleted file mode 100644
index f70e0aef..00000000
--- a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package stsexchange
-
-import (
- "context"
- "encoding/base64"
- "encoding/json"
- "fmt"
- "net/http"
- "net/url"
- "strconv"
- "strings"
-
- "cloud.google.com/go/auth"
- "cloud.google.com/go/auth/internal"
-)
-
-const (
- // GrantType for a sts exchange.
- GrantType = "urn:ietf:params:oauth:grant-type:token-exchange"
- // TokenType for a sts exchange.
- TokenType = "urn:ietf:params:oauth:token-type:access_token"
-
- jwtTokenType = "urn:ietf:params:oauth:token-type:jwt"
-)
-
-// Options stores the configuration for making an sts exchange request.
-type Options struct {
- Client *http.Client
- Endpoint string
- Request *TokenRequest
- Authentication ClientAuthentication
- Headers http.Header
- // ExtraOpts are optional fields marshalled into the `options` field of the
- // request body.
- ExtraOpts map[string]interface{}
- RefreshToken string
-}
-
-// RefreshAccessToken performs the token exchange using a refresh token flow.
-func RefreshAccessToken(ctx context.Context, opts *Options) (*TokenResponse, error) {
- data := url.Values{}
- data.Set("grant_type", "refresh_token")
- data.Set("refresh_token", opts.RefreshToken)
- return doRequest(ctx, opts, data)
-}
-
-// ExchangeToken performs an oauth2 token exchange with the provided endpoint.
-func ExchangeToken(ctx context.Context, opts *Options) (*TokenResponse, error) {
- data := url.Values{}
- data.Set("audience", opts.Request.Audience)
- data.Set("grant_type", GrantType)
- data.Set("requested_token_type", TokenType)
- data.Set("subject_token_type", opts.Request.SubjectTokenType)
- data.Set("subject_token", opts.Request.SubjectToken)
- data.Set("scope", strings.Join(opts.Request.Scope, " "))
- if opts.ExtraOpts != nil {
- opts, err := json.Marshal(opts.ExtraOpts)
- if err != nil {
- return nil, fmt.Errorf("credentials: failed to marshal additional options: %w", err)
- }
- data.Set("options", string(opts))
- }
- return doRequest(ctx, opts, data)
-}
-
-func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenResponse, error) {
- opts.Authentication.InjectAuthentication(data, opts.Headers)
- encodedData := data.Encode()
-
- req, err := http.NewRequestWithContext(ctx, "POST", opts.Endpoint, strings.NewReader(encodedData))
- if err != nil {
- return nil, fmt.Errorf("credentials: failed to properly build http request: %w", err)
-
- }
- for key, list := range opts.Headers {
- for _, val := range list {
- req.Header.Add(key, val)
- }
- }
- req.Header.Set("Content-Length", strconv.Itoa(len(encodedData)))
-
- resp, err := opts.Client.Do(req)
- if err != nil {
- return nil, fmt.Errorf("credentials: invalid response from Secure Token Server: %w", err)
- }
- defer resp.Body.Close()
-
- body, err := internal.ReadAll(resp.Body)
- if err != nil {
- return nil, err
- }
- if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices {
- return nil, fmt.Errorf("credentials: status code %d: %s", c, body)
- }
- var stsResp TokenResponse
- if err := json.Unmarshal(body, &stsResp); err != nil {
- return nil, fmt.Errorf("credentials: failed to unmarshal response body from Secure Token Server: %w", err)
- }
-
- return &stsResp, nil
-}
-
-// TokenRequest contains fields necessary to make an oauth2 token
-// exchange.
-type TokenRequest struct {
- ActingParty struct {
- ActorToken string
- ActorTokenType string
- }
- GrantType string
- Resource string
- Audience string
- Scope []string
- RequestedTokenType string
- SubjectToken string
- SubjectTokenType string
-}
-
-// TokenResponse is used to decode the remote server response during
-// an oauth2 token exchange.
-type TokenResponse struct {
- AccessToken string `json:"access_token"`
- IssuedTokenType string `json:"issued_token_type"`
- TokenType string `json:"token_type"`
- ExpiresIn int `json:"expires_in"`
- Scope string `json:"scope"`
- RefreshToken string `json:"refresh_token"`
-}
-
-// ClientAuthentication represents an OAuth client ID and secret and the
-// mechanism for passing these credentials as stated in rfc6749#2.3.1.
-type ClientAuthentication struct {
- AuthStyle auth.Style
- ClientID string
- ClientSecret string
-}
-
-// InjectAuthentication is used to add authentication to a Secure Token Service
-// exchange request. It modifies either the passed url.Values or http.Header
-// depending on the desired authentication format.
-func (c *ClientAuthentication) InjectAuthentication(values url.Values, headers http.Header) {
- if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil {
- return
- }
- switch c.AuthStyle {
- case auth.StyleInHeader:
- plainHeader := c.ClientID + ":" + c.ClientSecret
- headers.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(plainHeader)))
- default:
- values.Set("client_id", c.ClientID)
- values.Set("client_secret", c.ClientSecret)
- }
-}
diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
deleted file mode 100644
index b62a8ae4..00000000
--- a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package credentials
-
-import (
- "context"
- "crypto/rsa"
- "fmt"
- "strings"
- "time"
-
- "cloud.google.com/go/auth"
- "cloud.google.com/go/auth/internal"
- "cloud.google.com/go/auth/internal/credsfile"
- "cloud.google.com/go/auth/internal/jwt"
-)
-
-var (
- // for testing
- now func() time.Time = time.Now
-)
-
-// configureSelfSignedJWT uses the private key in the service account to create
-// a JWT without making a network call.
-func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
- pk, err := internal.ParseKey([]byte(f.PrivateKey))
- if err != nil {
- return nil, fmt.Errorf("credentials: could not parse key: %w", err)
- }
- return &selfSignedTokenProvider{
- email: f.ClientEmail,
- audience: opts.Audience,
- scopes: opts.scopes(),
- pk: pk,
- pkID: f.PrivateKeyID,
- }, nil
-}
-
-type selfSignedTokenProvider struct {
- email string
- audience string
- scopes []string
- pk *rsa.PrivateKey
- pkID string
-}
-
-func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) {
- iat := now()
- exp := iat.Add(time.Hour)
- scope := strings.Join(tp.scopes, " ")
- c := &jwt.Claims{
- Iss: tp.email,
- Sub: tp.email,
- Aud: tp.audience,
- Scope: scope,
- Iat: iat.Unix(),
- Exp: exp.Unix(),
- }
- h := &jwt.Header{
- Algorithm: jwt.HeaderAlgRSA256,
- Type: jwt.HeaderType,
- KeyID: string(tp.pkID),
- }
- msg, err := jwt.EncodeJWS(h, c, tp.pk)
- if err != nil {
- return nil, fmt.Errorf("credentials: could not encode JWT: %w", err)
- }
- return &auth.Token{Value: msg, Type: internal.TokenTypeBearer, Expiry: exp}, nil
-}
diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
deleted file mode 100644
index ef09c1b7..00000000
--- a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httptransport
-
-import (
- "crypto/tls"
- "errors"
- "fmt"
- "net/http"
-
- "cloud.google.com/go/auth"
- detect "cloud.google.com/go/auth/credentials"
- "cloud.google.com/go/auth/internal"
- "cloud.google.com/go/auth/internal/transport"
-)
-
-// ClientCertProvider is a function that returns a TLS client certificate to be
-// used when opening TLS connections. It follows the same semantics as
-// [crypto/tls.Config.GetClientCertificate].
-type ClientCertProvider = func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
-
-// Options used to configure a [net/http.Client] from [NewClient].
-type Options struct {
- // DisableTelemetry disables default telemetry (OpenTelemetry). An example
- // reason to do so would be to bind custom telemetry that overrides the
- // defaults.
- DisableTelemetry bool
- // DisableAuthentication specifies that no authentication should be used. It
- // is suitable only for testing and for accessing public resources, like
- // public Google Cloud Storage buckets.
- DisableAuthentication bool
- // Headers are extra HTTP headers that will be appended to every outgoing
- // request.
- Headers http.Header
- // BaseRoundTripper overrides the base transport used for serving requests.
- // If specified ClientCertProvider is ignored.
- BaseRoundTripper http.RoundTripper
- // Endpoint overrides the default endpoint to be used for a service.
- Endpoint string
- // APIKey specifies an API key to be used as the basis for authentication.
- // If set DetectOpts are ignored.
- APIKey string
- // Credentials used to add Authorization header to all requests. If set
- // DetectOpts are ignored.
- Credentials *auth.Credentials
- // ClientCertProvider is a function that returns a TLS client certificate to
- // be used when opening TLS connections. It follows the same semantics as
- // crypto/tls.Config.GetClientCertificate.
- ClientCertProvider ClientCertProvider
- // DetectOpts configures settings for detect Application Default
- // Credentials.
- DetectOpts *detect.DetectOptions
- // UniverseDomain is the default service domain for a given Cloud universe.
- // The default value is "googleapis.com". This is the universe domain
- // configured for the client, which will be compared to the universe domain
- // that is separately configured for the credentials.
- UniverseDomain string
-
- // InternalOptions are NOT meant to be set directly by consumers of this
- // package, they should only be set by generated client code.
- InternalOptions *InternalOptions
-}
-
-func (o *Options) validate() error {
- if o == nil {
- return errors.New("httptransport: opts required to be non-nil")
- }
- if o.InternalOptions != nil && o.InternalOptions.SkipValidation {
- return nil
- }
- hasCreds := o.APIKey != "" ||
- o.Credentials != nil ||
- (o.DetectOpts != nil && len(o.DetectOpts.CredentialsJSON) > 0) ||
- (o.DetectOpts != nil && o.DetectOpts.CredentialsFile != "")
- if o.DisableAuthentication && hasCreds {
- return errors.New("httptransport: DisableAuthentication is incompatible with options that set or detect credentials")
- }
- return nil
-}
-
-// client returns the client a user set for the detect options or nil if one was
-// not set.
-func (o *Options) client() *http.Client {
- if o.DetectOpts != nil && o.DetectOpts.Client != nil {
- return o.DetectOpts.Client
- }
- return nil
-}
-
-func (o *Options) resolveDetectOptions() *detect.DetectOptions {
- io := o.InternalOptions
- // soft-clone these so we are not updating a ref the user holds and may reuse
- do := transport.CloneDetectOptions(o.DetectOpts)
-
- // If scoped JWTs are enabled user provided an aud, allow self-signed JWT.
- if (io != nil && io.EnableJWTWithScope) || do.Audience != "" {
- do.UseSelfSignedJWT = true
- }
- // Only default scopes if user did not also set an audience.
- if len(do.Scopes) == 0 && do.Audience == "" && io != nil && len(io.DefaultScopes) > 0 {
- do.Scopes = make([]string, len(io.DefaultScopes))
- copy(do.Scopes, io.DefaultScopes)
- }
- if len(do.Scopes) == 0 && do.Audience == "" && io != nil {
- do.Audience = o.InternalOptions.DefaultAudience
- }
- return do
-}
-
-// InternalOptions are only meant to be set by generated client code. These are
-// not meant to be set directly by consumers of this package. Configuration in
-// this type is considered EXPERIMENTAL and may be removed at any time in the
-// future without warning.
-type InternalOptions struct {
- // EnableJWTWithScope specifies if scope can be used with self-signed JWT.
- EnableJWTWithScope bool
- // DefaultAudience specifies a default audience to be used as the audience
- // field ("aud") for the JWT token authentication.
- DefaultAudience string
- // DefaultEndpointTemplate combined with UniverseDomain specifies the
- // default endpoint.
- DefaultEndpointTemplate string
- // DefaultMTLSEndpoint specifies the default mTLS endpoint.
- DefaultMTLSEndpoint string
- // DefaultScopes specifies the default OAuth2 scopes to be used for a
- // service.
- DefaultScopes []string
- // SkipValidation bypasses validation on Options. It should only be used
- // internally for clients that needs more control over their transport.
- SkipValidation bool
-}
-
-// AddAuthorizationMiddleware adds a middleware to the provided client's
-// transport that sets the Authorization header with the value produced by the
-// provided [cloud.google.com/go/auth.Credentials]. An error is returned only
-// if client or creds is nil.
-func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) error {
- if client == nil || creds == nil {
- return fmt.Errorf("httptransport: client and tp must not be nil")
- }
- base := client.Transport
- if base == nil {
- if dt, ok := http.DefaultTransport.(*http.Transport); ok {
- base = dt.Clone()
- } else {
- // Directly reuse the DefaultTransport if the application has
- // replaced it with an implementation of RoundTripper other than
- // http.Transport.
- base = http.DefaultTransport
- }
- }
- client.Transport = &authTransport{
- creds: creds,
- base: base,
- // TODO(quartzmo): Somehow set clientUniverseDomain from impersonate calls.
- }
- return nil
-}
-
-// NewClient returns a [net/http.Client] that can be used to communicate with a
-// Google cloud service, configured with the provided [Options]. It
-// automatically appends Authorization headers to all outgoing requests.
-func NewClient(opts *Options) (*http.Client, error) {
- if err := opts.validate(); err != nil {
- return nil, err
- }
-
- tOpts := &transport.Options{
- Endpoint: opts.Endpoint,
- ClientCertProvider: opts.ClientCertProvider,
- Client: opts.client(),
- UniverseDomain: opts.UniverseDomain,
- }
- if io := opts.InternalOptions; io != nil {
- tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate
- tOpts.DefaultMTLSEndpoint = io.DefaultMTLSEndpoint
- }
- clientCertProvider, dialTLSContext, err := transport.GetHTTPTransportConfig(tOpts)
- if err != nil {
- return nil, err
- }
- baseRoundTripper := opts.BaseRoundTripper
- if baseRoundTripper == nil {
- baseRoundTripper = defaultBaseTransport(clientCertProvider, dialTLSContext)
- }
- trans, err := newTransport(baseRoundTripper, opts)
- if err != nil {
- return nil, err
- }
- return &http.Client{
- Transport: trans,
- }, nil
-}
-
-// SetAuthHeader uses the provided token to set the Authorization header on a
-// request. If the token.Type is empty, the type is assumed to be Bearer.
-func SetAuthHeader(token *auth.Token, req *http.Request) {
- typ := token.Type
- if typ == "" {
- typ = internal.TokenTypeBearer
- }
- req.Header.Set("Authorization", typ+" "+token.Value)
-}
diff --git a/vendor/cloud.google.com/go/auth/httptransport/trace.go b/vendor/cloud.google.com/go/auth/httptransport/trace.go
deleted file mode 100644
index 467c477c..00000000
--- a/vendor/cloud.google.com/go/auth/httptransport/trace.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httptransport
-
-import (
- "encoding/binary"
- "encoding/hex"
- "fmt"
- "net/http"
- "strconv"
- "strings"
-
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
-)
-
-const (
- httpHeaderMaxSize = 200
- cloudTraceHeader = `X-Cloud-Trace-Context`
-)
-
-// asserts the httpFormat fulfills this foreign interface
-var _ propagation.HTTPFormat = (*httpFormat)(nil)
-
-// httpFormat implements propagation.httpFormat to propagate
-// traces in HTTP headers for Google Cloud Platform and Cloud Trace.
-type httpFormat struct{}
-
-// SpanContextFromRequest extracts a Cloud Trace span context from incoming requests.
-func (f *httpFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
- h := req.Header.Get(cloudTraceHeader)
- // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat.
- // Return if the header is empty or missing, or if the header is unreasonably
- // large, to avoid making unnecessary copies of a large string.
- if h == "" || len(h) > httpHeaderMaxSize {
- return trace.SpanContext{}, false
- }
-
- // Parse the trace id field.
- slash := strings.Index(h, `/`)
- if slash == -1 {
- return trace.SpanContext{}, false
- }
- tid, h := h[:slash], h[slash+1:]
-
- buf, err := hex.DecodeString(tid)
- if err != nil {
- return trace.SpanContext{}, false
- }
- copy(sc.TraceID[:], buf)
-
- // Parse the span id field.
- spanstr := h
- semicolon := strings.Index(h, `;`)
- if semicolon != -1 {
- spanstr, h = h[:semicolon], h[semicolon+1:]
- }
- sid, err := strconv.ParseUint(spanstr, 10, 64)
- if err != nil {
- return trace.SpanContext{}, false
- }
- binary.BigEndian.PutUint64(sc.SpanID[:], sid)
-
- // Parse the options field, options field is optional.
- if !strings.HasPrefix(h, "o=") {
- return sc, true
- }
- o, err := strconv.ParseUint(h[2:], 10, 32)
- if err != nil {
- return trace.SpanContext{}, false
- }
- sc.TraceOptions = trace.TraceOptions(o)
- return sc, true
-}
-
-// SpanContextToRequest modifies the given request to include a Cloud Trace header.
-func (f *httpFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
- sid := binary.BigEndian.Uint64(sc.SpanID[:])
- header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions))
- req.Header.Set(cloudTraceHeader, header)
-}
diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go
deleted file mode 100644
index 94caeb00..00000000
--- a/vendor/cloud.google.com/go/auth/httptransport/transport.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httptransport
-
-import (
- "context"
- "crypto/tls"
- "net"
- "net/http"
- "time"
-
- "cloud.google.com/go/auth"
- "cloud.google.com/go/auth/credentials"
- "cloud.google.com/go/auth/internal"
- "cloud.google.com/go/auth/internal/transport"
- "cloud.google.com/go/auth/internal/transport/cert"
- "go.opencensus.io/plugin/ochttp"
- "golang.org/x/net/http2"
-)
-
-const (
- quotaProjectHeaderKey = "X-Goog-User-Project"
-)
-
-func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, error) {
- var headers = opts.Headers
- ht := &headerTransport{
- base: base,
- headers: headers,
- }
- var trans http.RoundTripper = ht
- trans = addOCTransport(trans, opts)
- switch {
- case opts.DisableAuthentication:
- // Do nothing.
- case opts.APIKey != "":
- qp := internal.GetQuotaProject(nil, opts.Headers.Get(quotaProjectHeaderKey))
- if qp != "" {
- if headers == nil {
- headers = make(map[string][]string, 1)
- }
- headers.Set(quotaProjectHeaderKey, qp)
- }
- trans = &apiKeyTransport{
- Transport: trans,
- Key: opts.APIKey,
- }
- default:
- var creds *auth.Credentials
- if opts.Credentials != nil {
- creds = opts.Credentials
- } else {
- var err error
- creds, err = credentials.DetectDefault(opts.resolveDetectOptions())
- if err != nil {
- return nil, err
- }
- }
- qp, err := creds.QuotaProjectID(context.Background())
- if err != nil {
- return nil, err
- }
- if qp != "" {
- if headers == nil {
- headers = make(map[string][]string, 1)
- }
- headers.Set(quotaProjectHeaderKey, qp)
- }
- creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil)
- trans = &authTransport{
- base: trans,
- creds: creds,
- clientUniverseDomain: opts.UniverseDomain,
- }
- }
- return trans, nil
-}
-
-// defaultBaseTransport returns the base HTTP transport.
-// On App Engine, this is urlfetch.Transport.
-// Otherwise, use a default transport, taking most defaults from
-// http.DefaultTransport.
-// If TLSCertificate is available, set TLSClientConfig as well.
-func defaultBaseTransport(clientCertSource cert.Provider, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper {
- trans := http.DefaultTransport.(*http.Transport).Clone()
- trans.MaxIdleConnsPerHost = 100
-
- if clientCertSource != nil {
- trans.TLSClientConfig = &tls.Config{
- GetClientCertificate: clientCertSource,
- }
- }
- if dialTLSContext != nil {
- // If DialTLSContext is set, TLSClientConfig wil be ignored
- trans.DialTLSContext = dialTLSContext
- }
-
- // Configures the ReadIdleTimeout HTTP/2 option for the
- // transport. This allows broken idle connections to be pruned more quickly,
- // preventing the client from attempting to re-use connections that will no
- // longer work.
- http2Trans, err := http2.ConfigureTransports(trans)
- if err == nil {
- http2Trans.ReadIdleTimeout = time.Second * 31
- }
-
- return trans
-}
-
-type apiKeyTransport struct {
- // Key is the API Key to set on requests.
- Key string
- // Transport is the underlying HTTP transport.
- // If nil, http.DefaultTransport is used.
- Transport http.RoundTripper
-}
-
-func (t *apiKeyTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- newReq := *req
- args := newReq.URL.Query()
- args.Set("key", t.Key)
- newReq.URL.RawQuery = args.Encode()
- return t.Transport.RoundTrip(&newReq)
-}
-
-type headerTransport struct {
- headers http.Header
- base http.RoundTripper
-}
-
-func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- rt := t.base
- newReq := *req
- newReq.Header = make(http.Header)
- for k, vv := range req.Header {
- newReq.Header[k] = vv
- }
-
- for k, v := range t.headers {
- newReq.Header[k] = v
- }
-
- return rt.RoundTrip(&newReq)
-}
-
-func addOCTransport(trans http.RoundTripper, opts *Options) http.RoundTripper {
- if opts.DisableTelemetry {
- return trans
- }
- return &ochttp.Transport{
- Base: trans,
- Propagation: &httpFormat{},
- }
-}
-
-type authTransport struct {
- creds *auth.Credentials
- base http.RoundTripper
- clientUniverseDomain string
-}
-
-// getClientUniverseDomain returns the universe domain configured for the client.
-// The default value is "googleapis.com".
-func (t *authTransport) getClientUniverseDomain() string {
- if t.clientUniverseDomain == "" {
- return internal.DefaultUniverseDomain
- }
- return t.clientUniverseDomain
-}
-
-// RoundTrip authorizes and authenticates the request with an
-// access token from Transport's Source. Per the RoundTripper contract we must
-// not modify the initial request, so we clone it, and we must close the body
-// on any errors that happens during our token logic.
-func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- reqBodyClosed := false
- if req.Body != nil {
- defer func() {
- if !reqBodyClosed {
- req.Body.Close()
- }
- }()
- }
- credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context())
- if err != nil {
- return nil, err
- }
- if err := transport.ValidateUniverseDomain(t.getClientUniverseDomain(), credentialsUniverseDomain); err != nil {
- return nil, err
- }
- token, err := t.creds.Token(req.Context())
- if err != nil {
- return nil, err
- }
- req2 := req.Clone(req.Context())
- SetAuthHeader(token, req2)
- reqBodyClosed = true
- return t.base.RoundTrip(req2)
-}
diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go b/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go
deleted file mode 100644
index 9cd4bed6..00000000
--- a/vendor/cloud.google.com/go/auth/internal/credsfile/credsfile.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package credsfile is meant to hide implementation details from the pubic
-// surface of the detect package. It should not import any other packages in
-// this module. It is located under the main internal package so other
-// sub-packages can use these parsed types as well.
-package credsfile
-
-import (
- "os"
- "os/user"
- "path/filepath"
- "runtime"
-)
-
-const (
- // GoogleAppCredsEnvVar is the environment variable for setting the
- // application default credentials.
- GoogleAppCredsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS"
- userCredsFilename = "application_default_credentials.json"
-)
-
-// CredentialType represents different credential filetypes Google credentials
-// can be.
-type CredentialType int
-
-const (
- // UnknownCredType is an unidentified file type.
- UnknownCredType CredentialType = iota
- // UserCredentialsKey represents a user creds file type.
- UserCredentialsKey
- // ServiceAccountKey represents a service account file type.
- ServiceAccountKey
- // ImpersonatedServiceAccountKey represents a impersonated service account
- // file type.
- ImpersonatedServiceAccountKey
- // ExternalAccountKey represents a external account file type.
- ExternalAccountKey
- // GDCHServiceAccountKey represents a GDCH file type.
- GDCHServiceAccountKey
- // ExternalAccountAuthorizedUserKey represents a external account authorized
- // user file type.
- ExternalAccountAuthorizedUserKey
-)
-
-// parseCredentialType returns the associated filetype based on the parsed
-// typeString provided.
-func parseCredentialType(typeString string) CredentialType {
- switch typeString {
- case "service_account":
- return ServiceAccountKey
- case "authorized_user":
- return UserCredentialsKey
- case "impersonated_service_account":
- return ImpersonatedServiceAccountKey
- case "external_account":
- return ExternalAccountKey
- case "external_account_authorized_user":
- return ExternalAccountAuthorizedUserKey
- case "gdch_service_account":
- return GDCHServiceAccountKey
- default:
- return UnknownCredType
- }
-}
-
-// GetFileNameFromEnv returns the override if provided or detects a filename
-// from the environment.
-func GetFileNameFromEnv(override string) string {
- if override != "" {
- return override
- }
- return os.Getenv(GoogleAppCredsEnvVar)
-}
-
-// GetWellKnownFileName tries to locate the filepath for the user credential
-// file based on the environment.
-func GetWellKnownFileName() string {
- if runtime.GOOS == "windows" {
- return filepath.Join(os.Getenv("APPDATA"), "gcloud", userCredsFilename)
- }
- return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", userCredsFilename)
-}
-
-// guessUnixHomeDir default to checking for HOME, but not all unix systems have
-// this set, do have a fallback.
-func guessUnixHomeDir() string {
- if v := os.Getenv("HOME"); v != "" {
- return v
- }
- if u, err := user.Current(); err == nil {
- return u.HomeDir
- }
- return ""
-}
diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go
deleted file mode 100644
index 69e30779..00000000
--- a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package credsfile
-
-import (
- "encoding/json"
-)
-
-// Config3LO is the internals of a client creds file.
-type Config3LO struct {
- ClientID string `json:"client_id"`
- ClientSecret string `json:"client_secret"`
- RedirectURIs []string `json:"redirect_uris"`
- AuthURI string `json:"auth_uri"`
- TokenURI string `json:"token_uri"`
-}
-
-// ClientCredentialsFile representation.
-type ClientCredentialsFile struct {
- Web *Config3LO `json:"web"`
- Installed *Config3LO `json:"installed"`
- UniverseDomain string `json:"universe_domain"`
-}
-
-// ServiceAccountFile representation.
-type ServiceAccountFile struct {
- Type string `json:"type"`
- ProjectID string `json:"project_id"`
- PrivateKeyID string `json:"private_key_id"`
- PrivateKey string `json:"private_key"`
- ClientEmail string `json:"client_email"`
- ClientID string `json:"client_id"`
- AuthURL string `json:"auth_uri"`
- TokenURL string `json:"token_uri"`
- UniverseDomain string `json:"universe_domain"`
-}
-
-// UserCredentialsFile representation.
-type UserCredentialsFile struct {
- Type string `json:"type"`
- ClientID string `json:"client_id"`
- ClientSecret string `json:"client_secret"`
- QuotaProjectID string `json:"quota_project_id"`
- RefreshToken string `json:"refresh_token"`
- UniverseDomain string `json:"universe_domain"`
-}
-
-// ExternalAccountFile representation.
-type ExternalAccountFile struct {
- Type string `json:"type"`
- ClientID string `json:"client_id"`
- ClientSecret string `json:"client_secret"`
- Audience string `json:"audience"`
- SubjectTokenType string `json:"subject_token_type"`
- ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"`
- TokenURL string `json:"token_url"`
- CredentialSource *CredentialSource `json:"credential_source,omitempty"`
- TokenInfoURL string `json:"token_info_url"`
- ServiceAccountImpersonation *ServiceAccountImpersonationInfo `json:"service_account_impersonation,omitempty"`
- QuotaProjectID string `json:"quota_project_id"`
- WorkforcePoolUserProject string `json:"workforce_pool_user_project"`
- UniverseDomain string `json:"universe_domain"`
-}
-
-// ExternalAccountAuthorizedUserFile representation.
-type ExternalAccountAuthorizedUserFile struct {
- Type string `json:"type"`
- Audience string `json:"audience"`
- ClientID string `json:"client_id"`
- ClientSecret string `json:"client_secret"`
- RefreshToken string `json:"refresh_token"`
- TokenURL string `json:"token_url"`
- TokenInfoURL string `json:"token_info_url"`
- RevokeURL string `json:"revoke_url"`
- QuotaProjectID string `json:"quota_project_id"`
- UniverseDomain string `json:"universe_domain"`
-}
-
-// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange.
-//
-// One field amongst File, URL, and Executable should be filled, depending on the kind of credential in question.
-// The EnvironmentID should start with AWS if being used for an AWS credential.
-type CredentialSource struct {
- File string `json:"file"`
- URL string `json:"url"`
- Headers map[string]string `json:"headers"`
- Executable *ExecutableConfig `json:"executable,omitempty"`
- EnvironmentID string `json:"environment_id"`
- RegionURL string `json:"region_url"`
- RegionalCredVerificationURL string `json:"regional_cred_verification_url"`
- CredVerificationURL string `json:"cred_verification_url"`
- IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"`
- Format *Format `json:"format,omitempty"`
-}
-
-// Format describes the format of a [CredentialSource].
-type Format struct {
- // Type is either "text" or "json". When not provided "text" type is assumed.
- Type string `json:"type"`
- // SubjectTokenFieldName is only required for JSON format. This would be "access_token" for azure.
- SubjectTokenFieldName string `json:"subject_token_field_name"`
-}
-
-// ExecutableConfig represents the command to run for an executable
-// [CredentialSource].
-type ExecutableConfig struct {
- Command string `json:"command"`
- TimeoutMillis int `json:"timeout_millis"`
- OutputFile string `json:"output_file"`
-}
-
-// ServiceAccountImpersonationInfo has impersonation configuration.
-type ServiceAccountImpersonationInfo struct {
- TokenLifetimeSeconds int `json:"token_lifetime_seconds"`
-}
-
-// ImpersonatedServiceAccountFile representation.
-type ImpersonatedServiceAccountFile struct {
- Type string `json:"type"`
- ServiceAccountImpersonationURL string `json:"service_account_impersonation_url"`
- Delegates []string `json:"delegates"`
- CredSource json.RawMessage `json:"source_credentials"`
- UniverseDomain string `json:"universe_domain"`
-}
-
-// GDCHServiceAccountFile represents the Google Distributed Cloud Hosted (GDCH) service identity file.
-type GDCHServiceAccountFile struct {
- Type string `json:"type"`
- FormatVersion string `json:"format_version"`
- Project string `json:"project"`
- Name string `json:"name"`
- CertPath string `json:"ca_cert_path"`
- PrivateKeyID string `json:"private_key_id"`
- PrivateKey string `json:"private_key"`
- TokenURL string `json:"token_uri"`
- UniverseDomain string `json:"universe_domain"`
-}
diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go b/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go
deleted file mode 100644
index a02b9f5d..00000000
--- a/vendor/cloud.google.com/go/auth/internal/credsfile/parse.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package credsfile
-
-import (
- "encoding/json"
-)
-
-// ParseServiceAccount parses bytes into a [ServiceAccountFile].
-func ParseServiceAccount(b []byte) (*ServiceAccountFile, error) {
- var f *ServiceAccountFile
- if err := json.Unmarshal(b, &f); err != nil {
- return nil, err
- }
- return f, nil
-}
-
-// ParseClientCredentials parses bytes into a
-// [credsfile.ClientCredentialsFile].
-func ParseClientCredentials(b []byte) (*ClientCredentialsFile, error) {
- var f *ClientCredentialsFile
- if err := json.Unmarshal(b, &f); err != nil {
- return nil, err
- }
- return f, nil
-}
-
-// ParseUserCredentials parses bytes into a [UserCredentialsFile].
-func ParseUserCredentials(b []byte) (*UserCredentialsFile, error) {
- var f *UserCredentialsFile
- if err := json.Unmarshal(b, &f); err != nil {
- return nil, err
- }
- return f, nil
-}
-
-// ParseExternalAccount parses bytes into a [ExternalAccountFile].
-func ParseExternalAccount(b []byte) (*ExternalAccountFile, error) {
- var f *ExternalAccountFile
- if err := json.Unmarshal(b, &f); err != nil {
- return nil, err
- }
- return f, nil
-}
-
-// ParseExternalAccountAuthorizedUser parses bytes into a
-// [ExternalAccountAuthorizedUserFile].
-func ParseExternalAccountAuthorizedUser(b []byte) (*ExternalAccountAuthorizedUserFile, error) {
- var f *ExternalAccountAuthorizedUserFile
- if err := json.Unmarshal(b, &f); err != nil {
- return nil, err
- }
- return f, nil
-}
-
-// ParseImpersonatedServiceAccount parses bytes into a
-// [ImpersonatedServiceAccountFile].
-func ParseImpersonatedServiceAccount(b []byte) (*ImpersonatedServiceAccountFile, error) {
- var f *ImpersonatedServiceAccountFile
- if err := json.Unmarshal(b, &f); err != nil {
- return nil, err
- }
- return f, nil
-}
-
-// ParseGDCHServiceAccount parses bytes into a [GDCHServiceAccountFile].
-func ParseGDCHServiceAccount(b []byte) (*GDCHServiceAccountFile, error) {
- var f *GDCHServiceAccountFile
- if err := json.Unmarshal(b, &f); err != nil {
- return nil, err
- }
- return f, nil
-}
-
-type fileTypeChecker struct {
- Type string `json:"type"`
-}
-
-// ParseFileType determines the [CredentialType] based on bytes provided.
-func ParseFileType(b []byte) (CredentialType, error) {
- var f fileTypeChecker
- if err := json.Unmarshal(b, &f); err != nil {
- return 0, err
- }
- return parseCredentialType(f.Type), nil
-}
diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go
deleted file mode 100644
index 70534e80..00000000
--- a/vendor/cloud.google.com/go/auth/internal/internal.go
+++ /dev/null
@@ -1,184 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
- "context"
- "crypto/rsa"
- "crypto/x509"
- "encoding/json"
- "encoding/pem"
- "errors"
- "fmt"
- "io"
- "net/http"
- "os"
- "sync"
- "time"
-
- "cloud.google.com/go/compute/metadata"
-)
-
-const (
- // TokenTypeBearer is the auth header prefix for bearer tokens.
- TokenTypeBearer = "Bearer"
-
- // QuotaProjectEnvVar is the environment variable for setting the quota
- // project.
- QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT"
- projectEnvVar = "GOOGLE_CLOUD_PROJECT"
- maxBodySize = 1 << 20
-
- // DefaultUniverseDomain is the default value for universe domain.
- // Universe domain is the default service domain for a given Cloud universe.
- DefaultUniverseDomain = "googleapis.com"
-)
-
-// CloneDefaultClient returns a [http.Client] with some good defaults.
-func CloneDefaultClient() *http.Client {
- return &http.Client{
- Transport: http.DefaultTransport.(*http.Transport).Clone(),
- Timeout: 30 * time.Second,
- }
-}
-
-// ParseKey converts the binary contents of a private key file
-// to an *rsa.PrivateKey. It detects whether the private key is in a
-// PEM container or not. If so, it extracts the the private key
-// from PEM container before conversion. It only supports PEM
-// containers with no passphrase.
-func ParseKey(key []byte) (*rsa.PrivateKey, error) {
- block, _ := pem.Decode(key)
- if block != nil {
- key = block.Bytes
- }
- parsedKey, err := x509.ParsePKCS8PrivateKey(key)
- if err != nil {
- parsedKey, err = x509.ParsePKCS1PrivateKey(key)
- if err != nil {
- return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8: %w", err)
- }
- }
- parsed, ok := parsedKey.(*rsa.PrivateKey)
- if !ok {
- return nil, errors.New("private key is invalid")
- }
- return parsed, nil
-}
-
-// GetQuotaProject retrieves quota project with precedence being: override,
-// environment variable, creds json file.
-func GetQuotaProject(b []byte, override string) string {
- if override != "" {
- return override
- }
- if env := os.Getenv(QuotaProjectEnvVar); env != "" {
- return env
- }
- if b == nil {
- return ""
- }
- var v struct {
- QuotaProject string `json:"quota_project_id"`
- }
- if err := json.Unmarshal(b, &v); err != nil {
- return ""
- }
- return v.QuotaProject
-}
-
-// GetProjectID retrieves project with precedence being: override,
-// environment variable, creds json file.
-func GetProjectID(b []byte, override string) string {
- if override != "" {
- return override
- }
- if env := os.Getenv(projectEnvVar); env != "" {
- return env
- }
- if b == nil {
- return ""
- }
- var v struct {
- ProjectID string `json:"project_id"` // standard service account key
- Project string `json:"project"` // gdch key
- }
- if err := json.Unmarshal(b, &v); err != nil {
- return ""
- }
- if v.ProjectID != "" {
- return v.ProjectID
- }
- return v.Project
-}
-
-// ReadAll consumes the whole reader and safely reads the content of its body
-// with some overflow protection.
-func ReadAll(r io.Reader) ([]byte, error) {
- return io.ReadAll(io.LimitReader(r, maxBodySize))
-}
-
-// StaticCredentialsProperty is a helper for creating static credentials
-// properties.
-func StaticCredentialsProperty(s string) StaticProperty {
- return StaticProperty(s)
-}
-
-// StaticProperty always returns that value of the underlying string.
-type StaticProperty string
-
-// GetProperty loads the properly value provided the given context.
-func (p StaticProperty) GetProperty(context.Context) (string, error) {
- return string(p), nil
-}
-
-// ComputeUniverseDomainProvider fetches the credentials universe domain from
-// the google cloud metadata service.
-type ComputeUniverseDomainProvider struct {
- universeDomainOnce sync.Once
- universeDomain string
- universeDomainErr error
-}
-
-// GetProperty fetches the credentials universe domain from the google cloud
-// metadata service.
-func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string, error) {
- c.universeDomainOnce.Do(func() {
- c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx)
- })
- if c.universeDomainErr != nil {
- return "", c.universeDomainErr
- }
- return c.universeDomain, nil
-}
-
-// httpGetMetadataUniverseDomain is a package var for unit test substitution.
-var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) {
- client := metadata.NewClient(&http.Client{Timeout: time.Second})
- // TODO(quartzmo): set ctx on request
- return client.Get("universe/universe_domain")
-}
-
-func getMetadataUniverseDomain(ctx context.Context) (string, error) {
- universeDomain, err := httpGetMetadataUniverseDomain(ctx)
- if err == nil {
- return universeDomain, nil
- }
- if _, ok := err.(metadata.NotDefinedError); ok {
- // http.StatusNotFound (404)
- return DefaultUniverseDomain, nil
- }
- return "", err
-}
diff --git a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go
deleted file mode 100644
index dc28b3c3..00000000
--- a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package jwt
-
-import (
- "bytes"
- "crypto"
- "crypto/rand"
- "crypto/rsa"
- "crypto/sha256"
- "encoding/base64"
- "encoding/json"
- "errors"
- "fmt"
- "strings"
- "time"
-)
-
-const (
- // HeaderAlgRSA256 is the RS256 [Header.Algorithm].
- HeaderAlgRSA256 = "RS256"
- // HeaderAlgES256 is the ES256 [Header.Algorithm].
- HeaderAlgES256 = "ES256"
- // HeaderType is the standard [Header.Type].
- HeaderType = "JWT"
-)
-
-// Header represents a JWT header.
-type Header struct {
- Algorithm string `json:"alg"`
- Type string `json:"typ"`
- KeyID string `json:"kid"`
-}
-
-func (h *Header) encode() (string, error) {
- b, err := json.Marshal(h)
- if err != nil {
- return "", err
- }
- return base64.RawURLEncoding.EncodeToString(b), nil
-}
-
-// Claims represents the claims set of a JWT.
-type Claims struct {
- // Iss is the issuer JWT claim.
- Iss string `json:"iss"`
- // Scope is the scope JWT claim.
- Scope string `json:"scope,omitempty"`
- // Exp is the expiry JWT claim. If unset, default is in one hour from now.
- Exp int64 `json:"exp"`
- // Iat is the subject issued at claim. If unset, default is now.
- Iat int64 `json:"iat"`
- // Aud is the audience JWT claim. Optional.
- Aud string `json:"aud"`
- // Sub is the subject JWT claim. Optional.
- Sub string `json:"sub,omitempty"`
- // AdditionalClaims contains any additional non-standard JWT claims. Optional.
- AdditionalClaims map[string]interface{} `json:"-"`
-}
-
-func (c *Claims) encode() (string, error) {
- // Compensate for skew
- now := time.Now().Add(-10 * time.Second)
- if c.Iat == 0 {
- c.Iat = now.Unix()
- }
- if c.Exp == 0 {
- c.Exp = now.Add(time.Hour).Unix()
- }
- if c.Exp < c.Iat {
- return "", fmt.Errorf("jwt: invalid Exp = %d; must be later than Iat = %d", c.Exp, c.Iat)
- }
-
- b, err := json.Marshal(c)
- if err != nil {
- return "", err
- }
-
- if len(c.AdditionalClaims) == 0 {
- return base64.RawURLEncoding.EncodeToString(b), nil
- }
-
- // Marshal private claim set and then append it to b.
- prv, err := json.Marshal(c.AdditionalClaims)
- if err != nil {
- return "", fmt.Errorf("invalid map of additional claims %v: %w", c.AdditionalClaims, err)
- }
-
- // Concatenate public and private claim JSON objects.
- if !bytes.HasSuffix(b, []byte{'}'}) {
- return "", fmt.Errorf("invalid JSON %s", b)
- }
- if !bytes.HasPrefix(prv, []byte{'{'}) {
- return "", fmt.Errorf("invalid JSON %s", prv)
- }
- b[len(b)-1] = ',' // Replace closing curly brace with a comma.
- b = append(b, prv[1:]...) // Append private claims.
- return base64.RawURLEncoding.EncodeToString(b), nil
-}
-
-// EncodeJWS encodes the data using the provided key as a JSON web signature.
-func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) {
- head, err := header.encode()
- if err != nil {
- return "", err
- }
- claims, err := c.encode()
- if err != nil {
- return "", err
- }
- ss := fmt.Sprintf("%s.%s", head, claims)
- h := sha256.New()
- h.Write([]byte(ss))
- sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil))
- if err != nil {
- return "", err
- }
- return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil
-}
-
-// DecodeJWS decodes a claim set from a JWS payload.
-func DecodeJWS(payload string) (*Claims, error) {
- // decode returned id token to get expiry
- s := strings.Split(payload, ".")
- if len(s) < 2 {
- return nil, errors.New("invalid token received")
- }
- decoded, err := base64.RawURLEncoding.DecodeString(s[1])
- if err != nil {
- return nil, err
- }
- c := &Claims{}
- if err := json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c); err != nil {
- return nil, err
- }
- if err := json.NewDecoder(bytes.NewBuffer(decoded)).Decode(&c.AdditionalClaims); err != nil {
- return nil, err
- }
- return c, err
-}
-
-// VerifyJWS tests whether the provided JWT token's signature was produced by
-// the private key associated with the provided public key.
-func VerifyJWS(token string, key *rsa.PublicKey) error {
- parts := strings.Split(token, ".")
- if len(parts) != 3 {
- return errors.New("jwt: invalid token received, token must have 3 parts")
- }
-
- signedContent := parts[0] + "." + parts[1]
- signatureString, err := base64.RawURLEncoding.DecodeString(parts[2])
- if err != nil {
- return err
- }
-
- h := sha256.New()
- h.Write([]byte(signedContent))
- return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), signatureString)
-}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go
deleted file mode 100644
index 6ef88311..00000000
--- a/vendor/cloud.google.com/go/auth/internal/transport/cba.go
+++ /dev/null
@@ -1,298 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "context"
- "crypto/tls"
- "errors"
- "net"
- "net/http"
- "net/url"
- "os"
- "strconv"
- "strings"
-
- "cloud.google.com/go/auth/internal"
- "cloud.google.com/go/auth/internal/transport/cert"
- "github.com/google/s2a-go"
- "github.com/google/s2a-go/fallback"
- "google.golang.org/grpc/credentials"
-)
-
-const (
- mTLSModeAlways = "always"
- mTLSModeNever = "never"
- mTLSModeAuto = "auto"
-
- // Experimental: if true, the code will try MTLS with S2A as the default for transport security. Default value is false.
- googleAPIUseS2AEnv = "EXPERIMENTAL_GOOGLE_API_USE_S2A"
- googleAPIUseCertSource = "GOOGLE_API_USE_CLIENT_CERTIFICATE"
- googleAPIUseMTLS = "GOOGLE_API_USE_MTLS_ENDPOINT"
- googleAPIUseMTLSOld = "GOOGLE_API_USE_MTLS"
-
- universeDomainPlaceholder = "UNIVERSE_DOMAIN"
-)
-
-var (
- mdsMTLSAutoConfigSource mtlsConfigSource
- errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com")
-)
-
-// Options is a struct that is duplicated information from the individual
-// transport packages in order to avoid cyclic deps. It correlates 1:1 with
-// fields on httptransport.Options and grpctransport.Options.
-type Options struct {
- Endpoint string
- DefaultMTLSEndpoint string
- DefaultEndpointTemplate string
- ClientCertProvider cert.Provider
- Client *http.Client
- UniverseDomain string
- EnableDirectPath bool
- EnableDirectPathXds bool
-}
-
-// getUniverseDomain returns the default service domain for a given Cloud
-// universe.
-func (o *Options) getUniverseDomain() string {
- if o.UniverseDomain == "" {
- return internal.DefaultUniverseDomain
- }
- return o.UniverseDomain
-}
-
-// isUniverseDomainGDU returns true if the universe domain is the default Google
-// universe.
-func (o *Options) isUniverseDomainGDU() bool {
- return o.getUniverseDomain() == internal.DefaultUniverseDomain
-}
-
-// defaultEndpoint returns the DefaultEndpointTemplate merged with the
-// universe domain if the DefaultEndpointTemplate is set, otherwise returns an
-// empty string.
-func (o *Options) defaultEndpoint() string {
- if o.DefaultEndpointTemplate == "" {
- return ""
- }
- return strings.Replace(o.DefaultEndpointTemplate, universeDomainPlaceholder, o.getUniverseDomain(), 1)
-}
-
-// mergedEndpoint merges a user-provided Endpoint of format host[:port] with the
-// default endpoint.
-func (o *Options) mergedEndpoint() (string, error) {
- defaultEndpoint := o.defaultEndpoint()
- u, err := url.Parse(fixScheme(defaultEndpoint))
- if err != nil {
- return "", err
- }
- return strings.Replace(defaultEndpoint, u.Host, o.Endpoint, 1), nil
-}
-
-func fixScheme(baseURL string) string {
- if !strings.Contains(baseURL, "://") {
- baseURL = "https://" + baseURL
- }
- return baseURL
-}
-
-// GetGRPCTransportCredsAndEndpoint returns an instance of
-// [google.golang.org/grpc/credentials.TransportCredentials], and the
-// corresponding endpoint to use for GRPC client.
-func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCredentials, string, error) {
- config, err := getTransportConfig(opts)
- if err != nil {
- return nil, "", err
- }
-
- defaultTransportCreds := credentials.NewTLS(&tls.Config{
- GetClientCertificate: config.clientCertSource,
- })
- if config.s2aAddress == "" {
- return defaultTransportCreds, config.endpoint, nil
- }
-
- var fallbackOpts *s2a.FallbackOptions
- // In case of S2A failure, fall back to the endpoint that would've been used without S2A.
- if fallbackHandshake, err := fallback.DefaultFallbackClientHandshakeFunc(config.endpoint); err == nil {
- fallbackOpts = &s2a.FallbackOptions{
- FallbackClientHandshakeFunc: fallbackHandshake,
- }
- }
-
- s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{
- S2AAddress: config.s2aAddress,
- FallbackOpts: fallbackOpts,
- })
- if err != nil {
- // Use default if we cannot initialize S2A client transport credentials.
- return defaultTransportCreds, config.endpoint, nil
- }
- return s2aTransportCreds, config.s2aMTLSEndpoint, nil
-}
-
-// GetHTTPTransportConfig returns a client certificate source and a function for
-// dialing MTLS with S2A.
-func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context, string, string) (net.Conn, error), error) {
- config, err := getTransportConfig(opts)
- if err != nil {
- return nil, nil, err
- }
-
- if config.s2aAddress == "" {
- return config.clientCertSource, nil, nil
- }
-
- var fallbackOpts *s2a.FallbackOptions
- // In case of S2A failure, fall back to the endpoint that would've been used without S2A.
- if fallbackURL, err := url.Parse(config.endpoint); err == nil {
- if fallbackDialer, fallbackServerAddr, err := fallback.DefaultFallbackDialerAndAddress(fallbackURL.Hostname()); err == nil {
- fallbackOpts = &s2a.FallbackOptions{
- FallbackDialer: &s2a.FallbackDialer{
- Dialer: fallbackDialer,
- ServerAddr: fallbackServerAddr,
- },
- }
- }
- }
-
- dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{
- S2AAddress: config.s2aAddress,
- FallbackOpts: fallbackOpts,
- })
- return nil, dialTLSContextFunc, nil
-}
-
-func getTransportConfig(opts *Options) (*transportConfig, error) {
- clientCertSource, err := getClientCertificateSource(opts)
- if err != nil {
- return nil, err
- }
- endpoint, err := getEndpoint(opts, clientCertSource)
- if err != nil {
- return nil, err
- }
- defaultTransportConfig := transportConfig{
- clientCertSource: clientCertSource,
- endpoint: endpoint,
- }
-
- if !shouldUseS2A(clientCertSource, opts) {
- return &defaultTransportConfig, nil
- }
- if !opts.isUniverseDomainGDU() {
- return nil, errUniverseNotSupportedMTLS
- }
-
- s2aMTLSEndpoint := opts.DefaultMTLSEndpoint
-
- s2aAddress := GetS2AAddress()
- if s2aAddress == "" {
- return &defaultTransportConfig, nil
- }
- return &transportConfig{
- clientCertSource: clientCertSource,
- endpoint: endpoint,
- s2aAddress: s2aAddress,
- s2aMTLSEndpoint: s2aMTLSEndpoint,
- }, nil
-}
-
-// getClientCertificateSource returns a default client certificate source, if
-// not provided by the user.
-//
-// A nil default source can be returned if the source does not exist. Any exceptions
-// encountered while initializing the default source will be reported as client
-// error (ex. corrupt metadata file).
-func getClientCertificateSource(opts *Options) (cert.Provider, error) {
- if !isClientCertificateEnabled(opts) {
- return nil, nil
- } else if opts.ClientCertProvider != nil {
- return opts.ClientCertProvider, nil
- }
- return cert.DefaultProvider()
-
-}
-
-// isClientCertificateEnabled returns true by default for all GDU universe domain, unless explicitly overridden by env var
-func isClientCertificateEnabled(opts *Options) bool {
- if value, ok := os.LookupEnv(googleAPIUseCertSource); ok {
- // error as false is OK
- b, _ := strconv.ParseBool(value)
- return b
- }
- return opts.isUniverseDomainGDU()
-}
-
-type transportConfig struct {
- // The client certificate source.
- clientCertSource cert.Provider
- // The corresponding endpoint to use based on client certificate source.
- endpoint string
- // The S2A address if it can be used, otherwise an empty string.
- s2aAddress string
- // The MTLS endpoint to use with S2A.
- s2aMTLSEndpoint string
-}
-
-// getEndpoint returns the endpoint for the service, taking into account the
-// user-provided endpoint override "settings.Endpoint".
-//
-// If no endpoint override is specified, we will either return the default endpoint or
-// the default mTLS endpoint if a client certificate is available.
-//
-// You can override the default endpoint choice (mtls vs. regular) by setting the
-// GOOGLE_API_USE_MTLS_ENDPOINT environment variable.
-//
-// If the endpoint override is an address (host:port) rather than full base
-// URL (ex. https://...), then the user-provided address will be merged into
-// the default endpoint. For example, WithEndpoint("myhost:8000") and
-// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return "https://myhost:8080/bar/baz"
-func getEndpoint(opts *Options, clientCertSource cert.Provider) (string, error) {
- if opts.Endpoint == "" {
- mtlsMode := getMTLSMode()
- if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) {
- if !opts.isUniverseDomainGDU() {
- return "", errUniverseNotSupportedMTLS
- }
- return opts.DefaultMTLSEndpoint, nil
- }
- return opts.defaultEndpoint(), nil
- }
- if strings.Contains(opts.Endpoint, "://") {
- // User passed in a full URL path, use it verbatim.
- return opts.Endpoint, nil
- }
- if opts.defaultEndpoint() == "" {
- // If DefaultEndpointTemplate is not configured,
- // use the user provided endpoint verbatim. This allows a naked
- // "host[:port]" URL to be used with GRPC Direct Path.
- return opts.Endpoint, nil
- }
-
- // Assume user-provided endpoint is host[:port], merge it with the default endpoint.
- return opts.mergedEndpoint()
-}
-
-func getMTLSMode() string {
- mode := os.Getenv(googleAPIUseMTLS)
- if mode == "" {
- mode = os.Getenv(googleAPIUseMTLSOld) // Deprecated.
- }
- if mode == "" {
- return mTLSModeAuto
- }
- return strings.ToLower(mode)
-}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go
deleted file mode 100644
index 96582ce7..00000000
--- a/vendor/cloud.google.com/go/auth/internal/transport/cert/default_cert.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cert
-
-import (
- "crypto/tls"
- "errors"
- "sync"
-)
-
-// defaultCertData holds all the variables pertaining to
-// the default certificate provider created by [DefaultProvider].
-//
-// A singleton model is used to allow the provider to be reused
-// by the transport layer. As mentioned in [DefaultProvider] (provider nil, nil)
-// may be returned to indicate a default provider could not be found, which
-// will skip extra tls config in the transport layer .
-type defaultCertData struct {
- once sync.Once
- provider Provider
- err error
-}
-
-var (
- defaultCert defaultCertData
-)
-
-// Provider is a function that can be passed into crypto/tls.Config.GetClientCertificate.
-type Provider func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
-
-// errSourceUnavailable is a sentinel error to indicate certificate source is unavailable.
-var errSourceUnavailable = errors.New("certificate source is unavailable")
-
-// DefaultProvider returns a certificate source using the preferred EnterpriseCertificateProxySource.
-// If EnterpriseCertificateProxySource is not available, fall back to the legacy SecureConnectSource.
-//
-// If neither source is available (due to missing configurations), a nil Source and a nil Error are
-// returned to indicate that a default certificate source is unavailable.
-func DefaultProvider() (Provider, error) {
- defaultCert.once.Do(func() {
- defaultCert.provider, defaultCert.err = NewEnterpriseCertificateProxyProvider("")
- if errors.Is(defaultCert.err, errSourceUnavailable) {
- defaultCert.provider, defaultCert.err = NewSecureConnectProvider("")
- if errors.Is(defaultCert.err, errSourceUnavailable) {
- defaultCert.provider, defaultCert.err = nil, nil
- }
- }
- })
- return defaultCert.provider, defaultCert.err
-}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go
deleted file mode 100644
index 36651591..00000000
--- a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cert
-
-import (
- "crypto/tls"
- "errors"
-
- "github.com/googleapis/enterprise-certificate-proxy/client"
-)
-
-type ecpSource struct {
- key *client.Key
-}
-
-// NewEnterpriseCertificateProxyProvider creates a certificate source
-// using the Enterprise Certificate Proxy client, which delegates
-// certifcate related operations to an OS-specific "signer binary"
-// that communicates with the native keystore (ex. keychain on MacOS).
-//
-// The configFilePath points to a config file containing relevant parameters
-// such as the certificate issuer and the location of the signer binary.
-// If configFilePath is empty, the client will attempt to load the config from
-// a well-known gcloud location.
-func NewEnterpriseCertificateProxyProvider(configFilePath string) (Provider, error) {
- key, err := client.Cred(configFilePath)
- if err != nil {
- if errors.Is(err, client.ErrCredUnavailable) {
- return nil, errSourceUnavailable
- }
- return nil, err
- }
-
- return (&ecpSource{
- key: key,
- }).getClientCertificate, nil
-}
-
-func (s *ecpSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) {
- var cert tls.Certificate
- cert.PrivateKey = s.key
- cert.Certificate = s.key.CertificateChain()
- return &cert, nil
-}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go
deleted file mode 100644
index 3227aba2..00000000
--- a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cert
-
-import (
- "crypto/tls"
- "crypto/x509"
- "encoding/json"
- "errors"
- "fmt"
- "os"
- "os/exec"
- "os/user"
- "path/filepath"
- "sync"
- "time"
-)
-
-const (
- metadataPath = ".secureConnect"
- metadataFile = "context_aware_metadata.json"
-)
-
-type secureConnectSource struct {
- metadata secureConnectMetadata
-
- // Cache the cert to avoid executing helper command repeatedly.
- cachedCertMutex sync.Mutex
- cachedCert *tls.Certificate
-}
-
-type secureConnectMetadata struct {
- Cmd []string `json:"cert_provider_command"`
-}
-
-// NewSecureConnectProvider creates a certificate source using
-// the Secure Connect Helper and its associated metadata file.
-//
-// The configFilePath points to the location of the context aware metadata file.
-// If configFilePath is empty, use the default context aware metadata location.
-func NewSecureConnectProvider(configFilePath string) (Provider, error) {
- if configFilePath == "" {
- user, err := user.Current()
- if err != nil {
- // Error locating the default config means Secure Connect is not supported.
- return nil, errSourceUnavailable
- }
- configFilePath = filepath.Join(user.HomeDir, metadataPath, metadataFile)
- }
-
- file, err := os.ReadFile(configFilePath)
- if err != nil {
- if errors.Is(err, os.ErrNotExist) {
- // Config file missing means Secure Connect is not supported.
- return nil, errSourceUnavailable
- }
- return nil, err
- }
-
- var metadata secureConnectMetadata
- if err := json.Unmarshal(file, &metadata); err != nil {
- return nil, fmt.Errorf("cert: could not parse JSON in %q: %w", configFilePath, err)
- }
- if err := validateMetadata(metadata); err != nil {
- return nil, fmt.Errorf("cert: invalid config in %q: %w", configFilePath, err)
- }
- return (&secureConnectSource{
- metadata: metadata,
- }).getClientCertificate, nil
-}
-
-func validateMetadata(metadata secureConnectMetadata) error {
- if len(metadata.Cmd) == 0 {
- return errors.New("empty cert_provider_command")
- }
- return nil
-}
-
-func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) {
- s.cachedCertMutex.Lock()
- defer s.cachedCertMutex.Unlock()
- if s.cachedCert != nil && !isCertificateExpired(s.cachedCert) {
- return s.cachedCert, nil
- }
- // Expand OS environment variables in the cert provider command such as "$HOME".
- for i := 0; i < len(s.metadata.Cmd); i++ {
- s.metadata.Cmd[i] = os.ExpandEnv(s.metadata.Cmd[i])
- }
- command := s.metadata.Cmd
- data, err := exec.Command(command[0], command[1:]...).Output()
- if err != nil {
- return nil, err
- }
- cert, err := tls.X509KeyPair(data, data)
- if err != nil {
- return nil, err
- }
- s.cachedCert = &cert
- return &cert, nil
-}
-
-// isCertificateExpired returns true if the given cert is expired or invalid.
-func isCertificateExpired(cert *tls.Certificate) bool {
- if len(cert.Certificate) == 0 {
- return true
- }
- parsed, err := x509.ParseCertificate(cert.Certificate[0])
- if err != nil {
- return true
- }
- return time.Now().After(parsed.NotAfter)
-}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go
deleted file mode 100644
index ea1e1feb..00000000
--- a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cert
-
-import (
- "crypto/tls"
- "encoding/json"
- "errors"
- "io"
- "os"
-
- "github.com/googleapis/enterprise-certificate-proxy/client/util"
-)
-
-type certConfigs struct {
- Workload *workloadSource `json:"workload"`
-}
-
-type workloadSource struct {
- CertPath string `json:"cert_path"`
- KeyPath string `json:"key_path"`
-}
-
-type certificateConfig struct {
- CertConfigs certConfigs `json:"cert_configs"`
-}
-
-// NewWorkloadX509CertProvider creates a certificate source
-// that reads a certificate and private key file from the local file system.
-// This is intended to be used for workload identity federation.
-//
-// The configFilePath points to a config file containing relevant parameters
-// such as the certificate and key file paths.
-// If configFilePath is empty, the client will attempt to load the config from
-// a well-known gcloud location.
-func NewWorkloadX509CertProvider(configFilePath string) (Provider, error) {
- if configFilePath == "" {
- envFilePath := util.GetConfigFilePathFromEnv()
- if envFilePath != "" {
- configFilePath = envFilePath
- } else {
- configFilePath = util.GetDefaultConfigFilePath()
- }
- }
-
- certFile, keyFile, err := getCertAndKeyFiles(configFilePath)
- if err != nil {
- return nil, err
- }
-
- source := &workloadSource{
- CertPath: certFile,
- KeyPath: keyFile,
- }
- return source.getClientCertificate, nil
-}
-
-// getClientCertificate attempts to load the certificate and key from the files specified in the
-// certificate config.
-func (s *workloadSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) {
- cert, err := tls.LoadX509KeyPair(s.CertPath, s.KeyPath)
- if err != nil {
- return nil, err
- }
- return &cert, nil
-}
-
-// getCertAndKeyFiles attempts to read the provided config file and return the certificate and private
-// key file paths.
-func getCertAndKeyFiles(configFilePath string) (string, string, error) {
- jsonFile, err := os.Open(configFilePath)
- if err != nil {
- if errors.Is(err, os.ErrNotExist) {
- return "", "", errSourceUnavailable
- }
- return "", "", err
- }
-
- byteValue, err := io.ReadAll(jsonFile)
- if err != nil {
- return "", "", err
- }
-
- var config certificateConfig
- if err := json.Unmarshal(byteValue, &config); err != nil {
- return "", "", err
- }
-
- if config.CertConfigs.Workload == nil {
- return "", "", errors.New("no Workload Identity Federation certificate information found in the certificate configuration file")
- }
-
- certFile := config.CertConfigs.Workload.CertPath
- keyFile := config.CertConfigs.Workload.KeyPath
-
- if certFile == "" {
- return "", "", errors.New("certificate configuration is missing the certificate file location")
- }
-
- if keyFile == "" {
- return "", "", errors.New("certificate configuration is missing the key file location")
- }
-
- return certFile, keyFile, nil
-}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
deleted file mode 100644
index 2ed532de..00000000
--- a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "encoding/json"
- "log"
- "os"
- "strconv"
- "sync"
- "time"
-
- "cloud.google.com/go/auth/internal/transport/cert"
- "cloud.google.com/go/compute/metadata"
-)
-
-const (
- configEndpointSuffix = "instance/platform-security/auto-mtls-configuration"
-)
-
-var (
- // The period an MTLS config can be reused before needing refresh.
- configExpiry = time.Hour
-
- // mdsMTLSAutoConfigSource is an instance of reuseMTLSConfigSource, with metadataMTLSAutoConfig as its config source.
- mtlsOnce sync.Once
-)
-
-// GetS2AAddress returns the S2A address to be reached via plaintext connection.
-// Returns empty string if not set or invalid.
-func GetS2AAddress() string {
- c, err := getMetadataMTLSAutoConfig().Config()
- if err != nil {
- return ""
- }
- if !c.Valid() {
- return ""
- }
- return c.S2A.PlaintextAddress
-}
-
-type mtlsConfigSource interface {
- Config() (*mtlsConfig, error)
-}
-
-// mtlsConfig contains the configuration for establishing MTLS connections with Google APIs.
-type mtlsConfig struct {
- S2A *s2aAddresses `json:"s2a"`
- Expiry time.Time
-}
-
-func (c *mtlsConfig) Valid() bool {
- return c != nil && c.S2A != nil && !c.expired()
-}
-func (c *mtlsConfig) expired() bool {
- return c.Expiry.Before(time.Now())
-}
-
-// s2aAddresses contains the plaintext and/or MTLS S2A addresses.
-type s2aAddresses struct {
- // PlaintextAddress is the plaintext address to reach S2A
- PlaintextAddress string `json:"plaintext_address"`
- // MTLSAddress is the MTLS address to reach S2A
- MTLSAddress string `json:"mtls_address"`
-}
-
-// getMetadataMTLSAutoConfig returns mdsMTLSAutoConfigSource, which is backed by config from MDS with auto-refresh.
-func getMetadataMTLSAutoConfig() mtlsConfigSource {
- mtlsOnce.Do(func() {
- mdsMTLSAutoConfigSource = &reuseMTLSConfigSource{
- src: &metadataMTLSAutoConfig{},
- }
- })
- return mdsMTLSAutoConfigSource
-}
-
-// reuseMTLSConfigSource caches a valid version of mtlsConfig, and uses `src` to refresh upon config expiry.
-// It implements the mtlsConfigSource interface, so calling Config() on it returns an mtlsConfig.
-type reuseMTLSConfigSource struct {
- src mtlsConfigSource // src.Config() is called when config is expired
- mu sync.Mutex // mutex guards config
- config *mtlsConfig // cached config
-}
-
-func (cs *reuseMTLSConfigSource) Config() (*mtlsConfig, error) {
- cs.mu.Lock()
- defer cs.mu.Unlock()
-
- if cs.config.Valid() {
- return cs.config, nil
- }
- c, err := cs.src.Config()
- if err != nil {
- return nil, err
- }
- cs.config = c
- return c, nil
-}
-
-// metadataMTLSAutoConfig is an implementation of the interface mtlsConfigSource
-// It has the logic to query MDS and return an mtlsConfig
-type metadataMTLSAutoConfig struct{}
-
-var httpGetMetadataMTLSConfig = func() (string, error) {
- return metadata.Get(configEndpointSuffix)
-}
-
-func (cs *metadataMTLSAutoConfig) Config() (*mtlsConfig, error) {
- resp, err := httpGetMetadataMTLSConfig()
- if err != nil {
- log.Printf("querying MTLS config from MDS endpoint failed: %v", err)
- return defaultMTLSConfig(), nil
- }
- var config mtlsConfig
- err = json.Unmarshal([]byte(resp), &config)
- if err != nil {
- log.Printf("unmarshalling MTLS config from MDS endpoint failed: %v", err)
- return defaultMTLSConfig(), nil
- }
-
- if config.S2A == nil {
- log.Printf("returned MTLS config from MDS endpoint is invalid: %v", config)
- return defaultMTLSConfig(), nil
- }
-
- // set new expiry
- config.Expiry = time.Now().Add(configExpiry)
- return &config, nil
-}
-
-func defaultMTLSConfig() *mtlsConfig {
- return &mtlsConfig{
- S2A: &s2aAddresses{
- PlaintextAddress: "",
- MTLSAddress: "",
- },
- Expiry: time.Now().Add(configExpiry),
- }
-}
-
-func shouldUseS2A(clientCertSource cert.Provider, opts *Options) bool {
- // If client cert is found, use that over S2A.
- if clientCertSource != nil {
- return false
- }
- // If EXPERIMENTAL_GOOGLE_API_USE_S2A is not set to true, skip S2A.
- if !isGoogleS2AEnabled() {
- return false
- }
- // If DefaultMTLSEndpoint is not set or has endpoint override, skip S2A.
- if opts.DefaultMTLSEndpoint == "" || opts.Endpoint != "" {
- return false
- }
- // If custom HTTP client is provided, skip S2A.
- if opts.Client != nil {
- return false
- }
- // If directPath is enabled, skip S2A.
- return !opts.EnableDirectPath && !opts.EnableDirectPathXds
-}
-
-func isGoogleS2AEnabled() bool {
- b, err := strconv.ParseBool(os.Getenv(googleAPIUseS2AEnv))
- if err != nil {
- return false
- }
- return b
-}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go
deleted file mode 100644
index b76386d3..00000000
--- a/vendor/cloud.google.com/go/auth/internal/transport/transport.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package transport provided internal helpers for the two transport packages
-// (grpctransport and httptransport).
-package transport
-
-import (
- "fmt"
-
- "cloud.google.com/go/auth/credentials"
-)
-
-// CloneDetectOptions clones a user set detect option into some new memory that
-// we can internally manipulate before sending onto the detect package.
-func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOptions {
- if oldDo == nil {
- // it is valid for users not to set this, but we will need to to default
- // some options for them in this case so return some initialized memory
- // to work with.
- return &credentials.DetectOptions{}
- }
- newDo := &credentials.DetectOptions{
- // Simple types
- Audience: oldDo.Audience,
- Subject: oldDo.Subject,
- EarlyTokenRefresh: oldDo.EarlyTokenRefresh,
- TokenURL: oldDo.TokenURL,
- STSAudience: oldDo.STSAudience,
- CredentialsFile: oldDo.CredentialsFile,
- UseSelfSignedJWT: oldDo.UseSelfSignedJWT,
- UniverseDomain: oldDo.UniverseDomain,
-
- // These fields are are pointer types that we just want to use exactly
- // as the user set, copy the ref
- Client: oldDo.Client,
- AuthHandlerOptions: oldDo.AuthHandlerOptions,
- }
-
- // Smartly size this memory and copy below.
- if oldDo.CredentialsJSON != nil {
- newDo.CredentialsJSON = make([]byte, len(oldDo.CredentialsJSON))
- copy(newDo.CredentialsJSON, oldDo.CredentialsJSON)
- }
- if oldDo.Scopes != nil {
- newDo.Scopes = make([]string, len(oldDo.Scopes))
- copy(newDo.Scopes, oldDo.Scopes)
- }
-
- return newDo
-}
-
-// ValidateUniverseDomain verifies that the universe domain configured for the
-// client matches the universe domain configured for the credentials.
-func ValidateUniverseDomain(clientUniverseDomain, credentialsUniverseDomain string) error {
- if clientUniverseDomain != credentialsUniverseDomain {
- return fmt.Errorf(
- "the configured universe domain (%q) does not match the universe "+
- "domain found in the credentials (%q). If you haven't configured "+
- "the universe domain explicitly, \"googleapis.com\" is the default",
- clientUniverseDomain,
- credentialsUniverseDomain)
- }
- return nil
-}
diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md
deleted file mode 100644
index ff9747be..00000000
--- a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md
+++ /dev/null
@@ -1,40 +0,0 @@
-# Changelog
-
-## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.1...auth/oauth2adapt/v0.2.2) (2024-04-23)
-
-
-### Bug Fixes
-
-* **auth/oauth2adapt:** Bump x/net to v0.24.0 ([ba31ed5](https://github.com/googleapis/google-cloud-go/commit/ba31ed5fda2c9664f2e1cf972469295e63deb5b4))
-
-## [0.2.1](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.0...auth/oauth2adapt/v0.2.1) (2024-04-18)
-
-
-### Bug Fixes
-
-* **auth/oauth2adapt:** Adapt Token Types to be translated ([#9801](https://github.com/googleapis/google-cloud-go/issues/9801)) ([70f4115](https://github.com/googleapis/google-cloud-go/commit/70f411555ebbf2b71e6d425cc8d2030644c6b438)), refs [#9800](https://github.com/googleapis/google-cloud-go/issues/9800)
-
-## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.1.0...auth/oauth2adapt/v0.2.0) (2024-04-16)
-
-
-### Features
-
-* **auth/oauth2adapt:** Add helpers for working with credentials types ([#9694](https://github.com/googleapis/google-cloud-go/issues/9694)) ([cf33b55](https://github.com/googleapis/google-cloud-go/commit/cf33b5514423a2ac5c2a323a1cd99aac34fd4233))
-
-
-### Bug Fixes
-
-* **auth/oauth2adapt:** Update protobuf dep to v1.33.0 ([30b038d](https://github.com/googleapis/google-cloud-go/commit/30b038d8cac0b8cd5dd4761c87f3f298760dd33a))
-
-## 0.1.0 (2023-10-19)
-
-
-### Features
-
-* **auth/oauth2adapt:** Adds a new module to translate types ([#8595](https://github.com/googleapis/google-cloud-go/issues/8595)) ([6933c5a](https://github.com/googleapis/google-cloud-go/commit/6933c5a0c1fc8e58cbfff8bbca439d671b94672f))
-* **auth/oauth2adapt:** Fixup deps for release ([#8747](https://github.com/googleapis/google-cloud-go/issues/8747)) ([749d243](https://github.com/googleapis/google-cloud-go/commit/749d243862b025a6487a4d2d339219889b4cfe70))
-
-
-### Bug Fixes
-
-* **auth/oauth2adapt:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d))
diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE b/vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE
deleted file mode 100644
index d6456956..00000000
--- a/vendor/cloud.google.com/go/auth/oauth2adapt/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go
deleted file mode 100644
index 9835ac57..00000000
--- a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package oauth2adapt helps converts types used in [cloud.google.com/go/auth]
-// and [golang.org/x/oauth2].
-package oauth2adapt
-
-import (
- "context"
- "encoding/json"
- "errors"
-
- "cloud.google.com/go/auth"
- "golang.org/x/oauth2"
- "golang.org/x/oauth2/google"
-)
-
-// TokenProviderFromTokenSource converts any [golang.org/x/oauth2.TokenSource]
-// into a [cloud.google.com/go/auth.TokenProvider].
-func TokenProviderFromTokenSource(ts oauth2.TokenSource) auth.TokenProvider {
- return &tokenProviderAdapter{ts: ts}
-}
-
-type tokenProviderAdapter struct {
- ts oauth2.TokenSource
-}
-
-// Token fulfills the [cloud.google.com/go/auth.TokenProvider] interface. It
-// is a light wrapper around the underlying TokenSource.
-func (tp *tokenProviderAdapter) Token(context.Context) (*auth.Token, error) {
- tok, err := tp.ts.Token()
- if err != nil {
- var err2 *oauth2.RetrieveError
- if ok := errors.As(err, &err2); ok {
- return nil, AuthErrorFromRetrieveError(err2)
- }
- return nil, err
- }
- return &auth.Token{
- Value: tok.AccessToken,
- Type: tok.Type(),
- Expiry: tok.Expiry,
- }, nil
-}
-
-// TokenSourceFromTokenProvider converts any
-// [cloud.google.com/go/auth.TokenProvider] into a
-// [golang.org/x/oauth2.TokenSource].
-func TokenSourceFromTokenProvider(tp auth.TokenProvider) oauth2.TokenSource {
- return &tokenSourceAdapter{tp: tp}
-}
-
-type tokenSourceAdapter struct {
- tp auth.TokenProvider
-}
-
-// Token fulfills the [golang.org/x/oauth2.TokenSource] interface. It
-// is a light wrapper around the underlying TokenProvider.
-func (ts *tokenSourceAdapter) Token() (*oauth2.Token, error) {
- tok, err := ts.tp.Token(context.Background())
- if err != nil {
- var err2 *auth.Error
- if ok := errors.As(err, &err2); ok {
- return nil, AddRetrieveErrorToAuthError(err2)
- }
- return nil, err
- }
- return &oauth2.Token{
- AccessToken: tok.Value,
- TokenType: tok.Type,
- Expiry: tok.Expiry,
- }, nil
-}
-
-// AuthCredentialsFromOauth2Credentials converts a [golang.org/x/oauth2/google.Credentials]
-// to a [cloud.google.com/go/auth.Credentials].
-func AuthCredentialsFromOauth2Credentials(creds *google.Credentials) *auth.Credentials {
- if creds == nil {
- return nil
- }
- return auth.NewCredentials(&auth.CredentialsOptions{
- TokenProvider: TokenProviderFromTokenSource(creds.TokenSource),
- JSON: creds.JSON,
- ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
- return creds.ProjectID, nil
- }),
- UniverseDomainProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
- return creds.GetUniverseDomain()
- }),
- })
-}
-
-// Oauth2CredentialsFromAuthCredentials converts a [cloud.google.com/go/auth.Credentials]
-// to a [golang.org/x/oauth2/google.Credentials].
-func Oauth2CredentialsFromAuthCredentials(creds *auth.Credentials) *google.Credentials {
- if creds == nil {
- return nil
- }
- // Throw away errors as old credentials are not request aware. Also, no
- // network requests are currently happening for this use case.
- projectID, _ := creds.ProjectID(context.Background())
-
- return &google.Credentials{
- TokenSource: TokenSourceFromTokenProvider(creds.TokenProvider),
- ProjectID: projectID,
- JSON: creds.JSON(),
- UniverseDomainProvider: func() (string, error) {
- return creds.UniverseDomain(context.Background())
- },
- }
-}
-
-type oauth2Error struct {
- ErrorCode string `json:"error"`
- ErrorDescription string `json:"error_description"`
- ErrorURI string `json:"error_uri"`
-}
-
-// AddRetrieveErrorToAuthError returns the same error provided and adds a
-// [golang.org/x/oauth2.RetrieveError] to the error chain by setting the `Err` field on the
-// [cloud.google.com/go/auth.Error].
-func AddRetrieveErrorToAuthError(err *auth.Error) *auth.Error {
- if err == nil {
- return nil
- }
- e := &oauth2.RetrieveError{
- Response: err.Response,
- Body: err.Body,
- }
- err.Err = e
- if len(err.Body) > 0 {
- var oErr oauth2Error
- // ignore the error as it only fills in extra details
- json.Unmarshal(err.Body, &oErr)
- e.ErrorCode = oErr.ErrorCode
- e.ErrorDescription = oErr.ErrorDescription
- e.ErrorURI = oErr.ErrorURI
- }
- return err
-}
-
-// AuthErrorFromRetrieveError returns an [cloud.google.com/go/auth.Error] that
-// wraps the provided [golang.org/x/oauth2.RetrieveError].
-func AuthErrorFromRetrieveError(err *oauth2.RetrieveError) *auth.Error {
- if err == nil {
- return nil
- }
- return &auth.Error{
- Response: err.Response,
- Body: err.Body,
- Err: err,
- }
-}
diff --git a/vendor/cloud.google.com/go/auth/threelegged.go b/vendor/cloud.google.com/go/auth/threelegged.go
deleted file mode 100644
index 1b8d83c4..00000000
--- a/vendor/cloud.google.com/go/auth/threelegged.go
+++ /dev/null
@@ -1,373 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package auth
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "mime"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "time"
-
- "cloud.google.com/go/auth/internal"
-)
-
-// AuthorizationHandler is a 3-legged-OAuth helper that prompts the user for
-// OAuth consent at the specified auth code URL and returns an auth code and
-// state upon approval.
-type AuthorizationHandler func(authCodeURL string) (code string, state string, err error)
-
-// Options3LO are the options for doing a 3-legged OAuth2 flow.
-type Options3LO struct {
- // ClientID is the application's ID.
- ClientID string
- // ClientSecret is the application's secret. Not required if AuthHandlerOpts
- // is set.
- ClientSecret string
- // AuthURL is the URL for authenticating.
- AuthURL string
- // TokenURL is the URL for retrieving a token.
- TokenURL string
- // AuthStyle is used to describe how to client info in the token request.
- AuthStyle Style
- // RefreshToken is the token used to refresh the credential. Not required
- // if AuthHandlerOpts is set.
- RefreshToken string
- // RedirectURL is the URL to redirect users to. Optional.
- RedirectURL string
- // Scopes specifies requested permissions for the Token. Optional.
- Scopes []string
-
- // URLParams are the set of values to apply to the token exchange. Optional.
- URLParams url.Values
- // Client is the client to be used to make the underlying token requests.
- // Optional.
- Client *http.Client
- // EarlyTokenExpiry is the time before the token expires that it should be
- // refreshed. If not set the default value is 10 seconds. Optional.
- EarlyTokenExpiry time.Duration
-
- // AuthHandlerOpts provides a set of options for doing a
- // 3-legged OAuth2 flow with a custom [AuthorizationHandler]. Optional.
- AuthHandlerOpts *AuthorizationHandlerOptions
-}
-
-func (o *Options3LO) validate() error {
- if o == nil {
- return errors.New("auth: options must be provided")
- }
- if o.ClientID == "" {
- return errors.New("auth: client ID must be provided")
- }
- if o.AuthHandlerOpts == nil && o.ClientSecret == "" {
- return errors.New("auth: client secret must be provided")
- }
- if o.AuthURL == "" {
- return errors.New("auth: auth URL must be provided")
- }
- if o.TokenURL == "" {
- return errors.New("auth: token URL must be provided")
- }
- if o.AuthStyle == StyleUnknown {
- return errors.New("auth: auth style must be provided")
- }
- if o.AuthHandlerOpts == nil && o.RefreshToken == "" {
- return errors.New("auth: refresh token must be provided")
- }
- return nil
-}
-
-// PKCEOptions holds parameters to support PKCE.
-type PKCEOptions struct {
- // Challenge is the un-padded, base64-url-encoded string of the encrypted code verifier.
- Challenge string // The un-padded, base64-url-encoded string of the encrypted code verifier.
- // ChallengeMethod is the encryption method (ex. S256).
- ChallengeMethod string
- // Verifier is the original, non-encrypted secret.
- Verifier string // The original, non-encrypted secret.
-}
-
-type tokenJSON struct {
- AccessToken string `json:"access_token"`
- TokenType string `json:"token_type"`
- RefreshToken string `json:"refresh_token"`
- ExpiresIn int `json:"expires_in"`
- // error fields
- ErrorCode string `json:"error"`
- ErrorDescription string `json:"error_description"`
- ErrorURI string `json:"error_uri"`
-}
-
-func (e *tokenJSON) expiry() (t time.Time) {
- if v := e.ExpiresIn; v != 0 {
- return time.Now().Add(time.Duration(v) * time.Second)
- }
- return
-}
-
-func (o *Options3LO) client() *http.Client {
- if o.Client != nil {
- return o.Client
- }
- return internal.CloneDefaultClient()
-}
-
-// authCodeURL returns a URL that points to a OAuth2 consent page.
-func (o *Options3LO) authCodeURL(state string, values url.Values) string {
- var buf bytes.Buffer
- buf.WriteString(o.AuthURL)
- v := url.Values{
- "response_type": {"code"},
- "client_id": {o.ClientID},
- }
- if o.RedirectURL != "" {
- v.Set("redirect_uri", o.RedirectURL)
- }
- if len(o.Scopes) > 0 {
- v.Set("scope", strings.Join(o.Scopes, " "))
- }
- if state != "" {
- v.Set("state", state)
- }
- if o.AuthHandlerOpts != nil {
- if o.AuthHandlerOpts.PKCEOpts != nil &&
- o.AuthHandlerOpts.PKCEOpts.Challenge != "" {
- v.Set(codeChallengeKey, o.AuthHandlerOpts.PKCEOpts.Challenge)
- }
- if o.AuthHandlerOpts.PKCEOpts != nil &&
- o.AuthHandlerOpts.PKCEOpts.ChallengeMethod != "" {
- v.Set(codeChallengeMethodKey, o.AuthHandlerOpts.PKCEOpts.ChallengeMethod)
- }
- }
- for k := range values {
- v.Set(k, v.Get(k))
- }
- if strings.Contains(o.AuthURL, "?") {
- buf.WriteByte('&')
- } else {
- buf.WriteByte('?')
- }
- buf.WriteString(v.Encode())
- return buf.String()
-}
-
-// New3LOTokenProvider returns a [TokenProvider] based on the 3-legged OAuth2
-// configuration. The TokenProvider is caches and auto-refreshes tokens by
-// default.
-func New3LOTokenProvider(opts *Options3LO) (TokenProvider, error) {
- if err := opts.validate(); err != nil {
- return nil, err
- }
- if opts.AuthHandlerOpts != nil {
- return new3LOTokenProviderWithAuthHandler(opts), nil
- }
- return NewCachedTokenProvider(&tokenProvider3LO{opts: opts, refreshToken: opts.RefreshToken, client: opts.client()}, &CachedTokenProviderOptions{
- ExpireEarly: opts.EarlyTokenExpiry,
- }), nil
-}
-
-// AuthorizationHandlerOptions provides a set of options to specify for doing a
-// 3-legged OAuth2 flow with a custom [AuthorizationHandler].
-type AuthorizationHandlerOptions struct {
- // AuthorizationHandler specifies the handler used to for the authorization
- // part of the flow.
- Handler AuthorizationHandler
- // State is used verify that the "state" is identical in the request and
- // response before exchanging the auth code for OAuth2 token.
- State string
- // PKCEOpts allows setting configurations for PKCE. Optional.
- PKCEOpts *PKCEOptions
-}
-
-func new3LOTokenProviderWithAuthHandler(opts *Options3LO) TokenProvider {
- return NewCachedTokenProvider(&tokenProviderWithHandler{opts: opts, state: opts.AuthHandlerOpts.State}, &CachedTokenProviderOptions{
- ExpireEarly: opts.EarlyTokenExpiry,
- })
-}
-
-// exchange handles the final exchange portion of the 3lo flow. Returns a Token,
-// refreshToken, and error.
-func (o *Options3LO) exchange(ctx context.Context, code string) (*Token, string, error) {
- // Build request
- v := url.Values{
- "grant_type": {"authorization_code"},
- "code": {code},
- }
- if o.RedirectURL != "" {
- v.Set("redirect_uri", o.RedirectURL)
- }
- if o.AuthHandlerOpts != nil &&
- o.AuthHandlerOpts.PKCEOpts != nil &&
- o.AuthHandlerOpts.PKCEOpts.Verifier != "" {
- v.Set(codeVerifierKey, o.AuthHandlerOpts.PKCEOpts.Verifier)
- }
- for k := range o.URLParams {
- v.Set(k, o.URLParams.Get(k))
- }
- return fetchToken(ctx, o, v)
-}
-
-// This struct is not safe for concurrent access alone, but the way it is used
-// in this package by wrapping it with a cachedTokenProvider makes it so.
-type tokenProvider3LO struct {
- opts *Options3LO
- client *http.Client
- refreshToken string
-}
-
-func (tp *tokenProvider3LO) Token(ctx context.Context) (*Token, error) {
- if tp.refreshToken == "" {
- return nil, errors.New("auth: token expired and refresh token is not set")
- }
- v := url.Values{
- "grant_type": {"refresh_token"},
- "refresh_token": {tp.refreshToken},
- }
- for k := range tp.opts.URLParams {
- v.Set(k, tp.opts.URLParams.Get(k))
- }
-
- tk, rt, err := fetchToken(ctx, tp.opts, v)
- if err != nil {
- return nil, err
- }
- if tp.refreshToken != rt && rt != "" {
- tp.refreshToken = rt
- }
- return tk, err
-}
-
-type tokenProviderWithHandler struct {
- opts *Options3LO
- state string
-}
-
-func (tp tokenProviderWithHandler) Token(ctx context.Context) (*Token, error) {
- url := tp.opts.authCodeURL(tp.state, nil)
- code, state, err := tp.opts.AuthHandlerOpts.Handler(url)
- if err != nil {
- return nil, err
- }
- if state != tp.state {
- return nil, errors.New("auth: state mismatch in 3-legged-OAuth flow")
- }
- tok, _, err := tp.opts.exchange(ctx, code)
- return tok, err
-}
-
-// fetchToken returns a Token, refresh token, and/or an error.
-func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, string, error) {
- var refreshToken string
- if o.AuthStyle == StyleInParams {
- if o.ClientID != "" {
- v.Set("client_id", o.ClientID)
- }
- if o.ClientSecret != "" {
- v.Set("client_secret", o.ClientSecret)
- }
- }
- req, err := http.NewRequest("POST", o.TokenURL, strings.NewReader(v.Encode()))
- if err != nil {
- return nil, refreshToken, err
- }
- req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
- if o.AuthStyle == StyleInHeader {
- req.SetBasicAuth(url.QueryEscape(o.ClientID), url.QueryEscape(o.ClientSecret))
- }
-
- // Make request
- r, err := o.client().Do(req.WithContext(ctx))
- if err != nil {
- return nil, refreshToken, err
- }
- body, err := internal.ReadAll(r.Body)
- r.Body.Close()
- if err != nil {
- return nil, refreshToken, fmt.Errorf("auth: cannot fetch token: %w", err)
- }
-
- failureStatus := r.StatusCode < 200 || r.StatusCode > 299
- tokError := &Error{
- Response: r,
- Body: body,
- }
-
- var token *Token
- // errors ignored because of default switch on content
- content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
- switch content {
- case "application/x-www-form-urlencoded", "text/plain":
- // some endpoints return a query string
- vals, err := url.ParseQuery(string(body))
- if err != nil {
- if failureStatus {
- return nil, refreshToken, tokError
- }
- return nil, refreshToken, fmt.Errorf("auth: cannot parse response: %w", err)
- }
- tokError.code = vals.Get("error")
- tokError.description = vals.Get("error_description")
- tokError.uri = vals.Get("error_uri")
- token = &Token{
- Value: vals.Get("access_token"),
- Type: vals.Get("token_type"),
- Metadata: make(map[string]interface{}, len(vals)),
- }
- for k, v := range vals {
- token.Metadata[k] = v
- }
- refreshToken = vals.Get("refresh_token")
- e := vals.Get("expires_in")
- expires, _ := strconv.Atoi(e)
- if expires != 0 {
- token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
- }
- default:
- var tj tokenJSON
- if err = json.Unmarshal(body, &tj); err != nil {
- if failureStatus {
- return nil, refreshToken, tokError
- }
- return nil, refreshToken, fmt.Errorf("auth: cannot parse json: %w", err)
- }
- tokError.code = tj.ErrorCode
- tokError.description = tj.ErrorDescription
- tokError.uri = tj.ErrorURI
- token = &Token{
- Value: tj.AccessToken,
- Type: tj.TokenType,
- Expiry: tj.expiry(),
- Metadata: make(map[string]interface{}),
- }
- json.Unmarshal(body, &token.Metadata) // optional field, skip err check
- refreshToken = tj.RefreshToken
- }
- // according to spec, servers should respond status 400 in error case
- // https://www.rfc-editor.org/rfc/rfc6749#section-5.2
- // but some unorthodox servers respond 200 in error case
- if failureStatus || tokError.code != "" {
- return nil, refreshToken, tokError
- }
- if token.Value == "" {
- return nil, refreshToken, errors.New("auth: server response missing access_token")
- }
- return token, refreshToken, nil
-}
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/.envrc.example b/vendor/cloud.google.com/go/cloudsqlconn/.envrc.example
deleted file mode 100644
index 7c3dd2b0..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/.envrc.example
+++ /dev/null
@@ -1,5 +0,0 @@
-export POSTGRES_USER=some-user
-export POSTGRES_PASS=some-password
-export POSTGRES_DB=some-db-name
-export POSTGRES_CONNECTION_NAME=some-project:some-region:some-instance
-export POSTGRES_USER_IAM=some-iam-user
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/.gitignore b/vendor/cloud.google.com/go/cloudsqlconn/.gitignore
deleted file mode 100644
index d47ba91a..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/.gitignore
+++ /dev/null
@@ -1,6 +0,0 @@
-# IDEs
-.vscode/
-.idea/
-
-# direnv
-.envrc
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/.golangci.yml b/vendor/cloud.google.com/go/cloudsqlconn/.golangci.yml
deleted file mode 100644
index 2cd90c52..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/.golangci.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright 2022 Google LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# .golangci.yml
-linters:
- disable-all: true
- enable:
- - goimports
- - revive
-issues:
- exclude-use-default: false
-linters-settings:
- revive:
- rules:
- - name: blank-imports
- - name: context-as-argument
- - name: context-keys-type
- - name: dot-imports
- - name: error-return
- - name: error-strings
- - name: error-naming
- - name: exported
- - name: if-return
- - name: increment-decrement
- - name: var-naming
- - name: var-declaration
- - name: range
- - name: receiver-naming
- - name: time-naming
- - name: unexported-return
- - name: indent-error-flow
- - name: errorf
- - name: empty-block
- - name: superfluous-else
- - name: unused-parameter
- - name: unreachable-code
- - name: redefines-builtin-id
- - name: range-val-in-closure
- - name: range-val-address
- - name: import-shadowing
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/CHANGELOG.md b/vendor/cloud.google.com/go/cloudsqlconn/CHANGELOG.md
deleted file mode 100644
index c22f998c..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/CHANGELOG.md
+++ /dev/null
@@ -1,381 +0,0 @@
-# Changelog
-
-## [1.11.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.10.1...v1.11.0) (2024-06-12)
-
-
-### Features
-
-* generate RSA key lazily for lazy refresh ([#826](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/826)) ([bf293e2](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/bf293e25e2d52f395734c597c86dfe85ede5f4cd)), closes [#823](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/823)
-* invalidate cache on failed IP lookup ([#812](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/812)) ([4b68de3](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/4b68de3693e25642acd847d0c8ac393982d00c9b)), closes [#780](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/780)
-
-
-### Bug Fixes
-
-* ensure connection count is correctly reported ([#824](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/824)) ([b286049](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/b286049a7ade2a9e3cf44ea36f56946cfa58f60a))
-* invalidate cache on failed `Warmup` and `EngineVersion` ([#827](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/827)) ([c3915a6](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/c3915a6790f3d4e3cff266a0d8c506a09ecf9634))
-
-## [1.10.1](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.10.0...v1.10.1) (2024-05-22)
-
-
-### Bug Fixes
-
-* remove duplicate refresh operations ([#806](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/806)) ([beb3605](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/beb36052af2221d7ff238edc4c98c733cac2999d)), closes [#771](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/771)
-
-## [1.10.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.9.0...v1.10.0) (2024-05-14)
-
-
-### Features
-
-* expose context to debug logger ([#797](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/797)) ([847f7c1](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/847f7c10cc796761e81a86e0551f00832a5056d5))
-
-
-### Bug Fixes
-
-* retry 50x errors with exponential backoff ([#781](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/781)) ([40dc789](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/40dc789baabbe40cebabee7a287222940b120e6a))
-
-## [1.9.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.8.1...v1.9.0) (2024-04-16)
-
-
-### Features
-
-* add support for a lazy refresh ([#772](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/772)) ([931150f](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/931150f492cb461cf623a9bbafae6f704b9c5a36)), closes [#770](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/770)
-
-
-### Bug Fixes
-
-* return a friendly error if the dialer is closed ([#766](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/766)) ([d1c13e0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/d1c13e039a29ccbc085e2d3ca8451f83825e8d32))
-
-## [1.8.1](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.8.0...v1.8.1) (2024-03-12)
-
-
-### Bug Fixes
-
-* strip monotonic clock reading in cert check ([#750](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/750)) ([6ae33b0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/6ae33b0a6e281293823e75ff97a51575c053bf9f)), closes [#749](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/749)
-
-## [1.8.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.7.0...v1.8.0) (2024-03-08)
-
-
-### Features
-
-* add support for TPC ([#732](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/732)) ([b7364d9](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/b7364d93cc93893b2af8eeda6cdf9cf36aaf9d67))
-
-## [1.7.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.6.0...v1.7.0) (2024-02-13)
-
-
-### Features
-
-* add support for debug logging ([#726](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/726)) ([d8ca89e](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/d8ca89e4403e2e3cf6ac278a19b4d93b77797ec6))
-* add support for Go 1.22 ([#723](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/723)) ([ebe31dc](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/ebe31dcaf2ec215470ce3b224732f4ff6282ba22))
-
-
-### Bug Fixes
-
-* ensure background refresh is closed cleanly ([#715](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/715)) ([0b4c342](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/0b4c3420bb5158cab63c51158e109b3bea926b59))
-
-## [1.6.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.5.2...v1.6.0) (2024-01-17)
-
-
-### Features
-
-* add connection name to public API ([#698](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/698)) ([84f3b6e](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/84f3b6eedcf13402bcbf7da720924cf242893beb))
-
-## [1.5.2](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.5.1...v1.5.2) (2023-12-12)
-
-
-### Bug Fixes
-
-* ensure cert refresh recovers from sleep ([#686](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/686)) ([95671ad](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/95671ada40905cf14209b5c54058463689ce6b20))
-
-## [1.5.1](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.5.0...v1.5.1) (2023-11-14)
-
-
-### Bug Fixes
-
-* bump dependencies to latest ([#667](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/667)) ([86544f5](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/86544f5a477f694c8ceb862b13c3b83d19d72d5d))
-
-## [1.5.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.4.5...v1.5.0) (2023-10-24)
-
-
-### Features
-
-* add pgx v5 support ([#639](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/639)) ([#642](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/642)) ([8d86d92](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/8d86d92147d06ca10d754439638d6fd1b2154182))
-
-
-### Bug Fixes
-
-* use different driver names for v4 and v5 testing ([#639](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/639)) ([#654](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/654)) ([fa73c41](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/fa73c4184a9887e6e9217e5b50db97aa3fdc0d28))
-* use HandshakeContext by default ([#656](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/656)) ([49aad1f](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/49aad1f30bf560e6cf1e2ff52da46f3ff2cd2312))
-
-## [1.4.5](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.4.4...v1.4.5) (2023-10-11)
-
-
-### Bug Fixes
-
-* bump dependencies to latest ([#649](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/649)) ([0ddac9f](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/0ddac9fa7de17f740021408ed25ffbb0b0133d9e))
-* bump minimum supported Go version to 1.19 ([#637](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/637)) ([4a28a78](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/4a28a788a94d64e1ce6ddd76fa3a041c82c8f2b1))
-
-## [1.4.4](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.4.3...v1.4.4) (2023-09-12)
-
-
-### Bug Fixes
-
-* update dependencies to latest versions ([#621](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/621)) ([32f1e27](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/32f1e2762b8ced0a3332e4928fdc61ad5d731530))
-
-## [1.4.3](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.4.2...v1.4.3) (2023-08-18)
-
-
-### Bug Fixes
-
-* update ForceRefresh to block if invalid ([#605](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/605)) ([61c72e3](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/61c72e3e76d04863b6971aeb86726c3b1252e5ed))
-
-## [1.4.2](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.4.1...v1.4.2) (2023-08-15)
-
-
-### Bug Fixes
-
-* re-use existing connection info on force refresh ([#602](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/602)) ([d049851](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/d049851361fc48bb339232c6609a2f2932d2d684))
-
-## [1.4.1](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.4.0...v1.4.1) (2023-08-07)
-
-
-### Bug Fixes
-
-* avoid holding lock over IO ([#576](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/576)) ([1e4560f](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/1e4560f7b41547882a2e9f7ef3ece94bb1bb48be))
-
-## [1.4.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.3.0...v1.4.0) (2023-07-06)
-
-
-### Features
-
-* add support for PSC connections ([#565](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/565)) ([10a46b0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/10a46b0a36440d6b84498468346833729c21bbb4))
-
-## [1.3.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.2.4...v1.3.0) (2023-06-13)
-
-
-### Features
-
-* add support for WithOneOffDialFunc ([#558](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/558)) ([14592f3](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/14592f3d21e58fbd038cffdb6c4f67d7e3526302))
-
-
-### Bug Fixes
-
-* close background refresh for bad instances ([#550](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/550)) ([31f06fc](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/31f06fc078f097b6cef4f7c19228a724a00c3408))
-
-## [1.2.4](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.2.3...v1.2.4) (2023-05-09)
-
-
-### Bug Fixes
-
-* update dependencies to latest versions ([#539](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/539)) ([f1a4008](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/f1a40083289ef0051b757f7a12921cfefc65a249))
-
-## [1.2.3](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.2.2...v1.2.3) (2023-04-11)
-
-
-### Bug Fixes
-
-* update dependencies to latest versions ([#517](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/517)) ([55bad80](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/55bad80b3ae64b4b9c7135db2c12dd49e0ad230e))
-
-## [1.2.2](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.2.1...v1.2.2) (2023-03-09)
-
-
-### Bug Fixes
-
-* strip monotonic clock readings for refresh calculations ([#471](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/471)) ([94048af](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/94048afd001fd960f316e961501b871ab648296e))
-
-## [1.2.1](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.2.0...v1.2.1) (2023-02-15)
-
-
-### Bug Fixes
-
-* don't initialize default creds when using a token ([#460](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/460)) ([fc5c435](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/fc5c435b92ddfe6be5bbe77264486c0b712ba4d1))
-
-## [1.2.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.1.1...v1.2.0) (2023-02-14)
-
-
-### Features
-
-* add support for Go 1.20 ([#445](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/445)) ([4df53ef](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/4df53ef4e742d6cd4c80bb79ed90d7ecd2110868))
-
-
-### Bug Fixes
-
-* error when dialer is misconfigured with token source ([#453](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/453)) ([7b45a7e](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/7b45a7e27c164dbf1f7903ed7792e4d81dd467b7))
-* improve reliability of certificate refresh ([#448](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/448)) ([47bd3f3](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/47bd3f385ad0cc7bbd057f3273ed03d2587e9ac8))
-* prevent repeated context expired errors ([#458](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/458)) ([7ffeafe](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/7ffeafea9729d08ad04c403c07b70d4f184664a0))
-
-## [1.1.1](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.1.0...v1.1.1) (2023-01-10)
-
-
-### Bug Fixes
-
-* move MySQL liveness check into driver code ([#417](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/417)) ([0de68fb](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/0de68fbc32d87e4cabab301be8a11f9eba50e13d))
-* use handshake context when possible ([#427](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/427)) ([37c4e70](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/37c4e70aa7082c49b84aaedb2066ddb67e1d920f))
-
-## [1.1.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.0.1...v1.1.0) (2022-12-06)
-
-
-### Features
-
-* add support for MySQL Auto IAM AuthN ([#309](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/309)) ([6c4f20e](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/6c4f20eae857c215098b7b991fffc7d15bbead5b))
-* improve refresh duration calculation ([#364](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/364)) ([10b0bf7](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/10b0bf7d9d3c69238df3d0a88ffab54f03f7d7a6))
-
-
-### Bug Fixes
-
-* handle context cancellations during instance refresh ([#372](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/372)) ([cdb59c7](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/cdb59c797968f46419673378c96e79d40da453dc)), closes [#370](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/370)
-* remove leading slash from metric names ([#393](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/393)) ([ac5ca26](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/ac5ca264e17adf0c5780ea2317f4df03c6e1923d))
-
-## [1.0.1](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v1.0.0...v1.0.1) (2022-11-01)
-
-
-### Bug Fixes
-
-* update dependencies to latest versions ([#365](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/365)) ([5479502](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/547950268712f48d8613aac3d7e2a1e494b6a680))
-
-## [1.0.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v0.5.2...v1.0.0) (2022-10-18)
-
-
-### Features
-
-* add WithAutoIP option ([#346](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/346)) ([bd20b6b](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/bd20b6bfe746cfea778b9e1a9702de28047e5950))
-* Downscope OAuth2 token included in ephemeral certificate ([#332](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/332)) ([d13dd6f](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/d13dd6f3e7db0179511539315dec1c2dc96f0e3e))
-
-
-### Bug Fixes
-
-* throw error when Auto IAM AuthN is unsupported ([#310](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/310)) ([652e196](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/652e196b427ce9673676e214c6ad3905b21a68b0))
-
-
-### Miscellaneous Chores
-
-* set next version to v1.0.0 ([#349](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/349)) ([a76d2db](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/a76d2db0b31447dc96707679973ff87b3c755bf5))
-
-## [0.5.2](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v0.5.1...v0.5.2) (2022-09-07)
-
-
-### Bug Fixes
-
-* update dependencies to latest versions ([#300](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/300)) ([5504df6](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/5504df6e03bda7b56e01146e63b715f775443d85))
-
-## [0.5.1](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v0.5.0...v0.5.1) (2022-08-01)
-
-
-### Bug Fixes
-
-* remove unnecessary import path restrictions ([#258](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/258)) ([bc57877](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/bc57877f16a61e42c603d4dc50ff4d01fc01d9d9))
-
-## [0.5.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v0.4.0...v0.5.0) (2022-07-12)
-
-
-### Features
-
-* expose the WithQuotaProject dialer option ([#237](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/237)) ([bda8917](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/bda891776d5d44d49ed3e4a268f27bd10a23427e))
-
-
-### Bug Fixes
-
-* support MySQL driver’s conn check. ([#226](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/226)) ([4b48e3b](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/4b48e3bfe7a5bd8c398592f21eb25ac43644e123))
-
-## [0.4.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v0.3.1...v0.4.0) (2022-06-07)
-
-
-### Features
-
-* add DialOption for IAM DB Authentication ([#171](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/171)) ([c103acc](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/c103acc6b49f94a1a733dc0e5c8b41890172dd8b))
-* Add Warmup function for starting background refresh ([#163](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/163)) ([2459f92](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/2459f92911eeca46102f56966c8cefa7cee8a0ae))
-
-
-### Bug Fixes
-
-* adjust alignment for 32-bit arch ([#197](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/197)) ([86e96ad](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/86e96adf30cbc82ba170dc70ce4d0694a3b595ce))
-
-### [0.3.1](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v0.3.0...v0.3.1) (2022-05-03)
-
-
-### Bug Fixes
-
-* update dependencies to latest versions ([#185](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/185)) ([702a380](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/702a3802d0383c0d71277779d80d62a5e5c23157))
-
-## [0.3.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v0.2.0...v0.3.0) (2022-04-04)
-
-
-### Features
-
-* add option to configure SQL Admin API URL ([#148](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/148)) ([c791369](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/c79136972083480d16f65a4696a7747bae942afe))
-* add WithUserAgent opt ([#156](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/156)) ([bd89dc5](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/bd89dc50bb50d1d6ff9cf5a146071b307a54683a))
-* drop support for Go 1.15 ([#145](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/145)) ([791641b](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/791641bb2d0ab93955b218b9bc6f5335b8ead243))
-* use connect API for instance metadata ([#150](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/150)) ([1086ad0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/1086ad01cc7907051147d572f4f27ab1ba538027))
-
-
-### Bug Fixes
-
-* memory leak in database/sql integration ([#162](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/162)) ([47cdf2d](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/47cdf2da2230801b591bf4f459bfcbe7e9432cd1))
-* prevent unnecessary allocation of conn config ([#164](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/164)) ([49c7828](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/49c782809aff84b6141027f1a2634b0a0db2b18a))
-
-## [0.2.0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/compare/v0.1.0...v0.2.0) (2022-03-01)
-
-
-### ⚠ BREAKING CHANGES
-
-* use singular name for package (#101)
-
-### Features
-
-* add dial_failure_count metric ([#127](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/127)) ([34cdbb9](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/34cdbb92efa6f186bd8afdde3c8dcc810e77911e))
-* add metrics for refresh success and failure ([#133](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/133)) ([a36a212](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/a36a212dbd30474721669f10fbfda1e76a22d325))
-* drop support and testing for Go 1.14 ([#128](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/128)) ([aceadcc](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/aceadcc4835b6fe18639a696755302bb00f82bc2))
-
-
-### Bug Fixes
-
-* custom drivers report error on cleanup ([#102](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/102)) ([648b75a](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/648b75a4d8e43b3641d827086047a9c6783c1306))
-* use singular name for package ([#101](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/101)) ([5e5589d](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/5e5589db3bb0a86d9c167cd6b85358535238176a))
-
-
-## 0.1.0 (2022-02-08)
-
-
-### ⚠ BREAKING CHANGES
-
-* remove singleton Dial (#92)
-* return cleanup func to close dialer (#75)
-* dialer is a io.Closer (#76)
-* initialize dialer in register func (#73)
-* rename DialerOption to Option (#64)
-
-### Features
-
-* Add Close method to Dialer ([#34](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/34)) ([91ee305](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/91ee305b6af83d48ba5fc445ad1191fd99785079))
-* add concrete errors to public API ([#36](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/36)) ([7441b71](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/7441b7176d8bce5d2e054aa7e53f1509aece9898))
-* add custom driver for MySQL ([#70](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/70)) ([755c334](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/755c3344f28e33d18a1d7acc414352ee73e39d8a))
-* add custom driver for SQL Server ([#71](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/71)) ([14eb60a](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/14eb60a88532dd81cda4d602d044c98013ee0af6))
-* add default useragent ([#17](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/17)) ([57d7ed9](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/57d7ed9da73c731196bdc5120134b6dec72d9c68))
-* Add DialerOption for specifying a refresh timeout ([#12](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/12)) ([94df7cf](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/94df7cfa21dc60463afb1ad3519455d507d610f3))
-* add DialOptions for configuring Dial ([#8](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/8)) ([e2d53ee](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/e2d53ee6c66ba58114d8a49ca86f0eb3a56ce481))
-* Add EngineVersion method to Dialer ([#59](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/59)) ([6a78bfd](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/6a78bfd4a73807e4fce455ae0d6cd4f531710edd))
-* Add initial dialer ([#1](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/1)) ([7e89552](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/7e8955216cc91999e3d8d17ed9eced8f63564ca7))
-* add initial support for metrics ([#40](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/40)) ([ee396ff](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/ee396fffb10ea52af9072d0fdd09a8b4e9d4b736))
-* add support for configuring the HTTP client ([#55](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/55)) ([de9e72e](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/de9e72e1dc6961f6b6ed3fe9cf4381344dd5fa37))
-* add support for IAM DB Authn ([#44](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/44)) ([92e28cf](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/92e28cfccd573c0908588ad3594ef9de403e5e51))
-* add support for tracing ([#32](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/32)) ([4d2acbc](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/4d2acbcecb11acbbc58f95c711051a02fb31e82f))
-* allow for configuring the Dial func ([#57](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/57)) ([4cb523e](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/4cb523e80b4a388b37c8ce251a533a3b8d370029))
-* expose Dialer and add DialerOptions ([#7](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/7)) ([1235a9f](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/1235a9f62beb678f18695afc6d22d0b8e6b7b506))
-* force early refresh of instance info if connect fails ([#19](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/19)) ([eb06ae2](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/eb06ae26609cbc46fa65e50c080508d53ec0b9c2))
-* improve reliablity of refresh operations ([#49](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/49)) ([3a52440](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/3a5244075f68f3c95f26218f9008bb7451934f80))
-* improve RSA keypair generation ([#10](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/10)) ([e2a5238](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/e2a52388ff047144272089db60cb0b1fce7c16bf))
-* initialize dialer in register func ([#73](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/73)) ([7633cfd](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/7633cfd2eaadeef065686f85ae9f2faa5087e917))
-* **postgres/pgxv4:** add support for postgres driver ([#61](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/61)) ([295a5dc](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/295a5dcfbdaeb12884333e678f8b9f7f44de2b46))
-* remove singleton Dial ([#92](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/92)) ([0a1966c](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/0a1966c4fe0400e8dcd14b2531db20ad7bc10855))
-* return cleanup func to close dialer ([#75](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/75)) ([fa9b845](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/fa9b84576a7adcf8f0ad4296723685d681ada89e))
-* use cloud.google.com/go/cloudsqlconn ([#30](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/30)) ([a251fd7](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/a251fd727813223dc08f40bc5060add3235564e6))
-
-
-### Bug Fixes
-
-* dialer is a io.Closer ([#76](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/76)) ([89de96c](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/89de96c2a4d636cc3dfe44aa1b47ab3492d5cf0c))
-* perform refresh operations asynchronously ([#11](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/11)) ([925d6c2](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/925d6c2686d519d182dc196c752ed0c7edb0e28c))
-* rate limit refresh attempts per instance ([#18](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/18)) ([1092ccc](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/1092ccc04361293f6ea07fdc97cde30cf1cb1866))
-* rename DialerOption to Option ([#64](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/64)) ([016a821](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/016a821ba191b7b2117c7d240507e32c289e3f0e))
-* schedule refreshes based on result expiration instead of fixed interval ([#21](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/21)) ([65073d0](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/65073d0ea9582abbe01c7ca0698681624e3c7834))
-* **trace:** use LastValue for open connections ([#58](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/58)) ([4ee6bea](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/4ee6bea069c196454dd48034457a16ba416b725c))
-* use ctx for NewService ([#24](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/issues/24)) ([77fd677](https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/commit/77fd677ccb827feb89e6bb41eb45c22f3a2b1861))
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/CODE_OF_CONDUCT.md b/vendor/cloud.google.com/go/cloudsqlconn/CODE_OF_CONDUCT.md
deleted file mode 100644
index 46b2a08e..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# Contributor Code of Conduct
-
-As contributors and maintainers of this project,
-and in the interest of fostering an open and welcoming community,
-we pledge to respect all people who contribute through reporting issues,
-posting feature requests, updating documentation,
-submitting pull requests or patches, and other activities.
-
-We are committed to making participation in this project
-a harassment-free experience for everyone,
-regardless of level of experience, gender, gender identity and expression,
-sexual orientation, disability, personal appearance,
-body size, race, ethnicity, age, religion, or nationality.
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery
-* Personal attacks
-* Trolling or insulting/derogatory comments
-* Public or private harassment
-* Publishing other's private information,
-such as physical or electronic
-addresses, without explicit permission
-* Other unethical or unprofessional conduct.
-
-Project maintainers have the right and responsibility to remove, edit, or reject
-comments, commits, code, wiki edits, issues, and other contributions
-that are not aligned to this Code of Conduct.
-By adopting this Code of Conduct,
-project maintainers commit themselves to fairly and consistently
-applying these principles to every aspect of managing this project.
-Project maintainers who do not follow or enforce the Code of Conduct
-may be permanently removed from the project team.
-
-This code of conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community.
-
-Instances of abusive, harassing, or otherwise unacceptable behavior
-may be reported by opening an issue
-or contacting one or more of the project maintainers.
-
-This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
-available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/CONTRIBUTING.md b/vendor/cloud.google.com/go/cloudsqlconn/CONTRIBUTING.md
deleted file mode 100644
index 22b241cb..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/CONTRIBUTING.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# How to Contribute
-
-We'd love to accept your patches and contributions to this project. There are
-just a few small guidelines you need to follow.
-
-## Contributor License Agreement
-
-Contributions to this project must be accompanied by a Contributor License
-Agreement (CLA). You (or your employer) retain the copyright to your
-contribution; this simply gives us permission to use and redistribute your
-contributions as part of the project. Head over to
- to see your current agreements on file or
-to sign a new one.
-
-You generally only need to submit a CLA once, so if you've already submitted one
-(even if it was for a different project), you probably don't need to do it
-again.
-
-## Code reviews
-
-All submissions, including submissions by project members, require review. We
-use GitHub pull requests for this purpose. Consult
-[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
-information on using pull requests.
-
-## Community Guidelines
-
-This project follows
-[Google's Open Source Community Guidelines](https://opensource.google/conduct/).
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/LICENSE b/vendor/cloud.google.com/go/cloudsqlconn/LICENSE
deleted file mode 100644
index d6456956..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/README.md b/vendor/cloud.google.com/go/cloudsqlconn/README.md
deleted file mode 100644
index 19e69d0f..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/README.md
+++ /dev/null
@@ -1,493 +0,0 @@
-
-
-
-
-
-
-Cloud SQL Go Connector
-
-[![Open In Codelab][codelab-badge]][codelab]
-[![CI][ci-badge]][ci-build]
-[![Go Reference][pkg-badge]][pkg-docs]
-
-[ci-badge]: https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/actions/workflows/tests.yaml/badge.svg?event=push
-[ci-build]: https://github.com/GoogleCloudPlatform/cloud-sql-go-connector/actions/workflows/tests.yaml?query=event%3Apush+branch%3Amain
-[pkg-badge]: https://pkg.go.dev/badge/cloud.google.com/go/cloudsqlconn.svg
-[pkg-docs]: https://pkg.go.dev/cloud.google.com/go/cloudsqlconn
-[codelab-badge]: https://img.shields.io/badge/Open%20In%20Codelab-blue?labelColor=grey&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAMAAABEpIrGAAAAyVBMVEX////////////////////////+8/L0oZrrTkHqQzXzlY13yKEPnVgdo2KHzqvw+fX4xMDtWk604MssqWz73NnvcWdKtYHS7eD85+ZowpayncOWapvVS0360MzD5tWW1LZxo/dChfSLaKHfR0FMhe2rY4TyiYBlm/bQ4Pz+7sD7xCPPthRJpkegwvl9q/fn8P7+9+D80VL7vASisCQsoU4spV393YHPwDm40ftNjPX7wBP95qHz9/7/++/b6P3+8tD8zUKIsvj81WJbutStAAAABnRSTlMAIKDw/zDiNY+eAAAA+klEQVR4AbzRRYICMRRF0VB5QLlrO+7uDvvfVKfSv91mnGlunDFWUDh+xJUCE4ocv+JFMZ/jD7zAFPxJYRx/4gz/uGpQKquaDsEwLdv5HrieJriAbwqB/yUII00qA7YpxcmHoKRrJAUSk2QOKLi5vdMk7x7CQ0CF9fgSPFUqlWpN09QyiG1REsugkqs3miW8cTIqHApyrTbedLq9vgzkCoMKGY4gjSdTYTY3F74M0G5VyADCckpWnbd3WG+oaAMdGt7uPj7UfvC2BC2wPIACMspvuzkCp60YPo9/+M328LKHcHiek6EmnRMMAUAw2RPMOASzHsHMSzD7AwCdmyeTDUqFKQAAAABJRU5ErkJggg==
-[codelab]: https://codelabs.developers.google.com/codelabs/cloud-sql-go-connector
-
-The _Cloud SQL Go Connector_ is a Cloud SQL connector designed for use with the
-Go language. Using a Cloud SQL connector provides a native alternative to the
-[Cloud SQL Auth Proxy][] while providing the following benefits:
-
-* **IAM Authorization:** uses IAM permissions to control who/what can connect to
- your Cloud SQL instances
-* **Improved Security:** uses robust, updated TLS 1.3 encryption and
- identity verification between the client connector and the server-side proxy,
- independent of the database protocol.
-* **Convenience:** removes the requirement to use and distribute SSL
- certificates, as well as manage firewalls or source/destination IP addresses.
-* (optionally) **IAM DB Authentication:** provides support for
- [Cloud SQL’s automatic IAM DB AuthN][iam-db-authn] feature.
-
-[iam-db-authn]: https://cloud.google.com/sql/docs/postgres/authentication
-[Cloud SQL Auth Proxy]: https://cloud.google.com/sql/docs/postgres/sql-proxy
-
-For users migrating from the Cloud SQL Proxy drivers, see the [migration
-guide](./migration-guide.md).
-
-For a quick example, try out the Go Connector in a [Codelab][codelab].
-
-## Installation
-
-You can install this repo with `go get`:
-```sh
-go get cloud.google.com/go/cloudsqlconn
-```
-
-## Usage
-
-This package provides several functions for authorizing and encrypting
-connections. These functions can be used with your database driver to connect to
-your Cloud SQL instance.
-
-The instance connection name for your Cloud SQL instance is always in the
-format `project:region:instance`.
-
-### APIs and Services
-
-This package requires the following to successfully make Cloud SQL Connections:
-
-- IAM principal (user, service account, etc.) with the
-[Cloud SQL Client][client-role] role or equivalent. This IAM principal will
- be used for [credentials](#credentials).
-- The [Cloud SQL Admin API][admin-api] to be enabled within your Google Cloud
-Project. By default, the API will be called in the project associated with
-the IAM principal.
-
-[admin-api]: https://console.cloud.google.com/apis/api/sqladmin.googleapis.com
-[client-role]: https://cloud.google.com/sql/docs/mysql/roles-and-permissions
-
-### Credentials
-
-This project uses the [Application Default Credentials (ADC)][adc] strategy for
-resolving credentials. Please see [these instructions for how to set your ADC][set-adc]
-(Google Cloud Application vs Local Development, IAM user vs service account credentials),
-or consult the [golang.org/x/oauth2/google][google-auth] documentation.
-
-To explicitly set a specific source for the Credentials, see [Using
-Options](#using-options) below.
-
-[adc]: https://cloud.google.com/docs/authentication#adc
-[set-adc]: https://cloud.google.com/docs/authentication/provide-credentials-adc
-[google-auth]: https://pkg.go.dev/golang.org/x/oauth2/google#hdr-Credentials
-
-### Connecting to a database
-
-#### Postgres
-
-Postgres users have the option of using the `database/sql` interface or
-using [pgx][] directly. See [pgx's advice on which to choose][pgx-advice].
-
-[pgx]: https://github.com/jackc/pgx
-[pgx-advice]: https://github.com/jackc/pgx#choosing-between-the-pgx-and-databasesql-interfaces
-
-##### Using the dialer with pgx
-
-To use the dialer with [pgx][], we recommend using connection pooling with
-[pgxpool](https://pkg.go.dev/github.com/jackc/pgx/v5/pgxpool) by configuring
-a [Config.DialFunc][dial-func] like so:
-
-``` go
-import (
- "context"
- "net"
-
- "cloud.google.com/go/cloudsqlconn"
- "github.com/jackc/pgx/v5/pgxpool"
-)
-
-func connect() {
- // Configure the driver to connect to the database
- dsn := "user=myuser password=mypass dbname=mydb sslmode=disable"
- config, err := pgxpool.ParseConfig(dsn)
- if err != nil {
- /* handle error */
- }
-
- // Create a new dialer with any options
- d, err := cloudsqlconn.NewDialer(context.Background())
- if err != nil {
- /* handle error */
- }
-
- // Tell the driver to use the Cloud SQL Go Connector to create connections
- config.ConnConfig.DialFunc = func(ctx context.Context, _ string, instance string) (net.Conn, error) {
- return d.Dial(ctx, "project:region:instance")
- }
-
- // Interact with the driver directly as you normally would
- pool, err := pgxpool.NewWithConfig(context.Background(), config)
- if err != nil {
- /* handle error */
- }
-
- // call cleanup when you're done with the database connection
- cleanup := func() error { return d.Close() }
- // ... etc
-}
-```
-
-[dial-func]: https://pkg.go.dev/github.com/jackc/pgconn#Config
-
-##### Using the dialer with `database/sql`
-
-To use `database/sql`, call `pgxv5.RegisterDriver` with any necessary Dialer
-configuration. Note: the connection string must use the keyword/value format
-with host set to the instance connection name. The returned `cleanup` func
-will stop the dialer's background refresh goroutine and so should only be called
-when you're done with the `Dialer`.
-
-``` go
-import (
- "database/sql"
-
- "cloud.google.com/go/cloudsqlconn"
- "cloud.google.com/go/cloudsqlconn/postgres/pgxv5"
-)
-
-func connect() {
- cleanup, err := pgxv5.RegisterDriver("cloudsql-postgres", cloudsqlconn.WithIAMAuthN())
- if err != nil {
- // ... handle error
- }
- // call cleanup when you're done with the database connection
- defer cleanup()
-
- db, err := sql.Open(
- "cloudsql-postgres",
- "host=project:region:instance user=myuser password=mypass dbname=mydb sslmode=disable",
- )
- // ... etc
-}
-```
-
-#### MySQL
-
-To use `database/sql`, use `mysql.RegisterDriver` with any necessary Dialer
-configuration. The returned `cleanup` func
-will stop the dialer's background refresh goroutine and so should only be called
-when you're done with the `Dialer`.
-
-```go
-import (
- "database/sql"
-
- "cloud.google.com/go/cloudsqlconn"
- "cloud.google.com/go/cloudsqlconn/mysql/mysql"
-)
-
-func connect() {
- cleanup, err := mysql.RegisterDriver("cloudsql-mysql", cloudsqlconn.WithCredentialsFile("key.json"))
- if err != nil {
- // ... handle error
- }
- // call cleanup when you're done with the database connection
- defer cleanup()
-
- db, err := sql.Open(
- "cloudsql-mysql",
- "myuser:mypass@cloudsql-mysql(project:region:instance)/mydb",
- )
- // ... etc
-}
-```
-
-#### SQL Server
-
-To use `database/sql`, use `mssql.RegisterDriver` with any necessary Dialer
-configuration. The returned `cleanup` func
-will stop the dialer's background refresh goroutine and so should only be called
-when you're done with the `Dialer`.
-
-``` go
-import (
- "database/sql"
-
- "cloud.google.com/go/cloudsqlconn"
- "cloud.google.com/go/cloudsqlconn/sqlserver/mssql"
-)
-
-func connect() {
- cleanup, err := mssql.RegisterDriver("cloudsql-sqlserver", cloudsqlconn.WithCredentialsFile("key.json"))
- if err != nil {
- // ... handle error
- }
- // call cleanup when you're done with the database connection
- defer cleanup()
-
- db, err := sql.Open(
- "cloudsql-sqlserver",
- "sqlserver://user:password@localhost?database=mydb&cloudsql=project:region:instance",
- )
- // ... etc
-}
-```
-
-### Using Options
-
-If you need to customize something about the `Dialer`, you can initialize
-directly with `NewDialer`:
-
-```go
-d, err := cloudsqlconn.NewDialer(
- ctx,
- cloudsqlconn.WithCredentialsFile("key.json"),
-)
-if err != nil {
- log.Fatalf("unable to initialize dialer: %s", err)
-}
-
-conn, err := d.Dial(ctx, "project:region:instance")
-```
-
-For a full list of customizable behavior, see Option.
-
-### Using DialOptions
-
-If you want to customize things about how the connection is created, use
-`Option`:
-
-```go
-conn, err := d.Dial(
- ctx,
- "project:region:instance",
- cloudsqlconn.WithPrivateIP(),
-)
-```
-
-You can also use the `WithDefaultDialOptions` Option to specify
-DialOptions to be used by default:
-
-```go
-d, err := cloudsqlconn.NewDialer(
- ctx,
- cloudsqlconn.WithDefaultDialOptions(
- cloudsqlconn.WithPrivateIP(),
- ),
-)
-```
-
-### Automatic IAM Database Authentication
-
-Connections using [Automatic IAM database authentication][] are supported when
-using Postgres or MySQL drivers.
-
-Make sure to [configure your Cloud SQL Instance to allow IAM authentication][configure-iam-authn]
-and [add an IAM database user][add-iam-user].
-
-A `Dialer` can be configured to connect to a Cloud SQL instance using
-automatic IAM database authentication with the `WithIAMAuthN` Option
-(recommended) or the `WithDialIAMAuthN` DialOption.
-
-```go
-d, err := cloudsqlconn.NewDialer(ctx, cloudsqlconn.WithIAMAuthN())
-```
-
-When configuring the DSN for IAM authentication, the `password` field can be
-omitted and the `user` field should be formatted as follows:
-> Postgres: For an IAM user account, this is the user's email address.
-> For a service account, it is the service account's email without the
-> `.gserviceaccount.com` domain suffix.
->
-> MySQL: For an IAM user account, this is the user's email address, without
-> the `@` or domain name. For example, for `test-user@gmail.com`, set the
-> `user` field to `test-user`. For a service account, this is the service
-> account's email address without the `@project-id.iam.gserviceaccount.com`
-> suffix.
-
-Example DSNs using the `test-sa@test-project.iam.gserviceaccount.com`
-service account to connect can be found below.
-
-**Postgres**:
-
-```go
-dsn := "user=test-sa@test-project.iam dbname=mydb sslmode=disable"
-```
-
-**MySQL**:
-
-```go
-dsn := "user=test-sa dbname=mydb sslmode=disable"
-```
-
-[Automatic IAM database authentication]: https://cloud.google.com/sql/docs/postgres/authentication#automatic
-[configure-iam-authn]: https://cloud.google.com/sql/docs/postgres/create-edit-iam-instances#configure-iam-db-instance
-[add-iam-user]: https://cloud.google.com/sql/docs/postgres/create-manage-iam-users#creating-a-database-user
-
-### Enabling Metrics and Tracing
-
-This library includes support for metrics and tracing using [OpenCensus][].
-To enable metrics or tracing, you need to configure an [exporter][].
-OpenCensus supports many backends for exporters.
-
-Supported metrics include:
-
-- `cloudsqlconn/dial_latency`: The distribution of dialer latencies (ms)
-- `cloudsqlconn/open_connections`: The current number of open Cloud SQL
- connections
-- `cloudsqlconn/dial_failure_count`: The number of failed dial attempts
-- `cloudsqlconn/refresh_success_count`: The number of successful certificate
- refresh operations
-- `cloudsqlconn/refresh_failure_count`: The number of failed refresh
- operations.
-
-Supported traces include:
-
-- `cloud.google.com/go/cloudsqlconn.Dial`: The dial operation including
- refreshing an ephemeral certificate and connecting the instance
-- `cloud.google.com/go/cloudsqlconn/internal.InstanceInfo`: The call to retrieve
- instance metadata (e.g., database engine type, IP address, etc)
-- `cloud.google.com/go/cloudsqlconn/internal.Connect`: The connection attempt
- using the ephemeral certificate
-- SQL Admin API client operations
-
-For example, to use [Cloud Monitoring][] and [Cloud Trace][], you would
-configure an exporter like so:
-
-```golang
-import (
- "contrib.go.opencensus.io/exporter/stackdriver"
- "go.opencensus.io/trace"
-)
-
-func main() {
- sd, err := stackdriver.NewExporter(stackdriver.Options{
- ProjectID: "mycoolproject",
- })
- if err != nil {
- // handle error
- }
- defer sd.Flush()
- trace.RegisterExporter(sd)
-
- sd.StartMetricsExporter()
- defer sd.StopMetricsExporter()
-
- // Use cloudsqlconn as usual.
- // ...
-}
-```
-
-As OpenTelemetry has now reached feature parity with OpenCensus, the migration
-from OpenCensus to OpenTelemetry is strongly encouraged.
-[OpenTelemetry bridge](https://github.com/open-telemetry/opentelemetry-go/tree/main/bridge/opencensus)
-can be leveraged to migrate to OpenTelemetry without the need of replacing the
-OpenCensus APIs in this library. Example code is shown below for migrating an
-application using the OpenTelemetry bridge for traces.
-
-```golang
-import (
- texporter "github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace"
- "go.opencensus.io/trace"
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/bridge/opencensus"
- sdktrace "go.opentelemetry.io/otel/sdk/trace"
- "google.golang.org/api/option"
-)
-
-func main() {
- // trace.AlwaysSample() is expensive. Replacing it with your own
- // sampler for production environments is recommended.
- trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
-
- exporter, err := texporter.New(
- texporter.WithTraceClientOptions([]option.ClientOption{option.WithTelemetryDisabled()}),
- texporter.WithProjectID("mycoolproject"),
- )
- if err != nil {
- // Handle error
- }
-
- tp := sdktrace.NewTracerProvider(sdktrace.WithSyncer(exporter))
- otel.SetTracerProvider(tp)
- tracer := tp.Tracer("Cloud SQL Go Connector Trace")
- trace.DefaultTracer = opencensus.NewTracer(tracer)
-
- // Use cloudsqlconn as usual.
- // ...
-}
-```
-
-A known OpenTelemetry issue has been reported [here](https://github.com/googleapis/google-cloud-go/issues/7100).
-It shouldn't impact database operations.
-
-[OpenCensus]: https://opencensus.io/
-[exporter]: https://opencensus.io/exporters/
-[Cloud Monitoring]: https://cloud.google.com/monitoring
-[Cloud Trace]: https://cloud.google.com/trace
-
-### Debug Logging
-
-The Go Connector supports optional debug logging to help diagnose problems with
-the background certificate refresh. To enable it, provide a logger that
-implements the `debug.ContextLogger` interface when initializing the Dialer.
-
-For example:
-
-``` go
-import (
- "context"
- "net"
-
- "cloud.google.com/go/cloudsqlconn"
-)
-
-type myLogger struct{}
-
-func (l *myLogger) Debugf(ctx context.Context, format string, args ...interface{}) {
- // Log as you like here
-}
-
-func connect() {
- l := &myLogger{}
-
- d, err := NewDialer(
- context.Background(),
- cloudsqlconn.WithContextDebugLogger(l),
- )
- // use dialer as usual...
-}
-```
-
-## Support policy
-
-### Major version lifecycle
-
-This project uses [semantic versioning](https://semver.org/), and uses the
-following lifecycle regarding support for a major version:
-
-**Active** - Active versions get all new features and security fixes (that
-wouldn’t otherwise introduce a breaking change). New major versions are
-guaranteed to be "active" for a minimum of 1 year.
-
-**Deprecated** - Deprecated versions continue to receive security and critical
-bug fixes, but do not receive new features. Deprecated versions will be
-supported for 1 year.
-
-**Unsupported** - Any major version that has been deprecated for >=1 year is
-considered unsupported.
-
-### Supported Go Versions
-
-We follow the [Go Version Support Policy][go-policy] used by Google Cloud
-Libraries for Go.
-
-[go-policy]: https://github.com/googleapis/google-cloud-go#go-versions-supported
-
-### Release cadence
-
-This project aims for a release on at least a monthly basis. If no new features
-or fixes have been added, a new PATCH version with the latest dependencies is
-released.
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/SECURITY.md b/vendor/cloud.google.com/go/cloudsqlconn/SECURITY.md
deleted file mode 100644
index 8b58ae9c..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/SECURITY.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# Security Policy
-
-To report a security issue, please use [g.co/vulnz](https://g.co/vulnz).
-
-The Google Security Team will respond within 5 working days of your report on g.co/vulnz.
-
-We use g.co/vulnz for our intake, and do coordination and disclosure here using GitHub Security Advisory to privately discuss and fix the issue.
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/debug/debug.go b/vendor/cloud.google.com/go/cloudsqlconn/debug/debug.go
deleted file mode 100644
index 49017995..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/debug/debug.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package debug
-
-import "context"
-
-// Logger is the interface used for debug logging. By default, it is unused.
-//
-// Prefer ContextLogger instead.
-type Logger interface {
- // Debugf is for reporting information about internal operations.
- Debugf(format string, args ...interface{})
-}
-
-// ContextLogger is the interface used for debug logging. By default, it is unused.
-type ContextLogger interface {
- // Debugf is for reporting information about internal operations.
- Debugf(ctx context.Context, format string, args ...interface{})
-}
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/dialer.go b/vendor/cloud.google.com/go/cloudsqlconn/dialer.go
deleted file mode 100644
index b1f859fd..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/dialer.go
+++ /dev/null
@@ -1,567 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cloudsqlconn
-
-import (
- "context"
- "crypto/rand"
- "crypto/rsa"
- "crypto/tls"
- _ "embed"
- "errors"
- "fmt"
- "io"
- "net"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "cloud.google.com/go/cloudsqlconn/debug"
- "cloud.google.com/go/cloudsqlconn/errtype"
- "cloud.google.com/go/cloudsqlconn/instance"
- "cloud.google.com/go/cloudsqlconn/internal/cloudsql"
- "cloud.google.com/go/cloudsqlconn/internal/trace"
- "github.com/google/uuid"
- "golang.org/x/net/proxy"
- "golang.org/x/oauth2"
- "golang.org/x/oauth2/google"
- "google.golang.org/api/option"
- sqladmin "google.golang.org/api/sqladmin/v1beta4"
-)
-
-const (
- // defaultTCPKeepAlive is the default keep alive value used on connections to a Cloud SQL instance.
- defaultTCPKeepAlive = 30 * time.Second
- // serverProxyPort is the port the server-side proxy receives connections on.
- serverProxyPort = "3307"
- // iamLoginScope is the OAuth2 scope used for tokens embedded in the ephemeral
- // certificate.
- iamLoginScope = "https://www.googleapis.com/auth/sqlservice.login"
-)
-
-var (
- // ErrDialerClosed is used when a caller invokes Dial after closing the
- // Dialer.
- ErrDialerClosed = errors.New("cloudsqlconn: dialer is closed")
- // versionString indicates the version of this library.
- //go:embed version.txt
- versionString string
- userAgent = "cloud-sql-go-connector/" + strings.TrimSpace(versionString)
-)
-
-// keyGenerator encapsulates the details of RSA key generation to provide lazy
-// generation, custom keys, or a default RSA generator.
-type keyGenerator struct {
- once sync.Once
- key *rsa.PrivateKey
- err error
- genFunc func() (*rsa.PrivateKey, error)
-}
-
-// newKeyGenerator initializes a keyGenerator that will (in order):
-// - always return the RSA key if one is provided, or
-// - generate an RSA key lazily when it's requested, or
-// - (default) immediately generate an RSA key as part of the initializer.
-func newKeyGenerator(
- k *rsa.PrivateKey, lazy bool, genFunc func() (*rsa.PrivateKey, error),
-) (*keyGenerator, error) {
- g := &keyGenerator{genFunc: genFunc}
- switch {
- case k != nil:
- // If the caller has provided a key, initialize the key and consume the
- // sync.Once now.
- g.once.Do(func() { g.key, g.err = k, nil })
- case lazy:
- // If lazy refresh is enabled, do nothing and wait for the call to
- // rsaKey.
- default:
- // If no key has been provided and lazy refresh isn't enabled, generate
- // the key and consume the sync.Once now.
- g.once.Do(func() { g.key, g.err = g.genFunc() })
- }
- return g, g.err
-}
-
-// rsaKey will generate an RSA key if one is not already cached. Otherwise, it
-// will return the cached key.
-func (g *keyGenerator) rsaKey() (*rsa.PrivateKey, error) {
- g.once.Do(func() { g.key, g.err = g.genFunc() })
-
- return g.key, g.err
-}
-
-type connectionInfoCache interface {
- ConnectionInfo(context.Context) (cloudsql.ConnectionInfo, error)
- UpdateRefresh(*bool)
- ForceRefresh()
- io.Closer
-}
-
-// monitoredCache is a wrapper around a connectionInfoCache that tracks the
-// number of connections to the associated instance.
-type monitoredCache struct {
- openConns *uint64
-
- connectionInfoCache
-}
-
-// A Dialer is used to create connections to Cloud SQL instances.
-//
-// Use NewDialer to initialize a Dialer.
-type Dialer struct {
- lock sync.RWMutex
- cache map[instance.ConnName]monitoredCache
- keyGenerator *keyGenerator
- refreshTimeout time.Duration
- // closed reports if the dialer has been closed.
- closed chan struct{}
-
- sqladmin *sqladmin.Service
- logger debug.ContextLogger
-
- // lazyRefresh determines what kind of caching is used for ephemeral
- // certificates. When lazyRefresh is true, the dialer will use a lazy
- // cache, refresh certificates only when a connection attempt needs a fresh
- // certificate. Otherwise, a refresh ahead cache will be used. The refresh
- // ahead cache assumes a background goroutine may run consistently.
- lazyRefresh bool
-
- // defaultDialConfig holds the constructor level DialOptions, so that it
- // can be copied and mutated by the Dial function.
- defaultDialConfig dialConfig
-
- // dialerID uniquely identifies a Dialer. Used for monitoring purposes,
- // *only* when a client has configured OpenCensus exporters.
- dialerID string
-
- // dialFunc is the function used to connect to the address on the named
- // network. By default, it is golang.org/x/net/proxy#Dial.
- dialFunc func(cxt context.Context, network, addr string) (net.Conn, error)
-
- // iamTokenSource supplies the OAuth2 token used for IAM DB Authn.
- iamTokenSource oauth2.TokenSource
-}
-
-var (
- errUseTokenSource = errors.New("use WithTokenSource when IAM AuthN is not enabled")
- errUseIAMTokenSource = errors.New("use WithIAMAuthNTokenSources instead of WithTokenSource be used when IAM AuthN is enabled")
-)
-
-type nullLogger struct{}
-
-func (nullLogger) Debugf(_ context.Context, _ string, _ ...interface{}) {}
-
-// NewDialer creates a new Dialer.
-//
-// Initial calls to NewDialer make take longer than normal because generation of an
-// RSA keypair is performed. Calls with a WithRSAKeyPair DialOption or after a default
-// RSA keypair is generated will be faster.
-func NewDialer(ctx context.Context, opts ...Option) (*Dialer, error) {
- cfg := &dialerConfig{
- refreshTimeout: cloudsql.RefreshTimeout,
- dialFunc: proxy.Dial,
- logger: nullLogger{},
- useragents: []string{userAgent},
- serviceUniverse: "googleapis.com",
- }
- for _, opt := range opts {
- opt(cfg)
- if cfg.err != nil {
- return nil, cfg.err
- }
- }
- if cfg.useIAMAuthN && cfg.setTokenSource && !cfg.setIAMAuthNTokenSource {
- return nil, errUseIAMTokenSource
- }
- if cfg.setIAMAuthNTokenSource && !cfg.useIAMAuthN {
- return nil, errUseTokenSource
- }
- // Add this to the end to make sure it's not overridden
- cfg.sqladminOpts = append(cfg.sqladminOpts, option.WithUserAgent(strings.Join(cfg.useragents, " ")))
-
- // If callers have not provided a token source, either explicitly with
- // WithTokenSource or implicitly with WithCredentialsJSON etc., then use the
- // default token source.
- if !cfg.setCredentials {
- c, err := google.FindDefaultCredentials(ctx, sqladmin.SqlserviceAdminScope)
- if err != nil {
- return nil, fmt.Errorf("failed to create default credentials: %v", err)
- }
- ud, err := c.GetUniverseDomain()
- if err != nil {
- return nil, fmt.Errorf("failed to get universe domain: %v", err)
- }
- cfg.credentialsUniverse = ud
- cfg.sqladminOpts = append(cfg.sqladminOpts, option.WithTokenSource(c.TokenSource))
- scoped, err := google.DefaultTokenSource(ctx, iamLoginScope)
- if err != nil {
- return nil, fmt.Errorf("failed to create scoped token source: %v", err)
- }
- cfg.iamLoginTokenSource = scoped
- }
-
- if cfg.setUniverseDomain && cfg.setAdminAPIEndpoint {
- return nil, errors.New(
- "can not use WithAdminAPIEndpoint and WithUniverseDomain Options together, " +
- "use WithAdminAPIEndpoint (it already contains the universe domain)",
- )
- }
-
- if cfg.credentialsUniverse != "" && cfg.serviceUniverse != "" {
- if cfg.credentialsUniverse != cfg.serviceUniverse {
- return nil, fmt.Errorf(
- "the configured service universe domain (%s) does not match the credential universe domain (%s)",
- cfg.serviceUniverse, cfg.credentialsUniverse,
- )
- }
- }
-
- client, err := sqladmin.NewService(ctx, cfg.sqladminOpts...)
- if err != nil {
- return nil, fmt.Errorf("failed to create sqladmin client: %v", err)
- }
-
- dc := dialConfig{
- ipType: cloudsql.PublicIP,
- tcpKeepAlive: defaultTCPKeepAlive,
- useIAMAuthN: cfg.useIAMAuthN,
- }
- for _, opt := range cfg.dialOpts {
- opt(&dc)
- }
-
- if err := trace.InitMetrics(); err != nil {
- return nil, err
- }
- g, err := newKeyGenerator(cfg.rsaKey, cfg.lazyRefresh,
- func() (*rsa.PrivateKey, error) {
- return rsa.GenerateKey(rand.Reader, 2048)
- })
- if err != nil {
- return nil, err
- }
- d := &Dialer{
- closed: make(chan struct{}),
- cache: make(map[instance.ConnName]monitoredCache),
- lazyRefresh: cfg.lazyRefresh,
- keyGenerator: g,
- refreshTimeout: cfg.refreshTimeout,
- sqladmin: client,
- logger: cfg.logger,
- defaultDialConfig: dc,
- dialerID: uuid.New().String(),
- iamTokenSource: cfg.iamLoginTokenSource,
- dialFunc: cfg.dialFunc,
- }
- return d, nil
-}
-
-// Dial returns a net.Conn connected to the specified Cloud SQL instance. The
-// icn argument must be the instance's connection name, which is in the format
-// "project-name:region:instance-name".
-func (d *Dialer) Dial(ctx context.Context, icn string, opts ...DialOption) (conn net.Conn, err error) {
- select {
- case <-d.closed:
- return nil, ErrDialerClosed
- default:
- }
- startTime := time.Now()
- var endDial trace.EndSpanFunc
- ctx, endDial = trace.StartSpan(ctx, "cloud.google.com/go/cloudsqlconn.Dial",
- trace.AddInstanceName(icn),
- trace.AddDialerID(d.dialerID),
- )
- defer func() {
- go trace.RecordDialError(context.Background(), icn, d.dialerID, err)
- endDial(err)
- }()
- cn, err := instance.ParseConnName(icn)
- if err != nil {
- return nil, err
- }
-
- cfg := d.defaultDialConfig
- for _, opt := range opts {
- opt(&cfg)
- }
-
- var endInfo trace.EndSpanFunc
- ctx, endInfo = trace.StartSpan(ctx, "cloud.google.com/go/cloudsqlconn/internal.InstanceInfo")
- c, err := d.connectionInfoCache(ctx, cn, &cfg.useIAMAuthN)
- if err != nil {
- endInfo(err)
- return nil, err
- }
- ci, err := c.ConnectionInfo(ctx)
- if err != nil {
- d.removeCached(ctx, cn, c, err)
- endInfo(err)
- return nil, err
- }
- endInfo(err)
-
- // If the client certificate has expired (as when the computer goes to
- // sleep, and the refresh cycle cannot run), force a refresh immediately.
- // The TLS handshake will not fail on an expired client certificate. It's
- // not until the first read where the client cert error will be surfaced.
- // So check that the certificate is valid before proceeding.
- if !validClientCert(ctx, cn, d.logger, ci.Expiration) {
- d.logger.Debugf(ctx, "[%v] Refreshing certificate now", cn.String())
- c.ForceRefresh()
- // Block on refreshed connection info
- ci, err = c.ConnectionInfo(ctx)
- if err != nil {
- d.removeCached(ctx, cn, c, err)
- return nil, err
- }
- }
-
- var connectEnd trace.EndSpanFunc
- ctx, connectEnd = trace.StartSpan(ctx, "cloud.google.com/go/cloudsqlconn/internal.Connect")
- defer func() { connectEnd(err) }()
- addr, err := ci.Addr(cfg.ipType)
- if err != nil {
- d.removeCached(ctx, cn, c, err)
- return nil, err
- }
- addr = net.JoinHostPort(addr, serverProxyPort)
- f := d.dialFunc
- if cfg.dialFunc != nil {
- f = cfg.dialFunc
- }
- d.logger.Debugf(ctx, "[%v] Dialing %v", cn.String(), addr)
- conn, err = f(ctx, "tcp", addr)
- if err != nil {
- d.logger.Debugf(ctx, "[%v] Dialing %v failed: %v", cn.String(), addr, err)
- // refresh the instance info in case it caused the connection failure
- c.ForceRefresh()
- return nil, errtype.NewDialError("failed to dial", cn.String(), err)
- }
- if c, ok := conn.(*net.TCPConn); ok {
- if err := c.SetKeepAlive(true); err != nil {
- return nil, errtype.NewDialError("failed to set keep-alive", cn.String(), err)
- }
- if err := c.SetKeepAlivePeriod(cfg.tcpKeepAlive); err != nil {
- return nil, errtype.NewDialError("failed to set keep-alive period", cn.String(), err)
- }
- }
-
- tlsConn := tls.Client(conn, ci.TLSConfig())
- err = tlsConn.HandshakeContext(ctx)
- if err != nil {
- d.logger.Debugf(ctx, "[%v] TLS handshake failed: %v", cn.String(), err)
- // refresh the instance info in case it caused the handshake failure
- c.ForceRefresh()
- _ = tlsConn.Close() // best effort close attempt
- return nil, errtype.NewDialError("handshake failed", cn.String(), err)
- }
-
- latency := time.Since(startTime).Milliseconds()
- go func() {
- n := atomic.AddUint64(c.openConns, 1)
- trace.RecordOpenConnections(ctx, int64(n), d.dialerID, cn.String())
- trace.RecordDialLatency(ctx, icn, d.dialerID, latency)
- }()
-
- return newInstrumentedConn(tlsConn, func() {
- n := atomic.AddUint64(c.openConns, ^uint64(0))
- trace.RecordOpenConnections(context.Background(), int64(n), d.dialerID, cn.String())
- }), nil
-}
-
-// removeCached stops all background refreshes and deletes the connection
-// info cache from the map of caches.
-func (d *Dialer) removeCached(
- ctx context.Context,
- i instance.ConnName, c connectionInfoCache, err error,
-) {
- d.logger.Debugf(
- ctx,
- "[%v] Removing connection info from cache: %v",
- i.String(),
- err,
- )
- d.lock.Lock()
- defer d.lock.Unlock()
- c.Close()
- delete(d.cache, i)
-}
-
-// validClientCert checks that the ephemeral client certificate retrieved from
-// the cache is unexpired. The time comparisons strip the monotonic clock value
-// to ensure an accurate result, even after laptop sleep.
-func validClientCert(
- ctx context.Context, cn instance.ConnName,
- l debug.ContextLogger, expiration time.Time,
-) bool {
- // Use UTC() to strip monotonic clock value to guard against inaccurate
- // comparisons, especially after laptop sleep.
- // See the comments on the monotonic clock in the Go documentation for
- // details: https://pkg.go.dev/time#hdr-Monotonic_Clocks
- now := time.Now().UTC()
- valid := expiration.UTC().After(now)
- l.Debugf(
- ctx,
- "[%v] Now = %v, Current cert expiration = %v",
- cn.String(),
- now.Format(time.RFC3339),
- expiration.UTC().Format(time.RFC3339),
- )
- l.Debugf(ctx, "[%v] Cert is valid = %v", cn.String(), valid)
- return valid
-}
-
-// EngineVersion returns the engine type and version for the instance
-// connection name. The value will correspond to one of the following types for
-// the instance:
-// https://cloud.google.com/sql/docs/mysql/admin-api/rest/v1beta4/SqlDatabaseVersion
-func (d *Dialer) EngineVersion(ctx context.Context, icn string) (string, error) {
- cn, err := instance.ParseConnName(icn)
- if err != nil {
- return "", err
- }
- c, err := d.connectionInfoCache(ctx, cn, &d.defaultDialConfig.useIAMAuthN)
- if err != nil {
- return "", err
- }
- ci, err := c.ConnectionInfo(ctx)
- if err != nil {
- d.removeCached(ctx, cn, c, err)
- return "", err
- }
- return ci.DBVersion, nil
-}
-
-// Warmup starts the background refresh necessary to connect to the instance.
-// Use Warmup to start the refresh process early if you don't know when you'll
-// need to call "Dial".
-func (d *Dialer) Warmup(ctx context.Context, icn string, opts ...DialOption) error {
- cn, err := instance.ParseConnName(icn)
- if err != nil {
- return err
- }
- cfg := d.defaultDialConfig
- for _, opt := range opts {
- opt(&cfg)
- }
- c, err := d.connectionInfoCache(ctx, cn, &cfg.useIAMAuthN)
- if err != nil {
- return err
- }
- _, err = c.ConnectionInfo(ctx)
- if err != nil {
- d.removeCached(ctx, cn, c, err)
- }
- return err
-}
-
-// newInstrumentedConn initializes an instrumentedConn that on closing will
-// decrement the number of open connects and record the result.
-func newInstrumentedConn(conn net.Conn, closeFunc func()) *instrumentedConn {
- return &instrumentedConn{
- Conn: conn,
- closeFunc: closeFunc,
- }
-}
-
-// instrumentedConn wraps a net.Conn and invokes closeFunc when the connection
-// is closed.
-type instrumentedConn struct {
- net.Conn
- closeFunc func()
-}
-
-// Close delegates to the underlying net.Conn interface and reports the close
-// to the provided closeFunc only when Close returns no error.
-func (i *instrumentedConn) Close() error {
- err := i.Conn.Close()
- if err != nil {
- return err
- }
- go i.closeFunc()
- return nil
-}
-
-// Close closes the Dialer; it prevents the Dialer from refreshing the information
-// needed to connect.
-func (d *Dialer) Close() error {
- // Check if Close has already been called.
- select {
- case <-d.closed:
- return nil
- default:
- }
- close(d.closed)
- d.lock.Lock()
- defer d.lock.Unlock()
- for _, i := range d.cache {
- i.Close()
- }
- return nil
-}
-
-// connectionInfoCache is a helper function for returning the appropriate
-// connection info Cache in a threadsafe way. It will create a new cache,
-// modify the existing one, or leave it unchanged as needed.
-func (d *Dialer) connectionInfoCache(
- ctx context.Context, cn instance.ConnName, useIAMAuthN *bool,
-) (monitoredCache, error) {
- d.lock.RLock()
- c, ok := d.cache[cn]
- d.lock.RUnlock()
- if !ok {
- d.lock.Lock()
- defer d.lock.Unlock()
- // Recheck to ensure instance wasn't created or changed between locks
- c, ok = d.cache[cn]
- if !ok {
- var useIAMAuthNDial bool
- if useIAMAuthN != nil {
- useIAMAuthNDial = *useIAMAuthN
- }
- d.logger.Debugf(ctx, "[%v] Connection info added to cache", cn.String())
- k, err := d.keyGenerator.rsaKey()
- if err != nil {
- return monitoredCache{}, err
- }
- var cache connectionInfoCache
- if d.lazyRefresh {
- cache = cloudsql.NewLazyRefreshCache(
- cn,
- d.logger,
- d.sqladmin, k,
- d.refreshTimeout, d.iamTokenSource,
- d.dialerID, useIAMAuthNDial,
- )
- } else {
- cache = cloudsql.NewRefreshAheadCache(
- cn,
- d.logger,
- d.sqladmin, k,
- d.refreshTimeout, d.iamTokenSource,
- d.dialerID, useIAMAuthNDial,
- )
- }
- var count uint64
- c = monitoredCache{openConns: &count, connectionInfoCache: cache}
- d.cache[cn] = c
- }
- }
-
- c.UpdateRefresh(useIAMAuthN)
-
- return c, nil
-}
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/doc.go b/vendor/cloud.google.com/go/cloudsqlconn/doc.go
deleted file mode 100644
index da602cc7..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/doc.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package cloudsqlconn provides functions for authorizing and encrypting
-// connections. These functions can be used with a database driver to
-// connect to a Cloud SQL instance.
-//
-// The instance connection name for a Cloud SQL instance is always in the
-// format "project:region:instance".
-//
-// # Creating a Dialer
-//
-// To start working with this package, create a Dialer. There are two ways of
-// creating a Dialer, which one you use depends on your database driver.
-//
-// # Postgres
-//
-// Postgres users have the option of using the [database/sql] interface or using [pgx] directly.
-//
-// To use a dialer with [pgx], we recommend using connection pooling with
-// [pgxpool]. To create the dialer use the NewDialer func.
-//
-// import (
-// "context"
-// "net"
-//
-// "cloud.google.com/go/cloudsqlconn"
-// "github.com/jackc/pgx/v4/pgxpool"
-// )
-//
-// func connect() {
-// // Configure the driver to connect to the database
-// dsn := "user=myuser password=mypass dbname=mydb sslmode=disable"
-// config, err := pgxpool.ParseConfig(dsn)
-// if err != nil {
-// // handle error
-// }
-//
-// // Create a new dialer with any options
-// d, err := cloudsqlconn.NewDialer(context.Background())
-// if err != nil {
-// // handle error
-// }
-//
-// // Tell the driver to use the Cloud SQL Go Connector to create connections
-// config.ConnConfig.DialFunc = func(ctx context.Context, _ string, instance string) (net.Conn, error) {
-// return d.Dial(ctx, "project:region:instance")
-// }
-//
-// // Interact with the driver directly as you normally would
-// conn, err := pgxpool.ConnectConfig(context.Background(), config)
-// if err != nil {
-// // handle error
-// }
-//
-// // call cleanup when you're done with the database connection
-// cleanup := func() error { return d.Close() }
-// // ... etc
-// }
-//
-// To use [database/sql], call pgxv4.RegisterDriver with any necessary Dialer
-// configuration.
-//
-// Note: the connection string must use the keyword/value format
-// with host set to the instance connection name. The returned cleanup func
-// will stop the dialer's background refresh goroutine and so should only be
-// called when you're done with the Dialer.
-//
-// import (
-// "database/sql"
-//
-// "cloud.google.com/go/cloudsqlconn"
-// "cloud.google.com/go/cloudsqlconn/postgres/pgxv4"
-// )
-//
-// func connect() {
-// // adjust options as needed
-// cleanup, err := pgxv4.RegisterDriver("cloudsql-postgres", cloudsqlconn.WithIAMAuthN())
-// if err != nil {
-// // ... handle error
-// }
-// // call cleanup when you're done with the database connection
-// defer cleanup()
-//
-// db, err := sql.Open(
-// "cloudsql-postgres",
-// "host=project:region:instance user=myuser password=mypass dbname=mydb sslmode=disable",
-// )
-// // ... etc
-// }
-//
-// # MySQL
-//
-// MySQL users should use [database/sql]. Use mysql.RegisterDriver with any
-// necessary Dialer configuration.
-//
-// Note: The returned cleanup func will stop the dialer's background refresh
-// goroutine and should only be called when you're done with the Dialer.
-//
-// import (
-// "database/sql"
-//
-// "cloud.google.com/go/cloudsqlconn"
-// "cloud.google.com/go/cloudsqlconn/mysql/mysql"
-// )
-//
-// func connect() {
-// // adjust options as needed
-// cleanup, err := mysql.RegisterDriver("cloudsql-mysql", cloudsqlconn.WithIAMAuthN())
-// if err != nil {
-// // ... handle error
-// }
-// // call cleanup when you're done with the database connection
-// defer cleanup()
-//
-// db, err := sql.Open(
-// "cloudsql-mysql",
-// "myuser:mypass@cloudsql-mysql(project:region:instance)/mydb",
-// )
-// // ... etc
-// }
-//
-// # SQL Server
-//
-// SQL Server users should use [database/sql]. Use mssql.RegisterDriver with any
-// necessary Dialer configuration.
-//
-// Note: The returned cleanup func will stop the dialer's background refresh
-// goroutine and should only be called when you're done with the Dialer.
-//
-// import (
-// "database/sql"
-//
-// "cloud.google.com/go/cloudsqlconn"
-// "cloud.google.com/go/cloudsqlconn/sqlserver/mssql"
-// )
-//
-// func connect() {
-// cleanup, err := mssql.RegisterDriver("cloudsql-sqlserver")
-// if err != nil {
-// // ... handle error
-// }
-// // call cleanup when you're done with the database connection
-// defer cleanup()
-//
-// db, err := sql.Open(
-// "cloudsql-sqlserver",
-// "sqlserver://user:password@localhost?database=mydb&cloudsql=project:region:instance",
-// )
-// // ... etc
-// }
-//
-// [database/sql]: https://pkg.go.dev/database/sql
-// [pgx]: https://github.com/jackc/pgx
-// [pgxpool]: https://pkg.go.dev/github.com/jackc/pgx/v4/pgxpool
-package cloudsqlconn
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/errtype/errors.go b/vendor/cloud.google.com/go/cloudsqlconn/errtype/errors.go
deleted file mode 100644
index f6bd0252..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/errtype/errors.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2021 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package errtype provides a number of concrete types which are used by the
-// cloudsqlconn package.
-package errtype
-
-import "fmt"
-
-type genericError struct {
- Message string
- ConnName string
-}
-
-func (e *genericError) Error() string {
- return fmt.Sprintf("%v (connection name = %q)", e.Message, e.ConnName)
-}
-
-// NewConfigError initializes a ConfigError.
-func NewConfigError(msg, cn string) *ConfigError {
- return &ConfigError{
- genericError: &genericError{Message: "Config error: " + msg, ConnName: cn},
- }
-}
-
-// ConfigError represents an incorrect request by the user. Config errors
-// usually indicate a semantic error (e.g., the instance connection name is
-// malformed, the SQL instance does not support the requested IP type, etc.)
-// ConfigError's should not be retried.
-type ConfigError struct{ *genericError }
-
-// NewRefreshError initializes a RefreshError.
-func NewRefreshError(msg, cn string, err error) *RefreshError {
- return &RefreshError{
- genericError: &genericError{Message: msg, ConnName: cn},
- Err: err,
- }
-}
-
-// RefreshError means that an error occurred during the background
-// refresh operation. In general, this is an unexpected error caused by
-// an interaction with the API itself (e.g., missing certificates,
-// invalid certificate encoding, region mismatch with the requested
-// instance connection name, etc.). RefreshError's usually can be retried.
-type RefreshError struct {
- *genericError
- // Err is the underlying error and may be nil.
- Err error
-}
-
-func (e *RefreshError) Error() string {
- if e.Err == nil {
- return fmt.Sprintf("Refresh error: %v", e.genericError)
- }
- return fmt.Sprintf("Refresh error: %v: %v", e.genericError, e.Err)
-}
-
-func (e *RefreshError) Unwrap() error { return e.Err }
-
-// NewDialError initializes a DialError.
-func NewDialError(msg, cn string, err error) *DialError {
- return &DialError{
- genericError: &genericError{Message: msg, ConnName: cn},
- Err: err,
- }
-}
-
-// DialError represents a problem that occurred when trying to dial a SQL
-// instance (e.g., a failure to set the keep-alive property, a TLS handshake
-// failure, a missing certificate, etc.). DialError's are often network-related
-// and can be retried.
-type DialError struct {
- *genericError
- // Err is the underlying error and may be nil.
- Err error
-}
-
-func (e *DialError) Error() string {
- if e.Err == nil {
- return fmt.Sprintf("Dial error: %v", e.genericError)
- }
- return fmt.Sprintf("Dial error: %v: %v", e.genericError, e.Err)
-}
-
-func (e *DialError) Unwrap() error { return e.Err }
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/instance/conn_name.go b/vendor/cloud.google.com/go/cloudsqlconn/instance/conn_name.go
deleted file mode 100644
index d4f33601..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/instance/conn_name.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package instance
-
-import (
- "fmt"
- "regexp"
-
- "cloud.google.com/go/cloudsqlconn/errtype"
-)
-
-var (
- // Instance connection name is the format ::
- // Additionally, we have to support legacy "domain-scoped" projects
- // (e.g. "google.com:PROJECT")
- connNameRegex = regexp.MustCompile("([^:]+(:[^:]+)?):([^:]+):([^:]+)")
-)
-
-// ConnName represents the "instance connection name", in the format
-// "project:region:name".
-type ConnName struct {
- project string
- region string
- name string
-}
-
-func (c *ConnName) String() string {
- return fmt.Sprintf("%s:%s:%s", c.project, c.region, c.name)
-}
-
-// Project returns the project within which the Cloud SQL instance runs.
-func (c *ConnName) Project() string {
- return c.project
-}
-
-// Region returns the region where the Cloud SQL instance runs.
-func (c *ConnName) Region() string {
- return c.region
-}
-
-// Name returns the Cloud SQL instance name
-func (c *ConnName) Name() string {
- return c.name
-}
-
-// ParseConnName initializes a new ConnName struct.
-func ParseConnName(cn string) (ConnName, error) {
- b := []byte(cn)
- m := connNameRegex.FindSubmatch(b)
- if m == nil {
- err := errtype.NewConfigError(
- "invalid instance connection name, expected PROJECT:REGION:INSTANCE",
- cn,
- )
- return ConnName{}, err
- }
-
- c := ConnName{
- project: string(m[1]),
- region: string(m[3]),
- name: string(m[4]),
- }
- return c, nil
-}
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/instance.go b/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/instance.go
deleted file mode 100644
index bb6d52d1..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/instance.go
+++ /dev/null
@@ -1,484 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cloudsql
-
-import (
- "context"
- "crypto/rsa"
- "crypto/tls"
- "crypto/x509"
- "fmt"
- "sync"
- "time"
-
- "cloud.google.com/go/cloudsqlconn/debug"
- "cloud.google.com/go/cloudsqlconn/errtype"
- "cloud.google.com/go/cloudsqlconn/instance"
- "golang.org/x/oauth2"
- "golang.org/x/time/rate"
- sqladmin "google.golang.org/api/sqladmin/v1beta4"
-)
-
-const (
- // the refresh buffer is the amount of time before a refresh operation's
- // certificate expires that a new refresh operation begins.
- refreshBuffer = 4 * time.Minute
-
- // refreshInterval is the amount of time between refresh attempts as
- // enforced by the rate limiter.
- refreshInterval = 30 * time.Second
-
- // RefreshTimeout is the maximum amount of time to wait for a refresh
- // cycle to complete. This value should be greater than the
- // refreshInterval.
- RefreshTimeout = 60 * time.Second
-
- // refreshBurst is the initial burst allowed by the rate limiter.
- refreshBurst = 2
-)
-
-// refreshOperation is a pending result of a refresh operation of data used to
-// connect securely. It should only be initialized by the Instance struct as
-// part of a refresh cycle.
-type refreshOperation struct {
- // indicates the struct is ready to read from
- ready chan struct{}
- // timer that triggers refresh, can be used to cancel.
- timer *time.Timer
- result ConnectionInfo
- err error
-}
-
-// cancel prevents the the refresh operation from starting, if it hasn't
-// already started. Returns true if timer was stopped successfully, or false if
-// it has already started.
-func (r *refreshOperation) cancel() bool {
- return r.timer.Stop()
-}
-
-// isValid returns true if this result is complete, successful, and is still
-// valid.
-func (r *refreshOperation) isValid() bool {
- // verify the refreshOperation has finished running
- select {
- default:
- return false
- case <-r.ready:
- if r.err != nil || time.Now().After(r.result.Expiration.Round(0)) {
- return false
- }
- return true
- }
-}
-
-// RefreshAheadCache manages the information used to connect to the Cloud SQL
-// instance by periodically calling the Cloud SQL Admin API. It automatically
-// refreshes the required information approximately 4 minutes before the
-// previous certificate expires (every ~56 minutes).
-type RefreshAheadCache struct {
- // openConns is the number of open connections to the instance.
- openConns uint64
-
- connName instance.ConnName
- logger debug.ContextLogger
-
- // refreshTimeout sets the maximum duration a refresh cycle can run
- // for.
- refreshTimeout time.Duration
- // l controls the rate at which refresh cycles are run.
- l *rate.Limiter
- r adminAPIClient
-
- mu sync.RWMutex
- useIAMAuthNDial bool
- // cur represents the current refreshOperation that will be used to
- // create connections. If a valid complete refreshOperation isn't
- // available it's possible for cur to be equal to next.
- cur *refreshOperation
- // next represents a future or ongoing refreshOperation. Once complete,
- // it will replace cur and schedule a replacement to occur.
- next *refreshOperation
-
- // ctx is the default ctx for refresh operations. Canceling it prevents
- // new refresh operations from being triggered.
- ctx context.Context
- cancel context.CancelFunc
-}
-
-// NewRefreshAheadCache initializes a new Instance given an instance connection name
-func NewRefreshAheadCache(
- cn instance.ConnName,
- l debug.ContextLogger,
- client *sqladmin.Service,
- key *rsa.PrivateKey,
- refreshTimeout time.Duration,
- ts oauth2.TokenSource,
- dialerID string,
- useIAMAuthNDial bool,
-) *RefreshAheadCache {
- ctx, cancel := context.WithCancel(context.Background())
- i := &RefreshAheadCache{
- connName: cn,
- logger: l,
- l: rate.NewLimiter(rate.Every(refreshInterval), refreshBurst),
- r: newAdminAPIClient(
- l,
- client,
- key,
- ts,
- dialerID,
- ),
- refreshTimeout: refreshTimeout,
- useIAMAuthNDial: useIAMAuthNDial,
- ctx: ctx,
- cancel: cancel,
- }
- // For the initial refresh operation, set cur = next so that connection
- // requests block until the first refresh is complete.
- i.mu.Lock()
- i.cur = i.scheduleRefresh(0)
- i.next = i.cur
- i.mu.Unlock()
- return i
-}
-
-// Close closes the instance; it stops the refresh cycle and prevents it from
-// making additional calls to the Cloud SQL Admin API.
-func (i *RefreshAheadCache) Close() error {
- i.mu.Lock()
- defer i.mu.Unlock()
- i.cancel()
- i.cur.cancel()
- i.next.cancel()
- return nil
-}
-
-// ConnectionInfo contains all necessary information to connect securely to the
-// server-side Proxy running on a Cloud SQL instance.
-type ConnectionInfo struct {
- ConnectionName instance.ConnName
- ClientCertificate tls.Certificate
- ServerCaCert *x509.Certificate
- DBVersion string
- Expiration time.Time
-
- addrs map[string]string
-}
-
-// NewConnectionInfo initializes a ConnectionInfo struct.
-func NewConnectionInfo(
- cn instance.ConnName,
- version string,
- ipAddrs map[string]string,
- serverCaCert *x509.Certificate,
- clientCert tls.Certificate,
-) ConnectionInfo {
- return ConnectionInfo{
- addrs: ipAddrs,
- ServerCaCert: serverCaCert,
- ClientCertificate: clientCert,
- Expiration: clientCert.Leaf.NotAfter,
- DBVersion: version,
- ConnectionName: cn,
- }
-}
-
-// Addr returns the IP address or DNS name for the given IP type.
-func (c ConnectionInfo) Addr(ipType string) (string, error) {
- var (
- addr string
- ok bool
- )
- switch ipType {
- case AutoIP:
- // Try Public first
- addr, ok = c.addrs[PublicIP]
- if !ok {
- // Try Private second
- addr, ok = c.addrs[PrivateIP]
- }
- default:
- addr, ok = c.addrs[ipType]
- }
- if !ok {
- err := errtype.NewConfigError(
- fmt.Sprintf("instance does not have IP of type %q", ipType),
- c.ConnectionName.String(),
- )
- return "", err
- }
- return addr, nil
-}
-
-// TLSConfig constructs a TLS configuration for the given connection info.
-func (c ConnectionInfo) TLSConfig() *tls.Config {
- pool := x509.NewCertPool()
- pool.AddCert(c.ServerCaCert)
- return &tls.Config{
- ServerName: c.ConnectionName.String(),
- Certificates: []tls.Certificate{c.ClientCertificate},
- RootCAs: pool,
- // We need to set InsecureSkipVerify to true due to
- // https://github.com/GoogleCloudPlatform/cloudsql-proxy/issues/194
- // https://tip.golang.org/doc/go1.11#crypto/x509
- //
- // Since we have a secure channel to the Cloud SQL API which we use to
- // retrieve the certificates, we instead need to implement our own
- // VerifyPeerCertificate function that will verify that the certificate
- // is OK.
- InsecureSkipVerify: true,
- VerifyPeerCertificate: verifyPeerCertificateFunc(c.ConnectionName, pool),
- MinVersion: tls.VersionTLS13,
- }
-}
-
-// verifyPeerCertificateFunc creates a VerifyPeerCertificate func that
-// verifies that the peer certificate is in the cert pool. We need to define
-// our own because CloudSQL instances use the instance name (e.g.,
-// my-project:my-instance) instead of a valid domain name for the certificate's
-// Common Name.
-func verifyPeerCertificateFunc(
- cn instance.ConnName, pool *x509.CertPool,
-) func(rawCerts [][]byte, _ [][]*x509.Certificate) error {
- return func(rawCerts [][]byte, _ [][]*x509.Certificate) error {
- if len(rawCerts) == 0 {
- return errtype.NewDialError(
- "no certificate to verify", cn.String(), nil,
- )
- }
-
- cert, err := x509.ParseCertificate(rawCerts[0])
- if err != nil {
- return errtype.NewDialError(
- "failed to parse X.509 certificate", cn.String(), err,
- )
- }
-
- opts := x509.VerifyOptions{Roots: pool}
- if _, err = cert.Verify(opts); err != nil {
- return errtype.NewDialError(
- "failed to verify certificate", cn.String(), err,
- )
- }
-
- certInstanceName := fmt.Sprintf("%s:%s", cn.Project(), cn.Name())
- if cert.Subject.CommonName != certInstanceName {
- return errtype.NewDialError(
- fmt.Sprintf(
- "certificate had CN %q, expected %q",
- cert.Subject.CommonName, certInstanceName,
- ),
- cn.String(),
- nil,
- )
- }
- return nil
- }
-}
-
-// ConnectionInfo returns an IP address specified by ipType (i.e., public or
-// private) and a TLS config that can be used to connect to a Cloud SQL
-// instance.
-func (i *RefreshAheadCache) ConnectionInfo(ctx context.Context) (ConnectionInfo, error) {
- op, err := i.refreshOperation(ctx)
- if err != nil {
- return ConnectionInfo{}, err
- }
- return op.result, nil
-}
-
-// UpdateRefresh cancels all existing refresh attempts and schedules new
-// attempts with the provided config only if it differs from the current
-// configuration.
-func (i *RefreshAheadCache) UpdateRefresh(useIAMAuthNDial *bool) {
- i.mu.Lock()
- defer i.mu.Unlock()
- if useIAMAuthNDial != nil && *useIAMAuthNDial != i.useIAMAuthNDial {
- // Cancel any pending refreshes
- i.cur.cancel()
- i.next.cancel()
-
- i.useIAMAuthNDial = *useIAMAuthNDial
- // reschedule a new refresh immediately
- i.cur = i.scheduleRefresh(0)
- i.next = i.cur
- }
-}
-
-// ForceRefresh triggers an immediate refresh operation to be scheduled and
-// used for future connection attempts. Until the refresh completes, the
-// existing connection info will be available for use if valid.
-func (i *RefreshAheadCache) ForceRefresh() {
- i.mu.Lock()
- defer i.mu.Unlock()
- // If the next refresh hasn't started yet, we can cancel it and start an
- // immediate one
- if i.next.cancel() {
- i.next = i.scheduleRefresh(0)
- }
- // block all sequential connection attempts on the next refresh operation
- // if current is invalid
- if !i.cur.isValid() {
- i.cur = i.next
- }
-}
-
-// refreshOperation returns the most recent refresh operation
-// waiting for it to complete if necessary
-func (i *RefreshAheadCache) refreshOperation(ctx context.Context) (*refreshOperation, error) {
- i.mu.RLock()
- cur := i.cur
- i.mu.RUnlock()
- var err error
- select {
- case <-cur.ready:
- err = cur.err
- case <-ctx.Done():
- err = ctx.Err()
- case <-i.ctx.Done():
- err = i.ctx.Err()
- }
- if err != nil {
- return nil, err
- }
- return cur, nil
-}
-
-// refreshDuration returns the duration to wait before starting the next
-// refresh. Usually that duration will be half of the time until certificate
-// expiration.
-func refreshDuration(now, certExpiry time.Time) time.Duration {
- d := certExpiry.Sub(now.Round(0))
- if d < time.Hour {
- // Something is wrong with the certificate, refresh now.
- if d < refreshBuffer {
- return 0
- }
- // Otherwise wait until 4 minutes before expiration for next
- // refresh cycle.
- return d - refreshBuffer
- }
- return d / 2
-}
-
-// scheduleRefresh schedules a refresh operation to be triggered after a given
-// duration. The returned refreshOperation can be used to either Cancel or Wait
-// for the operation's completion.
-func (i *RefreshAheadCache) scheduleRefresh(d time.Duration) *refreshOperation {
- r := &refreshOperation{}
- r.ready = make(chan struct{})
- r.timer = time.AfterFunc(d, func() {
- // instance has been closed, don't schedule anything
- if err := i.ctx.Err(); err != nil {
- i.logger.Debugf(
- context.Background(),
- "[%v] Instance is closed, stopping refresh operations",
- i.connName.String(),
- )
- r.err = err
- close(r.ready)
- return
- }
- i.logger.Debugf(
- context.Background(),
- "[%v] Connection info refresh operation started",
- i.connName.String(),
- )
-
- ctx, cancel := context.WithTimeout(i.ctx, i.refreshTimeout)
- defer cancel()
-
- // avoid refreshing too often to try not to tax the SQL Admin
- // API quotas
- err := i.l.Wait(ctx)
- if err != nil {
- r.err = errtype.NewDialError(
- "context was canceled or expired before refresh completed",
- i.connName.String(),
- nil,
- )
- } else {
- var useIAMAuthN bool
- i.mu.Lock()
- useIAMAuthN = i.useIAMAuthNDial
- i.mu.Unlock()
- r.result, r.err = i.r.ConnectionInfo(
- ctx, i.connName, useIAMAuthN,
- )
- }
- switch r.err {
- case nil:
- i.logger.Debugf(
- ctx,
- "[%v] Connection info refresh operation complete",
- i.connName.String(),
- )
- i.logger.Debugf(
- ctx,
- "[%v] Current certificate expiration = %v",
- i.connName.String(),
- r.result.Expiration.UTC().Format(time.RFC3339),
- )
- default:
- i.logger.Debugf(
- ctx,
- "[%v] Connection info refresh operation failed, err = %v",
- i.connName.String(),
- r.err,
- )
- }
-
- close(r.ready)
-
- // Once the refresh is complete, update "current" with working
- // refreshOperation and schedule a new refresh
- i.mu.Lock()
- defer i.mu.Unlock()
-
- // if failed, scheduled the next refresh immediately
- if r.err != nil {
- i.logger.Debugf(
- ctx,
- "[%v] Connection info refresh operation scheduled immediately",
- i.connName.String(),
- )
- i.next = i.scheduleRefresh(0)
- // If the latest refreshOperation is bad, avoid replacing the
- // used refreshOperation while it's still valid and potentially
- // able to provide successful connections. TODO: This
- // means that errors while the current refreshOperation is still
- // valid are suppressed. We should try to surface
- // errors in a more meaningful way.
- if !i.cur.isValid() {
- i.cur = r
- }
- return
- }
-
- // Update the current results, and schedule the next refresh in
- // the future
- i.cur = r
- t := refreshDuration(time.Now(), i.cur.result.Expiration)
- i.logger.Debugf(
- ctx,
- "[%v] Connection info refresh operation scheduled at %v (now + %v)",
- i.connName.String(),
- time.Now().Add(t).UTC().Format(time.RFC3339),
- t.Round(time.Minute),
- )
- i.next = i.scheduleRefresh(t)
- })
- return r
-}
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/lazy.go b/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/lazy.go
deleted file mode 100644
index 5b65b3b9..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/lazy.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cloudsql
-
-import (
- "context"
- "crypto/rsa"
- "sync"
- "time"
-
- "cloud.google.com/go/cloudsqlconn/debug"
- "cloud.google.com/go/cloudsqlconn/instance"
- "golang.org/x/oauth2"
- sqladmin "google.golang.org/api/sqladmin/v1beta4"
-)
-
-// LazyRefreshCache is caches connection info and refreshes the cache only when
-// a caller requests connection info and the current certificate is expired.
-type LazyRefreshCache struct {
- connName instance.ConnName
- logger debug.ContextLogger
- r adminAPIClient
- mu sync.Mutex
- useIAMAuthNDial bool
- needsRefresh bool
- cached ConnectionInfo
-}
-
-// NewLazyRefreshCache initializes a new LazyRefreshCache.
-func NewLazyRefreshCache(
- cn instance.ConnName,
- l debug.ContextLogger,
- client *sqladmin.Service,
- key *rsa.PrivateKey,
- _ time.Duration,
- ts oauth2.TokenSource,
- dialerID string,
- useIAMAuthNDial bool,
-) *LazyRefreshCache {
- return &LazyRefreshCache{
- connName: cn,
- logger: l,
- r: newAdminAPIClient(
- l,
- client,
- key,
- ts,
- dialerID,
- ),
- useIAMAuthNDial: useIAMAuthNDial,
- }
-}
-
-// ConnectionInfo returns connection info for the associated instance. New
-// connection info is retrieved under two conditions:
-// - the current connection info's certificate has expired, or
-// - a caller has separately called ForceRefresh
-func (c *LazyRefreshCache) ConnectionInfo(
- ctx context.Context,
-) (ConnectionInfo, error) {
- c.mu.Lock()
- defer c.mu.Unlock()
- // strip monotonic clock with UTC()
- now := time.Now().UTC()
- // Pad expiration with a buffer to give the client plenty of time to
- // establish a connection to the server with the certificate.
- exp := c.cached.Expiration.UTC().Add(-refreshBuffer)
- if !c.needsRefresh && now.Before(exp) {
- c.logger.Debugf(
- ctx,
- "[%v] Connection info is still valid, using cached info",
- c.connName.String(),
- )
- return c.cached, nil
- }
-
- c.logger.Debugf(
- ctx,
- "[%v] Connection info refresh operation started",
- c.connName.String(),
- )
- ci, err := c.r.ConnectionInfo(ctx, c.connName, c.useIAMAuthNDial)
- if err != nil {
- c.logger.Debugf(
- ctx,
- "[%v] Connection info refresh operation failed, err = %v",
- c.connName.String(),
- err,
- )
- return ConnectionInfo{}, err
- }
- c.logger.Debugf(
- ctx,
- "[%v] Connection info refresh operation complete",
- c.connName.String(),
- )
- c.logger.Debugf(
- ctx,
- "[%v] Current certificate expiration = %v",
- c.connName.String(),
- ci.Expiration.UTC().Format(time.RFC3339),
- )
- c.cached = ci
- c.needsRefresh = false
- return ci, nil
-}
-
-// UpdateRefresh updates the refresh operation to either enable or disable IAM
-// authentication for the cached connection info.
-func (c *LazyRefreshCache) UpdateRefresh(useIAMAuthNDial *bool) {
- c.mu.Lock()
- defer c.mu.Unlock()
- if useIAMAuthNDial != nil && *useIAMAuthNDial != c.useIAMAuthNDial {
- c.useIAMAuthNDial = *useIAMAuthNDial
- c.needsRefresh = true
- }
-}
-
-// ForceRefresh invalidates the caches and configures the next call to
-// ConnectionInfo to retrieve a fresh connection info.
-func (c *LazyRefreshCache) ForceRefresh() {
- c.mu.Lock()
- defer c.mu.Unlock()
- c.needsRefresh = true
-}
-
-// Close is a no-op and provided purely for a consistent interface with other
-// caching types.
-func (c *LazyRefreshCache) Close() error {
- return nil
-}
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/refresh.go b/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/refresh.go
deleted file mode 100644
index 3d2e6b58..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/refresh.go
+++ /dev/null
@@ -1,363 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cloudsql
-
-import (
- "context"
- "crypto/rsa"
- "crypto/tls"
- "crypto/x509"
- "encoding/pem"
- "fmt"
- "strings"
- "time"
-
- "cloud.google.com/go/cloudsqlconn/debug"
- "cloud.google.com/go/cloudsqlconn/errtype"
- "cloud.google.com/go/cloudsqlconn/instance"
- "cloud.google.com/go/cloudsqlconn/internal/trace"
- "golang.org/x/oauth2"
- sqladmin "google.golang.org/api/sqladmin/v1beta4"
-)
-
-const (
- // PublicIP is the value for public IP Cloud SQL instances.
- PublicIP = "PUBLIC"
- // PrivateIP is the value for private IP Cloud SQL instances.
- PrivateIP = "PRIVATE"
- // PSC is the value for private service connect Cloud SQL instances.
- PSC = "PSC"
- // AutoIP selects public IP if available and otherwise selects private
- // IP.
- AutoIP = "AutoIP"
-)
-
-// metadata contains information about a Cloud SQL instance needed to create
-// connections.
-type metadata struct {
- ipAddrs map[string]string
- serverCaCert *x509.Certificate
- version string
-}
-
-// fetchMetadata uses the Cloud SQL Admin APIs get method to retrieve the
-// information about a Cloud SQL instance that is used to create secure
-// connections.
-func fetchMetadata(
- ctx context.Context, client *sqladmin.Service, inst instance.ConnName,
-) (m metadata, err error) {
-
- var end trace.EndSpanFunc
- ctx, end = trace.StartSpan(ctx, "cloud.google.com/go/cloudsqlconn/internal.FetchMetadata")
- defer func() { end(err) }()
-
- db, err := retry50x(ctx, func(ctx2 context.Context) (*sqladmin.ConnectSettings, error) {
- return client.Connect.Get(
- inst.Project(), inst.Name(),
- ).Context(ctx2).Do()
- }, exponentialBackoff)
- if err != nil {
- return metadata{}, errtype.NewRefreshError("failed to get instance metadata", inst.String(), err)
- }
- // validate the instance is supported for authenticated connections
- if db.Region != inst.Region() {
- msg := fmt.Sprintf(
- "provided region was mismatched - got %s, want %s",
- inst.Region(), db.Region,
- )
- return metadata{}, errtype.NewConfigError(msg, inst.String())
- }
- if db.BackendType != "SECOND_GEN" {
- return metadata{}, errtype.NewConfigError(
- "unsupported instance - only Second Generation instances are supported",
- inst.String(),
- )
- }
-
- // parse any ip addresses that might be used to connect
- ipAddrs := make(map[string]string)
- for _, ip := range db.IpAddresses {
- switch ip.Type {
- case "PRIMARY":
- ipAddrs[PublicIP] = ip.IpAddress
- case "PRIVATE":
- ipAddrs[PrivateIP] = ip.IpAddress
- }
- }
-
- // resolve DnsName into IP address for PSC
- if db.DnsName != "" {
- ipAddrs[PSC] = db.DnsName
- }
-
- if len(ipAddrs) == 0 {
- return metadata{}, errtype.NewConfigError(
- "cannot connect to instance - it has no supported IP addresses",
- inst.String(),
- )
- }
-
- // parse the server-side CA certificate
- b, _ := pem.Decode([]byte(db.ServerCaCert.Cert))
- if b == nil {
- return metadata{}, errtype.NewRefreshError("failed to decode valid PEM cert", inst.String(), nil)
- }
- cert, err := x509.ParseCertificate(b.Bytes)
- if err != nil {
- return metadata{}, errtype.NewRefreshError(
- fmt.Sprintf("failed to parse as X.509 certificate: %v", err),
- inst.String(),
- nil,
- )
- }
-
- m = metadata{
- ipAddrs: ipAddrs,
- serverCaCert: cert,
- version: db.DatabaseVersion,
- }
-
- return m, nil
-}
-
-var expired = time.Time{}.Add(1)
-
-// canRefresh determines if the provided token was refreshed or if it still has
-// the sentinel expiration, which means the token was provided without a
-// refresh token (as with the Cloud SQL Proxy's --token flag) and therefore
-// cannot be refreshed.
-func canRefresh(t *oauth2.Token) bool {
- return t.Expiry.Unix() != expired.Unix()
-}
-
-// refreshToken will retrieve a new token, only if a refresh token is present.
-func refreshToken(ts oauth2.TokenSource, tok *oauth2.Token) (*oauth2.Token, error) {
- expiredToken := &oauth2.Token{
- AccessToken: tok.AccessToken,
- TokenType: tok.TokenType,
- RefreshToken: tok.RefreshToken,
- Expiry: expired,
- }
- return oauth2.ReuseTokenSource(expiredToken, ts).Token()
-}
-
-// fetchEphemeralCert uses the Cloud SQL Admin API's createEphemeral method to
-// create a signed TLS certificate that authorized to connect via the Cloud SQL
-// instance's serverside proxy. The cert if valid for approximately one hour.
-func fetchEphemeralCert(
- ctx context.Context,
- client *sqladmin.Service,
- inst instance.ConnName,
- key *rsa.PrivateKey,
- ts oauth2.TokenSource,
-) (c tls.Certificate, err error) {
- var end trace.EndSpanFunc
- ctx, end = trace.StartSpan(ctx, "cloud.google.com/go/cloudsqlconn/internal.FetchEphemeralCert")
- defer func() { end(err) }()
- clientPubKey, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
- if err != nil {
- return tls.Certificate{}, err
- }
-
- req := sqladmin.GenerateEphemeralCertRequest{
- PublicKey: string(pem.EncodeToMemory(&pem.Block{Bytes: clientPubKey, Type: "RSA PUBLIC KEY"})),
- }
- var tok *oauth2.Token
- if ts != nil {
- var tokErr error
- tok, tokErr = ts.Token()
- if tokErr != nil {
- return tls.Certificate{}, errtype.NewRefreshError(
- "failed to retrieve Oauth2 token",
- inst.String(),
- tokErr,
- )
- }
- // Always refresh the token to ensure its expiration is far enough in
- // the future.
- tok, tokErr = refreshToken(ts, tok)
- if tokErr != nil {
- return tls.Certificate{}, errtype.NewRefreshError(
- "failed to refresh Oauth2 token",
- inst.String(),
- tokErr,
- )
- }
- req.AccessToken = tok.AccessToken
- }
- resp, err := retry50x(ctx, func(ctx2 context.Context) (*sqladmin.GenerateEphemeralCertResponse, error) {
- return client.Connect.GenerateEphemeralCert(
- inst.Project(), inst.Name(), &req,
- ).Context(ctx2).Do()
- }, exponentialBackoff)
- if err != nil {
- return tls.Certificate{}, errtype.NewRefreshError(
- "create ephemeral cert failed",
- inst.String(),
- err,
- )
- }
-
- // parse the client cert
- b, _ := pem.Decode([]byte(resp.EphemeralCert.Cert))
- if b == nil {
- return tls.Certificate{}, errtype.NewRefreshError(
- "failed to decode valid PEM cert",
- inst.String(),
- nil,
- )
- }
- clientCert, err := x509.ParseCertificate(b.Bytes)
- if err != nil {
- return tls.Certificate{}, errtype.NewRefreshError(
- fmt.Sprintf("failed to parse as X.509 certificate: %v", err),
- inst.String(),
- nil,
- )
- }
- if ts != nil {
- // Adjust the certificate's expiration to be the earliest of
- // the token's expiration or the certificate's expiration.
- if canRefresh(tok) && tok.Expiry.Before(clientCert.NotAfter) {
- clientCert.NotAfter = tok.Expiry
- }
- }
-
- c = tls.Certificate{
- Certificate: [][]byte{clientCert.Raw},
- PrivateKey: key,
- Leaf: clientCert,
- }
- return c, nil
-}
-
-// newAdminAPIClient creates a Refresher.
-func newAdminAPIClient(
- l debug.ContextLogger,
- svc *sqladmin.Service,
- key *rsa.PrivateKey,
- ts oauth2.TokenSource,
- dialerID string,
-) adminAPIClient {
- return adminAPIClient{
- dialerID: dialerID,
- logger: l,
- key: key,
- client: svc,
- ts: ts,
- }
-}
-
-// adminAPIClient manages the SQL Admin API access to instance metadata and to
-// ephemeral certificates.
-type adminAPIClient struct {
- // dialerID is the unique ID of the associated dialer.
- dialerID string
- logger debug.ContextLogger
- // key is used to generate the client certificate
- key *rsa.PrivateKey
- client *sqladmin.Service
- // ts is the TokenSource used for IAM DB AuthN.
- ts oauth2.TokenSource
-}
-
-// ConnectionInfo immediately performs a full refresh operation using the Cloud
-// SQL Admin API.
-func (c adminAPIClient) ConnectionInfo(
- ctx context.Context, cn instance.ConnName, iamAuthNDial bool,
-) (ci ConnectionInfo, err error) {
-
- var refreshEnd trace.EndSpanFunc
- ctx, refreshEnd = trace.StartSpan(ctx, "cloud.google.com/go/cloudsqlconn/internal.RefreshConnection",
- trace.AddInstanceName(cn.String()),
- )
- defer func() {
- go trace.RecordRefreshResult(context.Background(), cn.String(), c.dialerID, err)
- refreshEnd(err)
- }()
-
- // start async fetching the instance's metadata
- type mdRes struct {
- md metadata
- err error
- }
- mdC := make(chan mdRes, 1)
- go func() {
- defer close(mdC)
- md, err := fetchMetadata(ctx, c.client, cn)
- mdC <- mdRes{md, err}
- }()
-
- // start async fetching the certs
- type ecRes struct {
- ec tls.Certificate
- err error
- }
- ecC := make(chan ecRes, 1)
- go func() {
- defer close(ecC)
- var iamTS oauth2.TokenSource
- if iamAuthNDial {
- iamTS = c.ts
- }
- ec, err := fetchEphemeralCert(ctx, c.client, cn, c.key, iamTS)
- ecC <- ecRes{ec, err}
- }()
-
- // wait for the results of each operation
- var md metadata
- select {
- case r := <-mdC:
- if r.err != nil {
- return ConnectionInfo{}, fmt.Errorf("failed to get instance: %w", r.err)
- }
- md = r.md
- case <-ctx.Done():
- return ci, fmt.Errorf("refresh failed: %w", ctx.Err())
- }
- if iamAuthNDial {
- if vErr := supportsAutoIAMAuthN(md.version); vErr != nil {
- return ConnectionInfo{}, vErr
- }
- }
-
- var ec tls.Certificate
- select {
- case r := <-ecC:
- if r.err != nil {
- return ConnectionInfo{}, fmt.Errorf("fetch ephemeral cert failed: %w", r.err)
- }
- ec = r.ec
- case <-ctx.Done():
- return ConnectionInfo{}, fmt.Errorf("refresh failed: %w", ctx.Err())
- }
-
- return NewConnectionInfo(
- cn, md.version, md.ipAddrs, md.serverCaCert, ec,
- ), nil
-}
-
-// supportsAutoIAMAuthN checks that the engine support automatic IAM authn. If
-// auto IAM authn was not request, this is a no-op.
-func supportsAutoIAMAuthN(version string) error {
- switch {
- case strings.HasPrefix(version, "POSTGRES"):
- return nil
- case strings.HasPrefix(version, "MYSQL"):
- return nil
- default:
- return fmt.Errorf("%s does not support Auto IAM DB Authentication", version)
- }
-}
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/retry.go b/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/retry.go
deleted file mode 100644
index ff890000..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/internal/cloudsql/retry.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2024 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cloudsql
-
-import (
- "context"
- "math"
- "math/rand"
- "time"
-
- "google.golang.org/api/googleapi"
-)
-
-// exponentialBackoff calculates a duration based on the attempt i.
-//
-// The formula is:
-//
-// base * multi^(attempt + 1 + random)
-//
-// With base = 200ms and multi = 1.1618, and random = [0.0, 1.0),
-// the backoff values would fall between the following low and high ends:
-//
-// Attempt Low (ms) High (ms)
-//
-// 0 324 524
-// 1 524 847
-// 2 847 1371
-// 3 1371 2218
-// 4 2218 3588
-//
-// The theoretical worst case scenario would have a client wait 8.5s in total
-// for an API request to complete (with the first four attempts failing, and
-// the fifth succeeding).
-//
-// This backoff strategy matches the behavior of the Cloud SQL Proxy v1.
-func exponentialBackoff(attempt int) time.Duration {
- const (
- base = float64(200 * time.Millisecond)
- multi = 1.618
- )
- exp := float64(attempt+1) + rand.Float64()
- return time.Duration(base * math.Pow(multi, exp))
-}
-
-// retry50x will retry any 50x HTTP response up to maxRetries times. The
-// backoffFunc determines the duration to wait between attempts.
-func retry50x[T any](
- ctx context.Context,
- f func(context.Context) (*T, error),
- waitDuration func(int) time.Duration,
-) (*T, error) {
- const maxRetries = 5
- var (
- resp *T
- err error
- )
- for i := 0; i < maxRetries; i++ {
- resp, err = f(ctx)
- // If err is nil, break and return the response.
- if err == nil {
- break
- }
-
- gErr, ok := err.(*googleapi.Error)
- // If err is not a googleapi.Error, don't retry.
- if !ok {
- return nil, err
- }
- // If the error code is not a 50x error, don't retry.
- if gErr.Code < 500 {
- return nil, err
- }
-
- if wErr := wait(ctx, waitDuration(i)); wErr != nil {
- err = wErr
- break
- }
-
- }
- return resp, err
-}
-
-// wait will block until the provided duration passes or the context is
-// canceled, whatever happens first.
-func wait(ctx context.Context, d time.Duration) error {
- timer := time.NewTimer(d)
- select {
- case <-ctx.Done():
- if !timer.Stop() {
- <-timer.C
- }
- return ctx.Err()
- case <-timer.C:
- return nil
- }
-}
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/internal/trace/metrics.go b/vendor/cloud.google.com/go/cloudsqlconn/internal/trace/metrics.go
deleted file mode 100644
index 801454ee..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/internal/trace/metrics.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2022 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "context"
- "errors"
- "fmt"
- "strings"
- "sync"
-
- "go.opencensus.io/stats"
- "go.opencensus.io/stats/view"
- "go.opencensus.io/tag"
- "google.golang.org/api/googleapi"
-)
-
-var (
- keyInstance, _ = tag.NewKey("cloudsql_instance")
- keyDialerID, _ = tag.NewKey("cloudsql_dialer_id")
- keyErrorCode, _ = tag.NewKey("cloudsql_error_code")
-
- mLatencyMS = stats.Int64(
- "cloudsqlconn/latency",
- "The latency in milliseconds per Dial",
- stats.UnitMilliseconds,
- )
- mConnections = stats.Int64(
- "cloudsqlconn/connection",
- "A connect or disconnect event to Cloud SQL",
- stats.UnitDimensionless,
- )
- mDialError = stats.Int64(
- "cloudsqlconn/dial_failure",
- "A failure to dial a Cloud SQL instance",
- stats.UnitDimensionless,
- )
- mSuccessfulRefresh = stats.Int64(
- "cloudsqlconn/refresh_success",
- "A successful certificate refresh operation",
- stats.UnitDimensionless,
- )
- mFailedRefresh = stats.Int64(
- "cloudsqlconn/refresh_failure",
- "A failed certificate refresh operation",
- stats.UnitDimensionless,
- )
-
- latencyView = &view.View{
- Name: "cloudsqlconn/dial_latency",
- Measure: mLatencyMS,
- Description: "The distribution of dialer latencies (ms)",
- // Latency in buckets, e.g., >=0ms, >=100ms, etc.
- Aggregation: view.Distribution(0, 5, 25, 100, 250, 500, 1000, 2000, 5000, 30000),
- TagKeys: []tag.Key{keyInstance, keyDialerID},
- }
- connectionsView = &view.View{
- Name: "cloudsqlconn/open_connections",
- Measure: mConnections,
- Description: "The current number of open Cloud SQL connections",
- Aggregation: view.LastValue(),
- TagKeys: []tag.Key{keyInstance, keyDialerID},
- }
- dialFailureView = &view.View{
- Name: "cloudsqlconn/dial_failure_count",
- Measure: mDialError,
- Description: "The number of failed dial attempts",
- Aggregation: view.Count(),
- TagKeys: []tag.Key{keyInstance, keyDialerID},
- }
- refreshCountView = &view.View{
- Name: "cloudsqlconn/refresh_success_count",
- Measure: mSuccessfulRefresh,
- Description: "The number of successful certificate refresh operations",
- Aggregation: view.Count(),
- TagKeys: []tag.Key{keyInstance, keyDialerID},
- }
- failedRefreshCountView = &view.View{
- Name: "cloudsqlconn/refresh_failure_count",
- Measure: mFailedRefresh,
- Description: "The number of failed certificate refresh operations",
- Aggregation: view.Count(),
- TagKeys: []tag.Key{keyInstance, keyDialerID, keyErrorCode},
- }
-
- registerOnce sync.Once
- registerErr error
-)
-
-// InitMetrics registers all views once. Without registering views, metrics will
-// not be reported. If any names of the registered views conflict, this function
-// returns an error to indicate an internal configuration problem.
-func InitMetrics() error {
- registerOnce.Do(func() {
- if rErr := view.Register(
- latencyView,
- connectionsView,
- dialFailureView,
- refreshCountView,
- failedRefreshCountView,
- ); rErr != nil {
- registerErr = fmt.Errorf("failed to initialize metrics: %v", rErr)
- }
- })
- return registerErr
-}
-
-// RecordDialLatency records a latency value for a call to dial.
-func RecordDialLatency(ctx context.Context, instance, dialerID string, latency int64) {
- // tag.New creates a new context and errors only if the new tag already
- // exists in the provided context. Since we're adding tags within this
- // package only, we can be confident that there were be no duplicate tags
- // and so can ignore the error.
- ctx, _ = tag.New(ctx, tag.Upsert(keyInstance, instance), tag.Upsert(keyDialerID, dialerID))
- stats.Record(ctx, mLatencyMS.M(latency))
-}
-
-// RecordOpenConnections records the number of open connections
-func RecordOpenConnections(ctx context.Context, num int64, dialerID, instance string) {
- ctx, _ = tag.New(ctx, tag.Upsert(keyInstance, instance), tag.Upsert(keyDialerID, dialerID))
- stats.Record(ctx, mConnections.M(num))
-}
-
-// RecordDialError reports a failed dial attempt. If err is nil, RecordDialError
-// is a no-op.
-func RecordDialError(ctx context.Context, instance, dialerID string, err error) {
- if err == nil {
- return
- }
- ctx, _ = tag.New(ctx, tag.Upsert(keyInstance, instance), tag.Upsert(keyDialerID, dialerID))
- stats.Record(ctx, mDialError.M(1))
-}
-
-// RecordRefreshResult reports the result of a refresh operation, either
-// successfull or failed.
-func RecordRefreshResult(ctx context.Context, instance, dialerID string, err error) {
- ctx, _ = tag.New(ctx, tag.Upsert(keyInstance, instance), tag.Upsert(keyDialerID, dialerID))
- if err != nil {
- if c := errorCode(err); c != "" {
- ctx, _ = tag.New(ctx, tag.Upsert(keyErrorCode, c))
- }
- stats.Record(ctx, mFailedRefresh.M(1))
- return
- }
- stats.Record(ctx, mSuccessfulRefresh.M(1))
-}
-
-// errorCode returns an error code as given from the SQL Admin API, provided the
-// error wraps a googleapi.Error type. If multiple error codes are returned from
-// the API, then a comma-separated string of all codes is returned.
-//
-// For possible error codes and their meaning see:
-// https://cloud.google.com/sql/docs/mysql/admin-api-error-messages
-func errorCode(err error) string {
- var apiErr *googleapi.Error
- ok := errors.As(err, &apiErr)
- if !ok {
- return ""
- }
- var codes []string
- for _, e := range apiErr.Errors {
- codes = append(codes, e.Reason)
- }
- return strings.Join(codes, ",")
-}
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/internal/trace/trace.go b/vendor/cloud.google.com/go/cloudsqlconn/internal/trace/trace.go
deleted file mode 100644
index 1be78ddc..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/internal/trace/trace.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2021 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "context"
-
- "go.opencensus.io/trace"
- "google.golang.org/api/googleapi"
- "google.golang.org/genproto/googleapis/rpc/code"
- "google.golang.org/grpc/status"
-)
-
-// EndSpanFunc is a function that ends a span, reporting an error if necessary.
-type EndSpanFunc func(error)
-
-// Attribute annotates a span with additional data.
-type Attribute struct {
- key string
- value interface{}
-}
-
-func (a Attribute) traceAttr() trace.Attribute {
- // always use a string attribute for now
- // if need for additional types arise, this can be expanded.
- return trace.StringAttribute(a.key, a.value.(string))
-}
-
-// AddInstanceName creates an attribute with the Cloud SQL instance name.
-func AddInstanceName(name string) Attribute {
- return Attribute{key: "/cloudsql/instance", value: name}
-}
-
-// AddDialerID creates an attribute to identify a particular dialer.
-func AddDialerID(dialerID string) Attribute {
- return Attribute{key: "/cloudsql/dialer_id", value: dialerID}
-}
-
-// StartSpan begins a span with the provided name and returns a context and a
-// function to end the created span.
-func StartSpan(ctx context.Context, name string, attrs ...Attribute) (context.Context, EndSpanFunc) {
- var span *trace.Span
- ctx, span = trace.StartSpan(ctx, name)
- as := make([]trace.Attribute, 0, len(attrs))
- for _, a := range attrs {
- as = append(as, a.traceAttr())
- }
- span.AddAttributes(as...)
- return ctx, func(err error) {
- if err != nil {
- span.SetStatus(toStatus(err))
- }
- span.End()
- }
-}
-
-// toStatus interrogates an error and converts it to an appropriate
-// OpenCensus status.
-// Note: this function is borrowed from
-// https://github.com/googleapis/google-cloud-go/blob/master/internal/trace/trace.go
-func toStatus(err error) trace.Status {
- if err2, ok := err.(*googleapi.Error); ok {
- return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message}
- }
- if s, ok := status.FromError(err); ok {
- return trace.Status{Code: int32(s.Code()), Message: s.Message()}
- }
- return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()}
-}
-
-// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto
-func httpStatusCodeToOCCode(httpStatusCode int) int32 {
- switch httpStatusCode {
- case 200:
- return int32(code.Code_OK)
- case 499:
- return int32(code.Code_CANCELLED)
- case 500:
- return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS
- case 400:
- return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE
- case 504:
- return int32(code.Code_DEADLINE_EXCEEDED)
- case 404:
- return int32(code.Code_NOT_FOUND)
- case 409:
- return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED
- case 403:
- return int32(code.Code_PERMISSION_DENIED)
- case 401:
- return int32(code.Code_UNAUTHENTICATED)
- case 429:
- return int32(code.Code_RESOURCE_EXHAUSTED)
- case 501:
- return int32(code.Code_UNIMPLEMENTED)
- case 503:
- return int32(code.Code_UNAVAILABLE)
- default:
- return int32(code.Code_UNKNOWN)
- }
-}
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/migration-guide.md b/vendor/cloud.google.com/go/cloudsqlconn/migration-guide.md
deleted file mode 100644
index 92db2133..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/migration-guide.md
+++ /dev/null
@@ -1,170 +0,0 @@
-# Migrating from Cloud SQL Proxy v1 to the Go Connector
-
-The Go Connector supports an improved version of the drivers available in v1.
-Unlike V1, the Go Connectors drivers support:
-
-1. Configuring a driver with all supported Go connector options
-1. Configuring multiple drivers per engine type using distinct registered driver
- names
-1. Support for SQL Server
-1. (Postgres only) Configuring a connection using pgx directly (see README for
- details).
-
-Below are examples of the Cloud SQL Proxy invocation vs the new Go connector
-invocation.
-
-## MySQL
-
-### Cloud SQL Proxy
-
-``` golang
-import (
- "database/sql"
-
- "github.com/GoogleCloudPlatform/cloudsql-proxy/proxy/dialers/mysql"
-)
-
-func connectMySQL() *sql.DB {
- cfg := mysql.Cfg("project:region:instance", "user", "password")
- cfg.DBName = "DB_1"
- cfg.ParseTime = true
-
- db, err := mysql.DialCfg(cfg)
- if err != nil {
- // handle error as necessary
- }
- return db
-}
-```
-
-### Cloud SQL Go Connector
-
-``` golang
-import (
- "database/sql"
-
- "cloud.google.com/go/cloudsqlconn"
- "cloud.google.com/go/cloudsqlconn/mysql/mysql"
-)
-
-func connectMySQL() *sql.DB {
- // Register a driver using whatever name you like.
- cleanup, err := mysql.RegisterDriver(
- "cloudsql-mysql",
- // any desired options go here, for example:
- cloudsqlconn.WithCredentialsFile("key.json"),
- )
- if err != nil {
- // handle error as necessary
- }
- // call cleanup to close the underylying driver when you're done with the
- // db.
- defer cleanup()
-
- db, err := sql.Open(
- "cloudsql-mysql", // matches the name registered above
- "myuser:mypass@cloudsql-mysql(project:region:instance)/mydb",
- )
- if err != nil {
- // handle error as necessary
- }
- return db
-}
-```
-
-## Postgres
-
-### Cloud SQL Proxy
-
-``` golang
-import (
- "database/sql"
-
- _ "github.com/GoogleCloudPlatform/cloudsql-proxy/proxy/dialers/postgres"
-)
-func connectPostgres() *sql.DB {
- db, err := sql.Open(
- "cloudsqlpostgres",
- "host=project:region:instance user=postgres dbname=postgres password=password sslmode=disable",
- )
- if err != nil {
- // handle error as necessary
- }
- return db
-}
-```
-
-### Cloud SQL Go Connector
-
-``` golang
-import (
- "database/sql"
-
- "cloud.google.com/go/cloudsqlconn"
- "cloud.google.com/go/cloudsqlconn/postgres/pgxv4"
-)
-
-func connectPostgres() *sql.DB {
- // Register a driver using whatever name you like.
- cleanup, err := pgxv4.RegisterDriver(
- "cloudsql-postgres",
- // any desired options go here, for example:
- cloudsqlconn.WithCredentialsFile("key.json"),
- cloudsqlconn.WithIAMAuthN(),
- )
- if err != nil {
- // handle error as necessary
- }
- // call cleanup to close the underylying driver when you're done with the
- // db.
- defer cleanup()
- db, err := sql.Open(
- "cloudsql-postgres", // matches the name registered above
- "host=project:region:instance user=postgres password=password dbname=postgres sslmode=disable",
- )
- if err != nil {
- // handle error as necessary
- }
- return db
-}
-```
-
-## SQL Server
-
-### Cloud SQL Proxy
-
-The Cloud SQL Proxy does not support SQL Server as a driver.
-
-### Cloud SQL Go Connector
-
-``` golang
-import (
- "database/sql"
-
- "cloud.google.com/go/cloudsqlconn"
- "cloud.google.com/go/cloudsqlconn/sqlserver/mssql"
-)
-
-func connectSQLServer() *sql.DB {
- // Register a driver using whatever name you like.
- cleanup, err := mssql.RegisterDriver(
- "cloudsql-sqlserver",
- // any desired options go here, for example:
- cloudsqlconn.WithCredentialsFile("key.json"),
- )
- if err != nil {
- // handle error as necessary
- }
- // call cleanup when you're done with the database connection
- defer cleanup()
-
- db, err := sql.Open(
- "cloudsql-sqlserver", // matches the name registered above
- "sqlserver://user:password@localhost?database=mydb&cloudsql=project:region:instance",
- )
- if err != nil {
- // handle error as necessary
- }
- return db
-}
-```
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/mysql/mysql/mysql.go b/vendor/cloud.google.com/go/cloudsqlconn/mysql/mysql/mysql.go
deleted file mode 100644
index e9f4a1b4..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/mysql/mysql/mysql.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright 2022 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package mysql provides a Cloud SQL MySQL driver that uses go-sql-driver/mysql
-// and works with database/sql
-package mysql
-
-import (
- "context"
- "database/sql"
- "database/sql/driver"
- "errors"
- "net"
- "syscall"
-
- "cloud.google.com/go/cloudsqlconn"
- "github.com/go-sql-driver/mysql"
-)
-
-// RegisterDriver registers a MySQL driver that uses the cloudsqlconn.Dialer
-// configured with the provided options. The choice of name is entirely up to
-// the caller and may be used to distinguish between multiple registrations of
-// differently configured Dialers.
-func RegisterDriver(name string, opts ...cloudsqlconn.Option) (func() error, error) {
- d, err := cloudsqlconn.NewDialer(context.Background(), opts...)
- if err != nil {
- return func() error { return nil }, err
- }
- mysql.RegisterDialContext(name, mysql.DialContextFunc(func(ctx context.Context, addr string) (net.Conn, error) {
- conn, err := d.Dial(ctx, addr)
- if err != nil {
- return nil, err
- }
- return LivenessCheckConn{Conn: conn}, nil
- }))
- sql.Register(name, &mysqlDriver{
- d: &mysql.MySQLDriver{},
- })
- return func() error { return d.Close() }, nil
-}
-
-// LivenessCheckConn wraps the underlying connection with support for a
-// liveness check.
-//
-// See https://github.com/go-sql-driver/mysql/pull/934 for details.
-type LivenessCheckConn struct {
- net.Conn
-}
-
-// SyscallConn supports a connection check in the MySQL driver by delegating to
-// the underlying non-TLS net.Conn.
-func (c *LivenessCheckConn) SyscallConn() (syscall.RawConn, error) {
- sconn, ok := c.Conn.(syscall.Conn)
- if !ok {
- return nil, errors.New("connection is not a syscall.Conn")
- }
- return sconn.SyscallConn()
-}
-
-type mysqlDriver struct {
- d *mysql.MySQLDriver
-}
-
-// Open accepts a DSN using the go-sql-driver/mysql format. See
-// https://github.com/go-sql-driver/mysql#dsn-data-source-name for details.
-// Note the protocol should match the name used when registering a driver. For
-// example, a connection string might look like this where "cloudsql-mysql" is
-// the named used when registering the driver:
-//
-// my-user:mypass@cloudsql-mysql(my-proj:us-central1:my-inst)/my-db
-func (d *mysqlDriver) Open(name string) (driver.Conn, error) {
- return d.d.Open(name)
-}
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/options.go b/vendor/cloud.google.com/go/cloudsqlconn/options.go
deleted file mode 100644
index 25e6ae86..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/options.go
+++ /dev/null
@@ -1,355 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cloudsqlconn
-
-import (
- "context"
- "crypto/rsa"
- "net"
- "net/http"
- "os"
- "time"
-
- "cloud.google.com/go/cloudsqlconn/debug"
- "cloud.google.com/go/cloudsqlconn/errtype"
- "cloud.google.com/go/cloudsqlconn/internal/cloudsql"
- "golang.org/x/oauth2"
- "golang.org/x/oauth2/google"
- apiopt "google.golang.org/api/option"
- sqladmin "google.golang.org/api/sqladmin/v1beta4"
-)
-
-// An Option is an option for configuring a Dialer.
-type Option func(d *dialerConfig)
-
-type dialerConfig struct {
- rsaKey *rsa.PrivateKey
- sqladminOpts []apiopt.ClientOption
- dialOpts []DialOption
- dialFunc func(ctx context.Context, network, addr string) (net.Conn, error)
- refreshTimeout time.Duration
- useIAMAuthN bool
- logger debug.ContextLogger
- lazyRefresh bool
- iamLoginTokenSource oauth2.TokenSource
- useragents []string
- credentialsUniverse string
- serviceUniverse string
- setAdminAPIEndpoint bool
- setUniverseDomain bool
- setCredentials bool
- setTokenSource bool
- setIAMAuthNTokenSource bool
- // err tracks any dialer options that may have failed.
- err error
-}
-
-// WithOptions turns a list of Option's into a single Option.
-func WithOptions(opts ...Option) Option {
- return func(d *dialerConfig) {
- for _, opt := range opts {
- opt(d)
- }
- }
-}
-
-// WithCredentialsFile returns an Option that specifies a service account
-// or refresh token JSON credentials file to be used as the basis for
-// authentication.
-func WithCredentialsFile(filename string) Option {
- return func(d *dialerConfig) {
- b, err := os.ReadFile(filename)
- if err != nil {
- d.err = errtype.NewConfigError(err.Error(), "n/a")
- return
- }
- opt := WithCredentialsJSON(b)
- opt(d)
- }
-}
-
-// WithCredentialsJSON returns an Option that specifies a service account
-// or refresh token JSON credentials to be used as the basis for authentication.
-func WithCredentialsJSON(b []byte) Option {
- return func(d *dialerConfig) {
- c, err := google.CredentialsFromJSON(context.Background(), b, sqladmin.SqlserviceAdminScope)
- if err != nil {
- d.err = errtype.NewConfigError(err.Error(), "n/a")
- return
- }
- ud, err := c.GetUniverseDomain()
- if err != nil {
- d.err = errtype.NewConfigError(err.Error(), "n/a")
- return
- }
- d.credentialsUniverse = ud
- d.sqladminOpts = append(d.sqladminOpts, apiopt.WithCredentials(c))
-
- // Create another set of credentials scoped to login only
- scoped, err := google.CredentialsFromJSON(context.Background(), b, iamLoginScope)
- if err != nil {
- d.err = errtype.NewConfigError(err.Error(), "n/a")
- return
- }
- d.iamLoginTokenSource = scoped.TokenSource
- d.setCredentials = true
- }
-}
-
-// WithUserAgent returns an Option that sets the User-Agent.
-func WithUserAgent(ua string) Option {
- return func(d *dialerConfig) {
- d.useragents = append(d.useragents, ua)
- }
-}
-
-// WithDefaultDialOptions returns an Option that specifies the default
-// DialOptions used.
-func WithDefaultDialOptions(opts ...DialOption) Option {
- return func(d *dialerConfig) {
- d.dialOpts = append(d.dialOpts, opts...)
- }
-}
-
-// WithTokenSource returns an Option that specifies an OAuth2 token source to be
-// used as the basis for authentication.
-//
-// When Auth IAM AuthN is enabled, use WithIAMAuthNTokenSources to set the token
-// source for login tokens separately from the API client token source.
-// WithTokenSource should not be used with WithIAMAuthNTokenSources.
-func WithTokenSource(s oauth2.TokenSource) Option {
- return func(d *dialerConfig) {
- d.setTokenSource = true
- d.setCredentials = true
- d.sqladminOpts = append(d.sqladminOpts, apiopt.WithTokenSource(s))
- }
-}
-
-// WithIAMAuthNTokenSources sets the oauth2.TokenSource for the API client and a
-// second token source for IAM AuthN login tokens. The API client token source
-// should have the following scopes:
-//
-// 1. https://www.googleapis.com/auth/sqlservice.admin, and
-// 2. https://www.googleapis.com/auth/cloud-platform
-//
-// The IAM AuthN token source on the other hand should only have:
-//
-// 1. https://www.googleapis.com/auth/sqlservice.login.
-//
-// Prefer this option over WithTokenSource when using IAM AuthN which does not
-// distinguish between the two token sources. WithIAMAuthNTokenSources should
-// not be used with WithTokenSource.
-func WithIAMAuthNTokenSources(apiTS, iamLoginTS oauth2.TokenSource) Option {
- return func(d *dialerConfig) {
- d.setIAMAuthNTokenSource = true
- d.setCredentials = true
- d.iamLoginTokenSource = iamLoginTS
- d.sqladminOpts = append(d.sqladminOpts, apiopt.WithTokenSource(apiTS))
- }
-}
-
-// WithRSAKey returns an Option that specifies a rsa.PrivateKey used to represent the client.
-func WithRSAKey(k *rsa.PrivateKey) Option {
- return func(d *dialerConfig) {
- d.rsaKey = k
- }
-}
-
-// WithRefreshTimeout returns an Option that sets a timeout on refresh
-// operations. Defaults to 60s.
-func WithRefreshTimeout(t time.Duration) Option {
- return func(d *dialerConfig) {
- d.refreshTimeout = t
- }
-}
-
-// WithHTTPClient configures the underlying SQL Admin API client with the
-// provided HTTP client. This option is generally unnecessary except for
-// advanced use-cases.
-func WithHTTPClient(client *http.Client) Option {
- return func(d *dialerConfig) {
- d.sqladminOpts = append(d.sqladminOpts, apiopt.WithHTTPClient(client))
- }
-}
-
-// WithAdminAPIEndpoint configures the underlying SQL Admin API client to use
-// the provided URL.
-func WithAdminAPIEndpoint(url string) Option {
- return func(d *dialerConfig) {
- d.sqladminOpts = append(d.sqladminOpts, apiopt.WithEndpoint(url))
- d.setAdminAPIEndpoint = true
- d.serviceUniverse = ""
- }
-}
-
-// WithUniverseDomain configures the underlying SQL Admin API client to use
-// the provided universe domain. Enables Trusted Partner Cloud (TPC).
-func WithUniverseDomain(ud string) Option {
- return func(d *dialerConfig) {
- d.sqladminOpts = append(d.sqladminOpts, apiopt.WithUniverseDomain(ud))
- d.serviceUniverse = ud
- d.setUniverseDomain = true
- }
-}
-
-// WithQuotaProject returns an Option that specifies the project used for quota and billing purposes.
-func WithQuotaProject(p string) Option {
- return func(cfg *dialerConfig) {
- cfg.sqladminOpts = append(cfg.sqladminOpts, apiopt.WithQuotaProject(p))
- }
-}
-
-// WithDialFunc configures the function used to connect to the address on the
-// named network. This option is generally unnecessary except for advanced
-// use-cases. The function is used for all invocations of Dial. To configure
-// a dial function per individual calls to dial, use WithOneOffDialFunc.
-func WithDialFunc(dial func(ctx context.Context, network, addr string) (net.Conn, error)) Option {
- return func(d *dialerConfig) {
- d.dialFunc = dial
- }
-}
-
-// WithIAMAuthN enables automatic IAM Authentication. If no token source has
-// been configured (such as with WithTokenSource, WithCredentialsFile, etc), the
-// dialer will use the default token source as defined by
-// https://pkg.go.dev/golang.org/x/oauth2/google#FindDefaultCredentialsWithParams.
-//
-// For documentation on automatic IAM Authentication, see
-// https://cloud.google.com/sql/docs/postgres/authentication.
-func WithIAMAuthN() Option {
- return func(d *dialerConfig) {
- d.useIAMAuthN = true
- }
-}
-
-type debugLoggerWithoutContext struct {
- logger debug.Logger
-}
-
-// Debugf implements debug.ContextLogger.
-func (d *debugLoggerWithoutContext) Debugf(_ context.Context, format string, args ...interface{}) {
- d.logger.Debugf(format, args...)
-}
-
-var _ debug.ContextLogger = new(debugLoggerWithoutContext)
-
-// WithDebugLogger configures a debug lgoger for reporting on internal
-// operations. By default the debug logger is disabled.
-//
-// Prefer WithContextDebugLogger instead
-func WithDebugLogger(l debug.Logger) Option {
- return func(d *dialerConfig) {
- d.logger = &debugLoggerWithoutContext{l}
- }
-}
-
-// WithContextDebugLogger configures a debug logger for reporting on internal
-// operations. By default the debug logger is disabled.
-func WithContextDebugLogger(l debug.ContextLogger) Option {
- return func(d *dialerConfig) {
- d.logger = l
- }
-}
-
-// WithLazyRefresh configures the dialer to refresh certificates on an
-// as-needed basis. If a certificate is expired when a connection request
-// occurs, the Go Connector will block the attempt and refresh the certificate
-// immediately. This option is useful when running the Go Connector in
-// environments where the CPU may be throttled, thus preventing a background
-// goroutine from running consistently (e.g., in Cloud Run the CPU is throttled
-// outside of a request context causing the background refresh to fail).
-func WithLazyRefresh() Option {
- return func(d *dialerConfig) {
- d.lazyRefresh = true
- }
-}
-
-// A DialOption is an option for configuring how a Dialer's Dial call is executed.
-type DialOption func(d *dialConfig)
-
-type dialConfig struct {
- dialFunc func(ctx context.Context, network, addr string) (net.Conn, error)
- ipType string
- tcpKeepAlive time.Duration
- useIAMAuthN bool
-}
-
-// DialOptions turns a list of DialOption instances into an DialOption.
-func DialOptions(opts ...DialOption) DialOption {
- return func(cfg *dialConfig) {
- for _, opt := range opts {
- opt(cfg)
- }
- }
-}
-
-// WithOneOffDialFunc configures the dial function on a one-off basis for an
-// individual call to Dial. To configure a dial function across all invocations
-// of Dial, use WithDialFunc.
-func WithOneOffDialFunc(dial func(ctx context.Context, network, addr string) (net.Conn, error)) DialOption {
- return func(c *dialConfig) {
- c.dialFunc = dial
- }
-}
-
-// WithTCPKeepAlive returns a DialOption that specifies the tcp keep alive period for the connection returned by Dial.
-func WithTCPKeepAlive(d time.Duration) DialOption {
- return func(cfg *dialConfig) {
- cfg.tcpKeepAlive = d
- }
-}
-
-// WithPublicIP returns a DialOption that specifies a public IP will be used to connect.
-func WithPublicIP() DialOption {
- return func(cfg *dialConfig) {
- cfg.ipType = cloudsql.PublicIP
- }
-}
-
-// WithPrivateIP returns a DialOption that specifies a private IP (VPC) will be used to connect.
-func WithPrivateIP() DialOption {
- return func(cfg *dialConfig) {
- cfg.ipType = cloudsql.PrivateIP
- }
-}
-
-// WithPSC returns a DialOption that specifies a PSC endpoint will be used to connect.
-func WithPSC() DialOption {
- return func(cfg *dialConfig) {
- cfg.ipType = cloudsql.PSC
- }
-}
-
-// WithAutoIP returns a DialOption that selects the public IP if available and
-// otherwise falls back to private IP. This option is present for backwards
-// compatibility only and is not recommended for use in production.
-func WithAutoIP() DialOption {
- return func(cfg *dialConfig) {
- cfg.ipType = cloudsql.AutoIP
- }
-}
-
-// WithDialIAMAuthN allows you to enable or disable IAM Authentication for this
-// instance as described in the documentation for WithIAMAuthN. This value will
-// override the Dialer-level configuration set with WithIAMAuthN.
-//
-// WARNING: This DialOption can cause a new Refresh operation to be triggered.
-// Toggling this option on or off between Dials may cause increased API usage
-// and/or delayed connection attempts.
-func WithDialIAMAuthN(b bool) DialOption {
- return func(cfg *dialConfig) {
- cfg.useIAMAuthN = b
- }
-}
diff --git a/vendor/cloud.google.com/go/cloudsqlconn/version.txt b/vendor/cloud.google.com/go/cloudsqlconn/version.txt
deleted file mode 100644
index 1cac385c..00000000
--- a/vendor/cloud.google.com/go/cloudsqlconn/version.txt
+++ /dev/null
@@ -1 +0,0 @@
-1.11.0
diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
deleted file mode 100644
index 967e0607..00000000
--- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# Changes
-
-## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.3...compute/metadata/v0.3.0) (2024-04-15)
-
-
-### Features
-
-* **compute/metadata:** Add context aware functions ([#9733](https://github.com/googleapis/google-cloud-go/issues/9733)) ([e4eb5b4](https://github.com/googleapis/google-cloud-go/commit/e4eb5b46ee2aec9d2fc18300bfd66015e25a0510))
-
-## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15)
-
-
-### Bug Fixes
-
-* **compute/metadata:** Switch DNS lookup to an absolute lookup ([119b410](https://github.com/googleapis/google-cloud-go/commit/119b41060c7895e45e48aee5621ad35607c4d021)), refs [#7165](https://github.com/googleapis/google-cloud-go/issues/7165)
-
-## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01)
-
-
-### Bug Fixes
-
-* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430)
-
-## [0.1.0] (2022-10-26)
-
-Initial release of metadata being it's own module.
diff --git a/vendor/cloud.google.com/go/compute/metadata/LICENSE b/vendor/cloud.google.com/go/compute/metadata/LICENSE
deleted file mode 100644
index d6456956..00000000
--- a/vendor/cloud.google.com/go/compute/metadata/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/cloud.google.com/go/compute/metadata/README.md b/vendor/cloud.google.com/go/compute/metadata/README.md
deleted file mode 100644
index f940fb2c..00000000
--- a/vendor/cloud.google.com/go/compute/metadata/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# Compute API
-
-[](https://pkg.go.dev/cloud.google.com/go/compute/metadata)
-
-This is a utility library for communicating with Google Cloud metadata service
-on Google Cloud.
-
-## Install
-
-```bash
-go get cloud.google.com/go/compute/metadata
-```
-
-## Go Version Support
-
-See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported)
-section in the root directory's README.
-
-## Contributing
-
-Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
-document for details.
-
-Please note that this project is released with a Contributor Code of Conduct.
-By participating in this project you agree to abide by its terms. See
-[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
-for more information.
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
deleted file mode 100644
index f67e3c7e..00000000
--- a/vendor/cloud.google.com/go/compute/metadata/metadata.go
+++ /dev/null
@@ -1,579 +0,0 @@
-// Copyright 2014 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package metadata provides access to Google Compute Engine (GCE)
-// metadata and API service accounts.
-//
-// This package is a wrapper around the GCE metadata service,
-// as documented at https://cloud.google.com/compute/docs/metadata/overview.
-package metadata // import "cloud.google.com/go/compute/metadata"
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "io"
- "net"
- "net/http"
- "net/url"
- "os"
- "runtime"
- "strings"
- "sync"
- "time"
-)
-
-const (
- // metadataIP is the documented metadata server IP address.
- metadataIP = "169.254.169.254"
-
- // metadataHostEnv is the environment variable specifying the
- // GCE metadata hostname. If empty, the default value of
- // metadataIP ("169.254.169.254") is used instead.
- // This is variable name is not defined by any spec, as far as
- // I know; it was made up for the Go package.
- metadataHostEnv = "GCE_METADATA_HOST"
-
- userAgent = "gcloud-golang/0.1"
-)
-
-type cachedValue struct {
- k string
- trim bool
- mu sync.Mutex
- v string
-}
-
-var (
- projID = &cachedValue{k: "project/project-id", trim: true}
- projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
- instID = &cachedValue{k: "instance/id", trim: true}
-)
-
-var defaultClient = &Client{hc: newDefaultHTTPClient()}
-
-func newDefaultHTTPClient() *http.Client {
- return &http.Client{
- Transport: &http.Transport{
- Dial: (&net.Dialer{
- Timeout: 2 * time.Second,
- KeepAlive: 30 * time.Second,
- }).Dial,
- IdleConnTimeout: 60 * time.Second,
- },
- Timeout: 5 * time.Second,
- }
-}
-
-// NotDefinedError is returned when requested metadata is not defined.
-//
-// The underlying string is the suffix after "/computeMetadata/v1/".
-//
-// This error is not returned if the value is defined to be the empty
-// string.
-type NotDefinedError string
-
-func (suffix NotDefinedError) Error() string {
- return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
-}
-
-func (c *cachedValue) get(cl *Client) (v string, err error) {
- defer c.mu.Unlock()
- c.mu.Lock()
- if c.v != "" {
- return c.v, nil
- }
- if c.trim {
- v, err = cl.getTrimmed(context.Background(), c.k)
- } else {
- v, err = cl.GetWithContext(context.Background(), c.k)
- }
- if err == nil {
- c.v = v
- }
- return
-}
-
-var (
- onGCEOnce sync.Once
- onGCE bool
-)
-
-// OnGCE reports whether this process is running on Google Compute Engine.
-func OnGCE() bool {
- onGCEOnce.Do(initOnGCE)
- return onGCE
-}
-
-func initOnGCE() {
- onGCE = testOnGCE()
-}
-
-func testOnGCE() bool {
- // The user explicitly said they're on GCE, so trust them.
- if os.Getenv(metadataHostEnv) != "" {
- return true
- }
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- resc := make(chan bool, 2)
-
- // Try two strategies in parallel.
- // See https://github.com/googleapis/google-cloud-go/issues/194
- go func() {
- req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
- req.Header.Set("User-Agent", userAgent)
- res, err := newDefaultHTTPClient().Do(req.WithContext(ctx))
- if err != nil {
- resc <- false
- return
- }
- defer res.Body.Close()
- resc <- res.Header.Get("Metadata-Flavor") == "Google"
- }()
-
- go func() {
- resolver := &net.Resolver{}
- addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.")
- if err != nil || len(addrs) == 0 {
- resc <- false
- return
- }
- resc <- strsContains(addrs, metadataIP)
- }()
-
- tryHarder := systemInfoSuggestsGCE()
- if tryHarder {
- res := <-resc
- if res {
- // The first strategy succeeded, so let's use it.
- return true
- }
- // Wait for either the DNS or metadata server probe to
- // contradict the other one and say we are running on
- // GCE. Give it a lot of time to do so, since the system
- // info already suggests we're running on a GCE BIOS.
- timer := time.NewTimer(5 * time.Second)
- defer timer.Stop()
- select {
- case res = <-resc:
- return res
- case <-timer.C:
- // Too slow. Who knows what this system is.
- return false
- }
- }
-
- // There's no hint from the system info that we're running on
- // GCE, so use the first probe's result as truth, whether it's
- // true or false. The goal here is to optimize for speed for
- // users who are NOT running on GCE. We can't assume that
- // either a DNS lookup or an HTTP request to a blackholed IP
- // address is fast. Worst case this should return when the
- // metaClient's Transport.ResponseHeaderTimeout or
- // Transport.Dial.Timeout fires (in two seconds).
- return <-resc
-}
-
-// systemInfoSuggestsGCE reports whether the local system (without
-// doing network requests) suggests that we're running on GCE. If this
-// returns true, testOnGCE tries a bit harder to reach its metadata
-// server.
-func systemInfoSuggestsGCE() bool {
- if runtime.GOOS != "linux" {
- // We don't have any non-Linux clues available, at least yet.
- return false
- }
- slurp, _ := os.ReadFile("/sys/class/dmi/id/product_name")
- name := strings.TrimSpace(string(slurp))
- return name == "Google" || name == "Google Compute Engine"
-}
-
-// Subscribe calls Client.SubscribeWithContext on the default client.
-func Subscribe(suffix string, fn func(v string, ok bool) error) error {
- return defaultClient.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) })
-}
-
-// SubscribeWithContext calls Client.SubscribeWithContext on the default client.
-func SubscribeWithContext(ctx context.Context, suffix string, fn func(ctx context.Context, v string, ok bool) error) error {
- return defaultClient.SubscribeWithContext(ctx, suffix, fn)
-}
-
-// Get calls Client.GetWithContext on the default client.
-//
-// Deprecated: Please use the context aware variant [GetWithContext].
-func Get(suffix string) (string, error) {
- return defaultClient.GetWithContext(context.Background(), suffix)
-}
-
-// GetWithContext calls Client.GetWithContext on the default client.
-func GetWithContext(ctx context.Context, suffix string) (string, error) {
- return defaultClient.GetWithContext(ctx, suffix)
-}
-
-// ProjectID returns the current instance's project ID string.
-func ProjectID() (string, error) { return defaultClient.ProjectID() }
-
-// NumericProjectID returns the current instance's numeric project ID.
-func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
-
-// InternalIP returns the instance's primary internal IP address.
-func InternalIP() (string, error) { return defaultClient.InternalIP() }
-
-// ExternalIP returns the instance's primary external (public) IP address.
-func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
-
-// Email calls Client.Email on the default client.
-func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) }
-
-// Hostname returns the instance's hostname. This will be of the form
-// ".c..internal".
-func Hostname() (string, error) { return defaultClient.Hostname() }
-
-// InstanceTags returns the list of user-defined instance tags,
-// assigned when initially creating a GCE instance.
-func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
-
-// InstanceID returns the current VM's numeric instance ID.
-func InstanceID() (string, error) { return defaultClient.InstanceID() }
-
-// InstanceName returns the current VM's instance ID string.
-func InstanceName() (string, error) { return defaultClient.InstanceName() }
-
-// Zone returns the current VM's zone, such as "us-central1-b".
-func Zone() (string, error) { return defaultClient.Zone() }
-
-// InstanceAttributes calls Client.InstanceAttributes on the default client.
-func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
-
-// ProjectAttributes calls Client.ProjectAttributes on the default client.
-func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
-
-// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
-func InstanceAttributeValue(attr string) (string, error) {
- return defaultClient.InstanceAttributeValue(attr)
-}
-
-// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
-func ProjectAttributeValue(attr string) (string, error) {
- return defaultClient.ProjectAttributeValue(attr)
-}
-
-// Scopes calls Client.Scopes on the default client.
-func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
-
-func strsContains(ss []string, s string) bool {
- for _, v := range ss {
- if v == s {
- return true
- }
- }
- return false
-}
-
-// A Client provides metadata.
-type Client struct {
- hc *http.Client
-}
-
-// NewClient returns a Client that can be used to fetch metadata.
-// Returns the client that uses the specified http.Client for HTTP requests.
-// If nil is specified, returns the default client.
-func NewClient(c *http.Client) *Client {
- if c == nil {
- return defaultClient
- }
-
- return &Client{hc: c}
-}
-
-// getETag returns a value from the metadata service as well as the associated ETag.
-// This func is otherwise equivalent to Get.
-func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string, err error) {
- // Using a fixed IP makes it very difficult to spoof the metadata service in
- // a container, which is an important use-case for local testing of cloud
- // deployments. To enable spoofing of the metadata service, the environment
- // variable GCE_METADATA_HOST is first inspected to decide where metadata
- // requests shall go.
- host := os.Getenv(metadataHostEnv)
- if host == "" {
- // Using 169.254.169.254 instead of "metadata" here because Go
- // binaries built with the "netgo" tag and without cgo won't
- // know the search suffix for "metadata" is
- // ".google.internal", and this IP address is documented as
- // being stable anyway.
- host = metadataIP
- }
- suffix = strings.TrimLeft(suffix, "/")
- u := "http://" + host + "/computeMetadata/v1/" + suffix
- req, err := http.NewRequestWithContext(ctx, "GET", u, nil)
- if err != nil {
- return "", "", err
- }
- req.Header.Set("Metadata-Flavor", "Google")
- req.Header.Set("User-Agent", userAgent)
- var res *http.Response
- var reqErr error
- retryer := newRetryer()
- for {
- res, reqErr = c.hc.Do(req)
- var code int
- if res != nil {
- code = res.StatusCode
- }
- if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry {
- if err := sleep(ctx, delay); err != nil {
- return "", "", err
- }
- continue
- }
- break
- }
- if reqErr != nil {
- return "", "", reqErr
- }
- defer res.Body.Close()
- if res.StatusCode == http.StatusNotFound {
- return "", "", NotDefinedError(suffix)
- }
- all, err := io.ReadAll(res.Body)
- if err != nil {
- return "", "", err
- }
- if res.StatusCode != 200 {
- return "", "", &Error{Code: res.StatusCode, Message: string(all)}
- }
- return string(all), res.Header.Get("Etag"), nil
-}
-
-// Get returns a value from the metadata service.
-// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
-//
-// If the GCE_METADATA_HOST environment variable is not defined, a default of
-// 169.254.169.254 will be used instead.
-//
-// If the requested metadata is not defined, the returned error will
-// be of type NotDefinedError.
-//
-// Deprecated: Please use the context aware variant [Client.GetWithContext].
-func (c *Client) Get(suffix string) (string, error) {
- return c.GetWithContext(context.Background(), suffix)
-}
-
-// GetWithContext returns a value from the metadata service.
-// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
-//
-// If the GCE_METADATA_HOST environment variable is not defined, a default of
-// 169.254.169.254 will be used instead.
-//
-// If the requested metadata is not defined, the returned error will
-// be of type NotDefinedError.
-func (c *Client) GetWithContext(ctx context.Context, suffix string) (string, error) {
- val, _, err := c.getETag(ctx, suffix)
- return val, err
-}
-
-func (c *Client) getTrimmed(ctx context.Context, suffix string) (s string, err error) {
- s, err = c.GetWithContext(ctx, suffix)
- s = strings.TrimSpace(s)
- return
-}
-
-func (c *Client) lines(suffix string) ([]string, error) {
- j, err := c.GetWithContext(context.Background(), suffix)
- if err != nil {
- return nil, err
- }
- s := strings.Split(strings.TrimSpace(j), "\n")
- for i := range s {
- s[i] = strings.TrimSpace(s[i])
- }
- return s, nil
-}
-
-// ProjectID returns the current instance's project ID string.
-func (c *Client) ProjectID() (string, error) { return projID.get(c) }
-
-// NumericProjectID returns the current instance's numeric project ID.
-func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
-
-// InstanceID returns the current VM's numeric instance ID.
-func (c *Client) InstanceID() (string, error) { return instID.get(c) }
-
-// InternalIP returns the instance's primary internal IP address.
-func (c *Client) InternalIP() (string, error) {
- return c.getTrimmed(context.Background(), "instance/network-interfaces/0/ip")
-}
-
-// Email returns the email address associated with the service account.
-// The account may be empty or the string "default" to use the instance's
-// main account.
-func (c *Client) Email(serviceAccount string) (string, error) {
- if serviceAccount == "" {
- serviceAccount = "default"
- }
- return c.getTrimmed(context.Background(), "instance/service-accounts/"+serviceAccount+"/email")
-}
-
-// ExternalIP returns the instance's primary external (public) IP address.
-func (c *Client) ExternalIP() (string, error) {
- return c.getTrimmed(context.Background(), "instance/network-interfaces/0/access-configs/0/external-ip")
-}
-
-// Hostname returns the instance's hostname. This will be of the form
-// ".c..internal".
-func (c *Client) Hostname() (string, error) {
- return c.getTrimmed(context.Background(), "instance/hostname")
-}
-
-// InstanceTags returns the list of user-defined instance tags,
-// assigned when initially creating a GCE instance.
-func (c *Client) InstanceTags() ([]string, error) {
- var s []string
- j, err := c.GetWithContext(context.Background(), "instance/tags")
- if err != nil {
- return nil, err
- }
- if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
- return nil, err
- }
- return s, nil
-}
-
-// InstanceName returns the current VM's instance ID string.
-func (c *Client) InstanceName() (string, error) {
- return c.getTrimmed(context.Background(), "instance/name")
-}
-
-// Zone returns the current VM's zone, such as "us-central1-b".
-func (c *Client) Zone() (string, error) {
- zone, err := c.getTrimmed(context.Background(), "instance/zone")
- // zone is of the form "projects//zones/".
- if err != nil {
- return "", err
- }
- return zone[strings.LastIndex(zone, "/")+1:], nil
-}
-
-// InstanceAttributes returns the list of user-defined attributes,
-// assigned when initially creating a GCE VM instance. The value of an
-// attribute can be obtained with InstanceAttributeValue.
-func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
-
-// ProjectAttributes returns the list of user-defined attributes
-// applying to the project as a whole, not just this VM. The value of
-// an attribute can be obtained with ProjectAttributeValue.
-func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
-
-// InstanceAttributeValue returns the value of the provided VM
-// instance attribute.
-//
-// If the requested attribute is not defined, the returned error will
-// be of type NotDefinedError.
-//
-// InstanceAttributeValue may return ("", nil) if the attribute was
-// defined to be the empty string.
-func (c *Client) InstanceAttributeValue(attr string) (string, error) {
- return c.GetWithContext(context.Background(), "instance/attributes/"+attr)
-}
-
-// ProjectAttributeValue returns the value of the provided
-// project attribute.
-//
-// If the requested attribute is not defined, the returned error will
-// be of type NotDefinedError.
-//
-// ProjectAttributeValue may return ("", nil) if the attribute was
-// defined to be the empty string.
-func (c *Client) ProjectAttributeValue(attr string) (string, error) {
- return c.GetWithContext(context.Background(), "project/attributes/"+attr)
-}
-
-// Scopes returns the service account scopes for the given account.
-// The account may be empty or the string "default" to use the instance's
-// main account.
-func (c *Client) Scopes(serviceAccount string) ([]string, error) {
- if serviceAccount == "" {
- serviceAccount = "default"
- }
- return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
-}
-
-// Subscribe subscribes to a value from the metadata service.
-// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
-// The suffix may contain query parameters.
-//
-// Deprecated: Please use the context aware variant [Client.SubscribeWithContext].
-func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
- return c.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) })
-}
-
-// SubscribeWithContext subscribes to a value from the metadata service.
-// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
-// The suffix may contain query parameters.
-//
-// SubscribeWithContext calls fn with the latest metadata value indicated by the
-// provided suffix. If the metadata value is deleted, fn is called with the
-// empty string and ok false. Subscribe blocks until fn returns a non-nil error
-// or the value is deleted. Subscribe returns the error value returned from the
-// last call to fn, which may be nil when ok == false.
-func (c *Client) SubscribeWithContext(ctx context.Context, suffix string, fn func(ctx context.Context, v string, ok bool) error) error {
- const failedSubscribeSleep = time.Second * 5
-
- // First check to see if the metadata value exists at all.
- val, lastETag, err := c.getETag(ctx, suffix)
- if err != nil {
- return err
- }
-
- if err := fn(ctx, val, true); err != nil {
- return err
- }
-
- ok := true
- if strings.ContainsRune(suffix, '?') {
- suffix += "&wait_for_change=true&last_etag="
- } else {
- suffix += "?wait_for_change=true&last_etag="
- }
- for {
- val, etag, err := c.getETag(ctx, suffix+url.QueryEscape(lastETag))
- if err != nil {
- if _, deleted := err.(NotDefinedError); !deleted {
- time.Sleep(failedSubscribeSleep)
- continue // Retry on other errors.
- }
- ok = false
- }
- lastETag = etag
-
- if err := fn(ctx, val, ok); err != nil || !ok {
- return err
- }
- }
-}
-
-// Error contains an error response from the server.
-type Error struct {
- // Code is the HTTP response status code.
- Code int
- // Message is the server response message.
- Message string
-}
-
-func (e *Error) Error() string {
- return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
-}
diff --git a/vendor/cloud.google.com/go/compute/metadata/retry.go b/vendor/cloud.google.com/go/compute/metadata/retry.go
deleted file mode 100644
index 3d4bc75d..00000000
--- a/vendor/cloud.google.com/go/compute/metadata/retry.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2021 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metadata
-
-import (
- "context"
- "io"
- "math/rand"
- "net/http"
- "time"
-)
-
-const (
- maxRetryAttempts = 5
-)
-
-var (
- syscallRetryable = func(error) bool { return false }
-)
-
-// defaultBackoff is basically equivalent to gax.Backoff without the need for
-// the dependency.
-type defaultBackoff struct {
- max time.Duration
- mul float64
- cur time.Duration
-}
-
-func (b *defaultBackoff) Pause() time.Duration {
- d := time.Duration(1 + rand.Int63n(int64(b.cur)))
- b.cur = time.Duration(float64(b.cur) * b.mul)
- if b.cur > b.max {
- b.cur = b.max
- }
- return d
-}
-
-// sleep is the equivalent of gax.Sleep without the need for the dependency.
-func sleep(ctx context.Context, d time.Duration) error {
- t := time.NewTimer(d)
- select {
- case <-ctx.Done():
- t.Stop()
- return ctx.Err()
- case <-t.C:
- return nil
- }
-}
-
-func newRetryer() *metadataRetryer {
- return &metadataRetryer{bo: &defaultBackoff{
- cur: 100 * time.Millisecond,
- max: 30 * time.Second,
- mul: 2,
- }}
-}
-
-type backoff interface {
- Pause() time.Duration
-}
-
-type metadataRetryer struct {
- bo backoff
- attempts int
-}
-
-func (r *metadataRetryer) Retry(status int, err error) (time.Duration, bool) {
- if status == http.StatusOK {
- return 0, false
- }
- retryOk := shouldRetry(status, err)
- if !retryOk {
- return 0, false
- }
- if r.attempts == maxRetryAttempts {
- return 0, false
- }
- r.attempts++
- return r.bo.Pause(), true
-}
-
-func shouldRetry(status int, err error) bool {
- if 500 <= status && status <= 599 {
- return true
- }
- if err == io.ErrUnexpectedEOF {
- return true
- }
- // Transient network errors should be retried.
- if syscallRetryable(err) {
- return true
- }
- if err, ok := err.(interface{ Temporary() bool }); ok {
- if err.Temporary() {
- return true
- }
- }
- if err, ok := err.(interface{ Unwrap() error }); ok {
- return shouldRetry(status, err.Unwrap())
- }
- return false
-}
diff --git a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go
deleted file mode 100644
index bb412f89..00000000
--- a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2021 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux
-// +build linux
-
-package metadata
-
-import "syscall"
-
-func init() {
- // Initialize syscallRetryable to return true on transient socket-level
- // errors. These errors are specific to Linux.
- syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED }
-}
diff --git a/vendor/filippo.io/edwards25519/LICENSE b/vendor/filippo.io/edwards25519/LICENSE
deleted file mode 100644
index 6a66aea5..00000000
--- a/vendor/filippo.io/edwards25519/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/filippo.io/edwards25519/README.md b/vendor/filippo.io/edwards25519/README.md
deleted file mode 100644
index 24e2457d..00000000
--- a/vendor/filippo.io/edwards25519/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# filippo.io/edwards25519
-
-```
-import "filippo.io/edwards25519"
-```
-
-This library implements the edwards25519 elliptic curve, exposing the necessary APIs to build a wide array of higher-level primitives.
-Read the docs at [pkg.go.dev/filippo.io/edwards25519](https://pkg.go.dev/filippo.io/edwards25519).
-
-The code is originally derived from Adam Langley's internal implementation in the Go standard library, and includes George Tankersley's [performance improvements](https://golang.org/cl/71950). It was then further developed by Henry de Valence for use in ristretto255, and was finally [merged back into the Go standard library](https://golang.org/cl/276272) as of Go 1.17. It now tracks the upstream codebase and extends it with additional functionality.
-
-Most users don't need this package, and should instead use `crypto/ed25519` for signatures, `golang.org/x/crypto/curve25519` for Diffie-Hellman, or `github.com/gtank/ristretto255` for prime order group logic. However, for anyone currently using a fork of `crypto/internal/edwards25519`/`crypto/ed25519/internal/edwards25519` or `github.com/agl/edwards25519`, this package should be a safer, faster, and more powerful alternative.
-
-Since this package is meant to curb proliferation of edwards25519 implementations in the Go ecosystem, it welcomes requests for new APIs or reviewable performance improvements.
diff --git a/vendor/filippo.io/edwards25519/doc.go b/vendor/filippo.io/edwards25519/doc.go
deleted file mode 100644
index ab6aaebc..00000000
--- a/vendor/filippo.io/edwards25519/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (c) 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package edwards25519 implements group logic for the twisted Edwards curve
-//
-// -x^2 + y^2 = 1 + -(121665/121666)*x^2*y^2
-//
-// This is better known as the Edwards curve equivalent to Curve25519, and is
-// the curve used by the Ed25519 signature scheme.
-//
-// Most users don't need this package, and should instead use crypto/ed25519 for
-// signatures, golang.org/x/crypto/curve25519 for Diffie-Hellman, or
-// github.com/gtank/ristretto255 for prime order group logic.
-//
-// However, developers who do need to interact with low-level edwards25519
-// operations can use this package, which is an extended version of
-// crypto/internal/edwards25519 from the standard library repackaged as
-// an importable module.
-package edwards25519
diff --git a/vendor/filippo.io/edwards25519/edwards25519.go b/vendor/filippo.io/edwards25519/edwards25519.go
deleted file mode 100644
index a744da2c..00000000
--- a/vendor/filippo.io/edwards25519/edwards25519.go
+++ /dev/null
@@ -1,427 +0,0 @@
-// Copyright (c) 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package edwards25519
-
-import (
- "errors"
-
- "filippo.io/edwards25519/field"
-)
-
-// Point types.
-
-type projP1xP1 struct {
- X, Y, Z, T field.Element
-}
-
-type projP2 struct {
- X, Y, Z field.Element
-}
-
-// Point represents a point on the edwards25519 curve.
-//
-// This type works similarly to math/big.Int, and all arguments and receivers
-// are allowed to alias.
-//
-// The zero value is NOT valid, and it may be used only as a receiver.
-type Point struct {
- // Make the type not comparable (i.e. used with == or as a map key), as
- // equivalent points can be represented by different Go values.
- _ incomparable
-
- // The point is internally represented in extended coordinates (X, Y, Z, T)
- // where x = X/Z, y = Y/Z, and xy = T/Z per https://eprint.iacr.org/2008/522.
- x, y, z, t field.Element
-}
-
-type incomparable [0]func()
-
-func checkInitialized(points ...*Point) {
- for _, p := range points {
- if p.x == (field.Element{}) && p.y == (field.Element{}) {
- panic("edwards25519: use of uninitialized Point")
- }
- }
-}
-
-type projCached struct {
- YplusX, YminusX, Z, T2d field.Element
-}
-
-type affineCached struct {
- YplusX, YminusX, T2d field.Element
-}
-
-// Constructors.
-
-func (v *projP2) Zero() *projP2 {
- v.X.Zero()
- v.Y.One()
- v.Z.One()
- return v
-}
-
-// identity is the point at infinity.
-var identity, _ = new(Point).SetBytes([]byte{
- 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
-
-// NewIdentityPoint returns a new Point set to the identity.
-func NewIdentityPoint() *Point {
- return new(Point).Set(identity)
-}
-
-// generator is the canonical curve basepoint. See TestGenerator for the
-// correspondence of this encoding with the values in RFC 8032.
-var generator, _ = new(Point).SetBytes([]byte{
- 0x58, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
- 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
- 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
- 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66})
-
-// NewGeneratorPoint returns a new Point set to the canonical generator.
-func NewGeneratorPoint() *Point {
- return new(Point).Set(generator)
-}
-
-func (v *projCached) Zero() *projCached {
- v.YplusX.One()
- v.YminusX.One()
- v.Z.One()
- v.T2d.Zero()
- return v
-}
-
-func (v *affineCached) Zero() *affineCached {
- v.YplusX.One()
- v.YminusX.One()
- v.T2d.Zero()
- return v
-}
-
-// Assignments.
-
-// Set sets v = u, and returns v.
-func (v *Point) Set(u *Point) *Point {
- *v = *u
- return v
-}
-
-// Encoding.
-
-// Bytes returns the canonical 32-byte encoding of v, according to RFC 8032,
-// Section 5.1.2.
-func (v *Point) Bytes() []byte {
- // This function is outlined to make the allocations inline in the caller
- // rather than happen on the heap.
- var buf [32]byte
- return v.bytes(&buf)
-}
-
-func (v *Point) bytes(buf *[32]byte) []byte {
- checkInitialized(v)
-
- var zInv, x, y field.Element
- zInv.Invert(&v.z) // zInv = 1 / Z
- x.Multiply(&v.x, &zInv) // x = X / Z
- y.Multiply(&v.y, &zInv) // y = Y / Z
-
- out := copyFieldElement(buf, &y)
- out[31] |= byte(x.IsNegative() << 7)
- return out
-}
-
-var feOne = new(field.Element).One()
-
-// SetBytes sets v = x, where x is a 32-byte encoding of v. If x does not
-// represent a valid point on the curve, SetBytes returns nil and an error and
-// the receiver is unchanged. Otherwise, SetBytes returns v.
-//
-// Note that SetBytes accepts all non-canonical encodings of valid points.
-// That is, it follows decoding rules that match most implementations in
-// the ecosystem rather than RFC 8032.
-func (v *Point) SetBytes(x []byte) (*Point, error) {
- // Specifically, the non-canonical encodings that are accepted are
- // 1) the ones where the field element is not reduced (see the
- // (*field.Element).SetBytes docs) and
- // 2) the ones where the x-coordinate is zero and the sign bit is set.
- //
- // Read more at https://hdevalence.ca/blog/2020-10-04-its-25519am,
- // specifically the "Canonical A, R" section.
-
- y, err := new(field.Element).SetBytes(x)
- if err != nil {
- return nil, errors.New("edwards25519: invalid point encoding length")
- }
-
- // -x² + y² = 1 + dx²y²
- // x² + dx²y² = x²(dy² + 1) = y² - 1
- // x² = (y² - 1) / (dy² + 1)
-
- // u = y² - 1
- y2 := new(field.Element).Square(y)
- u := new(field.Element).Subtract(y2, feOne)
-
- // v = dy² + 1
- vv := new(field.Element).Multiply(y2, d)
- vv = vv.Add(vv, feOne)
-
- // x = +√(u/v)
- xx, wasSquare := new(field.Element).SqrtRatio(u, vv)
- if wasSquare == 0 {
- return nil, errors.New("edwards25519: invalid point encoding")
- }
-
- // Select the negative square root if the sign bit is set.
- xxNeg := new(field.Element).Negate(xx)
- xx = xx.Select(xxNeg, xx, int(x[31]>>7))
-
- v.x.Set(xx)
- v.y.Set(y)
- v.z.One()
- v.t.Multiply(xx, y) // xy = T / Z
-
- return v, nil
-}
-
-func copyFieldElement(buf *[32]byte, v *field.Element) []byte {
- copy(buf[:], v.Bytes())
- return buf[:]
-}
-
-// Conversions.
-
-func (v *projP2) FromP1xP1(p *projP1xP1) *projP2 {
- v.X.Multiply(&p.X, &p.T)
- v.Y.Multiply(&p.Y, &p.Z)
- v.Z.Multiply(&p.Z, &p.T)
- return v
-}
-
-func (v *projP2) FromP3(p *Point) *projP2 {
- v.X.Set(&p.x)
- v.Y.Set(&p.y)
- v.Z.Set(&p.z)
- return v
-}
-
-func (v *Point) fromP1xP1(p *projP1xP1) *Point {
- v.x.Multiply(&p.X, &p.T)
- v.y.Multiply(&p.Y, &p.Z)
- v.z.Multiply(&p.Z, &p.T)
- v.t.Multiply(&p.X, &p.Y)
- return v
-}
-
-func (v *Point) fromP2(p *projP2) *Point {
- v.x.Multiply(&p.X, &p.Z)
- v.y.Multiply(&p.Y, &p.Z)
- v.z.Square(&p.Z)
- v.t.Multiply(&p.X, &p.Y)
- return v
-}
-
-// d is a constant in the curve equation.
-var d, _ = new(field.Element).SetBytes([]byte{
- 0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75,
- 0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00,
- 0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c,
- 0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52})
-var d2 = new(field.Element).Add(d, d)
-
-func (v *projCached) FromP3(p *Point) *projCached {
- v.YplusX.Add(&p.y, &p.x)
- v.YminusX.Subtract(&p.y, &p.x)
- v.Z.Set(&p.z)
- v.T2d.Multiply(&p.t, d2)
- return v
-}
-
-func (v *affineCached) FromP3(p *Point) *affineCached {
- v.YplusX.Add(&p.y, &p.x)
- v.YminusX.Subtract(&p.y, &p.x)
- v.T2d.Multiply(&p.t, d2)
-
- var invZ field.Element
- invZ.Invert(&p.z)
- v.YplusX.Multiply(&v.YplusX, &invZ)
- v.YminusX.Multiply(&v.YminusX, &invZ)
- v.T2d.Multiply(&v.T2d, &invZ)
- return v
-}
-
-// (Re)addition and subtraction.
-
-// Add sets v = p + q, and returns v.
-func (v *Point) Add(p, q *Point) *Point {
- checkInitialized(p, q)
- qCached := new(projCached).FromP3(q)
- result := new(projP1xP1).Add(p, qCached)
- return v.fromP1xP1(result)
-}
-
-// Subtract sets v = p - q, and returns v.
-func (v *Point) Subtract(p, q *Point) *Point {
- checkInitialized(p, q)
- qCached := new(projCached).FromP3(q)
- result := new(projP1xP1).Sub(p, qCached)
- return v.fromP1xP1(result)
-}
-
-func (v *projP1xP1) Add(p *Point, q *projCached) *projP1xP1 {
- var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
-
- YplusX.Add(&p.y, &p.x)
- YminusX.Subtract(&p.y, &p.x)
-
- PP.Multiply(&YplusX, &q.YplusX)
- MM.Multiply(&YminusX, &q.YminusX)
- TT2d.Multiply(&p.t, &q.T2d)
- ZZ2.Multiply(&p.z, &q.Z)
-
- ZZ2.Add(&ZZ2, &ZZ2)
-
- v.X.Subtract(&PP, &MM)
- v.Y.Add(&PP, &MM)
- v.Z.Add(&ZZ2, &TT2d)
- v.T.Subtract(&ZZ2, &TT2d)
- return v
-}
-
-func (v *projP1xP1) Sub(p *Point, q *projCached) *projP1xP1 {
- var YplusX, YminusX, PP, MM, TT2d, ZZ2 field.Element
-
- YplusX.Add(&p.y, &p.x)
- YminusX.Subtract(&p.y, &p.x)
-
- PP.Multiply(&YplusX, &q.YminusX) // flipped sign
- MM.Multiply(&YminusX, &q.YplusX) // flipped sign
- TT2d.Multiply(&p.t, &q.T2d)
- ZZ2.Multiply(&p.z, &q.Z)
-
- ZZ2.Add(&ZZ2, &ZZ2)
-
- v.X.Subtract(&PP, &MM)
- v.Y.Add(&PP, &MM)
- v.Z.Subtract(&ZZ2, &TT2d) // flipped sign
- v.T.Add(&ZZ2, &TT2d) // flipped sign
- return v
-}
-
-func (v *projP1xP1) AddAffine(p *Point, q *affineCached) *projP1xP1 {
- var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
-
- YplusX.Add(&p.y, &p.x)
- YminusX.Subtract(&p.y, &p.x)
-
- PP.Multiply(&YplusX, &q.YplusX)
- MM.Multiply(&YminusX, &q.YminusX)
- TT2d.Multiply(&p.t, &q.T2d)
-
- Z2.Add(&p.z, &p.z)
-
- v.X.Subtract(&PP, &MM)
- v.Y.Add(&PP, &MM)
- v.Z.Add(&Z2, &TT2d)
- v.T.Subtract(&Z2, &TT2d)
- return v
-}
-
-func (v *projP1xP1) SubAffine(p *Point, q *affineCached) *projP1xP1 {
- var YplusX, YminusX, PP, MM, TT2d, Z2 field.Element
-
- YplusX.Add(&p.y, &p.x)
- YminusX.Subtract(&p.y, &p.x)
-
- PP.Multiply(&YplusX, &q.YminusX) // flipped sign
- MM.Multiply(&YminusX, &q.YplusX) // flipped sign
- TT2d.Multiply(&p.t, &q.T2d)
-
- Z2.Add(&p.z, &p.z)
-
- v.X.Subtract(&PP, &MM)
- v.Y.Add(&PP, &MM)
- v.Z.Subtract(&Z2, &TT2d) // flipped sign
- v.T.Add(&Z2, &TT2d) // flipped sign
- return v
-}
-
-// Doubling.
-
-func (v *projP1xP1) Double(p *projP2) *projP1xP1 {
- var XX, YY, ZZ2, XplusYsq field.Element
-
- XX.Square(&p.X)
- YY.Square(&p.Y)
- ZZ2.Square(&p.Z)
- ZZ2.Add(&ZZ2, &ZZ2)
- XplusYsq.Add(&p.X, &p.Y)
- XplusYsq.Square(&XplusYsq)
-
- v.Y.Add(&YY, &XX)
- v.Z.Subtract(&YY, &XX)
-
- v.X.Subtract(&XplusYsq, &v.Y)
- v.T.Subtract(&ZZ2, &v.Z)
- return v
-}
-
-// Negation.
-
-// Negate sets v = -p, and returns v.
-func (v *Point) Negate(p *Point) *Point {
- checkInitialized(p)
- v.x.Negate(&p.x)
- v.y.Set(&p.y)
- v.z.Set(&p.z)
- v.t.Negate(&p.t)
- return v
-}
-
-// Equal returns 1 if v is equivalent to u, and 0 otherwise.
-func (v *Point) Equal(u *Point) int {
- checkInitialized(v, u)
-
- var t1, t2, t3, t4 field.Element
- t1.Multiply(&v.x, &u.z)
- t2.Multiply(&u.x, &v.z)
- t3.Multiply(&v.y, &u.z)
- t4.Multiply(&u.y, &v.z)
-
- return t1.Equal(&t2) & t3.Equal(&t4)
-}
-
-// Constant-time operations
-
-// Select sets v to a if cond == 1 and to b if cond == 0.
-func (v *projCached) Select(a, b *projCached, cond int) *projCached {
- v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
- v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
- v.Z.Select(&a.Z, &b.Z, cond)
- v.T2d.Select(&a.T2d, &b.T2d, cond)
- return v
-}
-
-// Select sets v to a if cond == 1 and to b if cond == 0.
-func (v *affineCached) Select(a, b *affineCached, cond int) *affineCached {
- v.YplusX.Select(&a.YplusX, &b.YplusX, cond)
- v.YminusX.Select(&a.YminusX, &b.YminusX, cond)
- v.T2d.Select(&a.T2d, &b.T2d, cond)
- return v
-}
-
-// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
-func (v *projCached) CondNeg(cond int) *projCached {
- v.YplusX.Swap(&v.YminusX, cond)
- v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
- return v
-}
-
-// CondNeg negates v if cond == 1 and leaves it unchanged if cond == 0.
-func (v *affineCached) CondNeg(cond int) *affineCached {
- v.YplusX.Swap(&v.YminusX, cond)
- v.T2d.Select(new(field.Element).Negate(&v.T2d), &v.T2d, cond)
- return v
-}
diff --git a/vendor/filippo.io/edwards25519/extra.go b/vendor/filippo.io/edwards25519/extra.go
deleted file mode 100644
index d152d68f..00000000
--- a/vendor/filippo.io/edwards25519/extra.go
+++ /dev/null
@@ -1,349 +0,0 @@
-// Copyright (c) 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package edwards25519
-
-// This file contains additional functionality that is not included in the
-// upstream crypto/internal/edwards25519 package.
-
-import (
- "errors"
-
- "filippo.io/edwards25519/field"
-)
-
-// ExtendedCoordinates returns v in extended coordinates (X:Y:Z:T) where
-// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522.
-func (v *Point) ExtendedCoordinates() (X, Y, Z, T *field.Element) {
- // This function is outlined to make the allocations inline in the caller
- // rather than happen on the heap. Don't change the style without making
- // sure it doesn't increase the inliner cost.
- var e [4]field.Element
- X, Y, Z, T = v.extendedCoordinates(&e)
- return
-}
-
-func (v *Point) extendedCoordinates(e *[4]field.Element) (X, Y, Z, T *field.Element) {
- checkInitialized(v)
- X = e[0].Set(&v.x)
- Y = e[1].Set(&v.y)
- Z = e[2].Set(&v.z)
- T = e[3].Set(&v.t)
- return
-}
-
-// SetExtendedCoordinates sets v = (X:Y:Z:T) in extended coordinates where
-// x = X/Z, y = Y/Z, and xy = T/Z as in https://eprint.iacr.org/2008/522.
-//
-// If the coordinates are invalid or don't represent a valid point on the curve,
-// SetExtendedCoordinates returns nil and an error and the receiver is
-// unchanged. Otherwise, SetExtendedCoordinates returns v.
-func (v *Point) SetExtendedCoordinates(X, Y, Z, T *field.Element) (*Point, error) {
- if !isOnCurve(X, Y, Z, T) {
- return nil, errors.New("edwards25519: invalid point coordinates")
- }
- v.x.Set(X)
- v.y.Set(Y)
- v.z.Set(Z)
- v.t.Set(T)
- return v, nil
-}
-
-func isOnCurve(X, Y, Z, T *field.Element) bool {
- var lhs, rhs field.Element
- XX := new(field.Element).Square(X)
- YY := new(field.Element).Square(Y)
- ZZ := new(field.Element).Square(Z)
- TT := new(field.Element).Square(T)
- // -x² + y² = 1 + dx²y²
- // -(X/Z)² + (Y/Z)² = 1 + d(T/Z)²
- // -X² + Y² = Z² + dT²
- lhs.Subtract(YY, XX)
- rhs.Multiply(d, TT).Add(&rhs, ZZ)
- if lhs.Equal(&rhs) != 1 {
- return false
- }
- // xy = T/Z
- // XY/Z² = T/Z
- // XY = TZ
- lhs.Multiply(X, Y)
- rhs.Multiply(T, Z)
- return lhs.Equal(&rhs) == 1
-}
-
-// BytesMontgomery converts v to a point on the birationally-equivalent
-// Curve25519 Montgomery curve, and returns its canonical 32 bytes encoding
-// according to RFC 7748.
-//
-// Note that BytesMontgomery only encodes the u-coordinate, so v and -v encode
-// to the same value. If v is the identity point, BytesMontgomery returns 32
-// zero bytes, analogously to the X25519 function.
-//
-// The lack of an inverse operation (such as SetMontgomeryBytes) is deliberate:
-// while every valid edwards25519 point has a unique u-coordinate Montgomery
-// encoding, X25519 accepts inputs on the quadratic twist, which don't correspond
-// to any edwards25519 point, and every other X25519 input corresponds to two
-// edwards25519 points.
-func (v *Point) BytesMontgomery() []byte {
- // This function is outlined to make the allocations inline in the caller
- // rather than happen on the heap.
- var buf [32]byte
- return v.bytesMontgomery(&buf)
-}
-
-func (v *Point) bytesMontgomery(buf *[32]byte) []byte {
- checkInitialized(v)
-
- // RFC 7748, Section 4.1 provides the bilinear map to calculate the
- // Montgomery u-coordinate
- //
- // u = (1 + y) / (1 - y)
- //
- // where y = Y / Z.
-
- var y, recip, u field.Element
-
- y.Multiply(&v.y, y.Invert(&v.z)) // y = Y / Z
- recip.Invert(recip.Subtract(feOne, &y)) // r = 1/(1 - y)
- u.Multiply(u.Add(feOne, &y), &recip) // u = (1 + y)*r
-
- return copyFieldElement(buf, &u)
-}
-
-// MultByCofactor sets v = 8 * p, and returns v.
-func (v *Point) MultByCofactor(p *Point) *Point {
- checkInitialized(p)
- result := projP1xP1{}
- pp := (&projP2{}).FromP3(p)
- result.Double(pp)
- pp.FromP1xP1(&result)
- result.Double(pp)
- pp.FromP1xP1(&result)
- result.Double(pp)
- return v.fromP1xP1(&result)
-}
-
-// Given k > 0, set s = s**(2*i).
-func (s *Scalar) pow2k(k int) {
- for i := 0; i < k; i++ {
- s.Multiply(s, s)
- }
-}
-
-// Invert sets s to the inverse of a nonzero scalar v, and returns s.
-//
-// If t is zero, Invert returns zero.
-func (s *Scalar) Invert(t *Scalar) *Scalar {
- // Uses a hardcoded sliding window of width 4.
- var table [8]Scalar
- var tt Scalar
- tt.Multiply(t, t)
- table[0] = *t
- for i := 0; i < 7; i++ {
- table[i+1].Multiply(&table[i], &tt)
- }
- // Now table = [t**1, t**3, t**5, t**7, t**9, t**11, t**13, t**15]
- // so t**k = t[k/2] for odd k
-
- // To compute the sliding window digits, use the following Sage script:
-
- // sage: import itertools
- // sage: def sliding_window(w,k):
- // ....: digits = []
- // ....: while k > 0:
- // ....: if k % 2 == 1:
- // ....: kmod = k % (2**w)
- // ....: digits.append(kmod)
- // ....: k = k - kmod
- // ....: else:
- // ....: digits.append(0)
- // ....: k = k // 2
- // ....: return digits
-
- // Now we can compute s roughly as follows:
-
- // sage: s = 1
- // sage: for coeff in reversed(sliding_window(4,l-2)):
- // ....: s = s*s
- // ....: if coeff > 0 :
- // ....: s = s*t**coeff
-
- // This works on one bit at a time, with many runs of zeros.
- // The digits can be collapsed into [(count, coeff)] as follows:
-
- // sage: [(len(list(group)),d) for d,group in itertools.groupby(sliding_window(4,l-2))]
-
- // Entries of the form (k, 0) turn into pow2k(k)
- // Entries of the form (1, coeff) turn into a squaring and then a table lookup.
- // We can fold the squaring into the previous pow2k(k) as pow2k(k+1).
-
- *s = table[1/2]
- s.pow2k(127 + 1)
- s.Multiply(s, &table[1/2])
- s.pow2k(4 + 1)
- s.Multiply(s, &table[9/2])
- s.pow2k(3 + 1)
- s.Multiply(s, &table[11/2])
- s.pow2k(3 + 1)
- s.Multiply(s, &table[13/2])
- s.pow2k(3 + 1)
- s.Multiply(s, &table[15/2])
- s.pow2k(4 + 1)
- s.Multiply(s, &table[7/2])
- s.pow2k(4 + 1)
- s.Multiply(s, &table[15/2])
- s.pow2k(3 + 1)
- s.Multiply(s, &table[5/2])
- s.pow2k(3 + 1)
- s.Multiply(s, &table[1/2])
- s.pow2k(4 + 1)
- s.Multiply(s, &table[15/2])
- s.pow2k(4 + 1)
- s.Multiply(s, &table[15/2])
- s.pow2k(4 + 1)
- s.Multiply(s, &table[7/2])
- s.pow2k(3 + 1)
- s.Multiply(s, &table[3/2])
- s.pow2k(4 + 1)
- s.Multiply(s, &table[11/2])
- s.pow2k(5 + 1)
- s.Multiply(s, &table[11/2])
- s.pow2k(9 + 1)
- s.Multiply(s, &table[9/2])
- s.pow2k(3 + 1)
- s.Multiply(s, &table[3/2])
- s.pow2k(4 + 1)
- s.Multiply(s, &table[3/2])
- s.pow2k(4 + 1)
- s.Multiply(s, &table[3/2])
- s.pow2k(4 + 1)
- s.Multiply(s, &table[9/2])
- s.pow2k(3 + 1)
- s.Multiply(s, &table[7/2])
- s.pow2k(3 + 1)
- s.Multiply(s, &table[3/2])
- s.pow2k(3 + 1)
- s.Multiply(s, &table[13/2])
- s.pow2k(3 + 1)
- s.Multiply(s, &table[7/2])
- s.pow2k(4 + 1)
- s.Multiply(s, &table[9/2])
- s.pow2k(3 + 1)
- s.Multiply(s, &table[15/2])
- s.pow2k(4 + 1)
- s.Multiply(s, &table[11/2])
-
- return s
-}
-
-// MultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v.
-//
-// Execution time depends only on the lengths of the two slices, which must match.
-func (v *Point) MultiScalarMult(scalars []*Scalar, points []*Point) *Point {
- if len(scalars) != len(points) {
- panic("edwards25519: called MultiScalarMult with different size inputs")
- }
- checkInitialized(points...)
-
- // Proceed as in the single-base case, but share doublings
- // between each point in the multiscalar equation.
-
- // Build lookup tables for each point
- tables := make([]projLookupTable, len(points))
- for i := range tables {
- tables[i].FromP3(points[i])
- }
- // Compute signed radix-16 digits for each scalar
- digits := make([][64]int8, len(scalars))
- for i := range digits {
- digits[i] = scalars[i].signedRadix16()
- }
-
- // Unwrap first loop iteration to save computing 16*identity
- multiple := &projCached{}
- tmp1 := &projP1xP1{}
- tmp2 := &projP2{}
- // Lookup-and-add the appropriate multiple of each input point
- for j := range tables {
- tables[j].SelectInto(multiple, digits[j][63])
- tmp1.Add(v, multiple) // tmp1 = v + x_(j,63)*Q in P1xP1 coords
- v.fromP1xP1(tmp1) // update v
- }
- tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration
- for i := 62; i >= 0; i-- {
- tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords
- tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords
- tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords
- tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords
- tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords
- tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords
- tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords
- v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords
- // Lookup-and-add the appropriate multiple of each input point
- for j := range tables {
- tables[j].SelectInto(multiple, digits[j][i])
- tmp1.Add(v, multiple) // tmp1 = v + x_(j,i)*Q in P1xP1 coords
- v.fromP1xP1(tmp1) // update v
- }
- tmp2.FromP3(v) // set up tmp2 = v in P2 coords for next iteration
- }
- return v
-}
-
-// VarTimeMultiScalarMult sets v = sum(scalars[i] * points[i]), and returns v.
-//
-// Execution time depends on the inputs.
-func (v *Point) VarTimeMultiScalarMult(scalars []*Scalar, points []*Point) *Point {
- if len(scalars) != len(points) {
- panic("edwards25519: called VarTimeMultiScalarMult with different size inputs")
- }
- checkInitialized(points...)
-
- // Generalize double-base NAF computation to arbitrary sizes.
- // Here all the points are dynamic, so we only use the smaller
- // tables.
-
- // Build lookup tables for each point
- tables := make([]nafLookupTable5, len(points))
- for i := range tables {
- tables[i].FromP3(points[i])
- }
- // Compute a NAF for each scalar
- nafs := make([][256]int8, len(scalars))
- for i := range nafs {
- nafs[i] = scalars[i].nonAdjacentForm(5)
- }
-
- multiple := &projCached{}
- tmp1 := &projP1xP1{}
- tmp2 := &projP2{}
- tmp2.Zero()
-
- // Move from high to low bits, doubling the accumulator
- // at each iteration and checking whether there is a nonzero
- // coefficient to look up a multiple of.
- //
- // Skip trying to find the first nonzero coefficent, because
- // searching might be more work than a few extra doublings.
- for i := 255; i >= 0; i-- {
- tmp1.Double(tmp2)
-
- for j := range nafs {
- if nafs[j][i] > 0 {
- v.fromP1xP1(tmp1)
- tables[j].SelectInto(multiple, nafs[j][i])
- tmp1.Add(v, multiple)
- } else if nafs[j][i] < 0 {
- v.fromP1xP1(tmp1)
- tables[j].SelectInto(multiple, -nafs[j][i])
- tmp1.Sub(v, multiple)
- }
- }
-
- tmp2.FromP1xP1(tmp1)
- }
-
- v.fromP2(tmp2)
- return v
-}
diff --git a/vendor/filippo.io/edwards25519/field/fe.go b/vendor/filippo.io/edwards25519/field/fe.go
deleted file mode 100644
index 5518ef2b..00000000
--- a/vendor/filippo.io/edwards25519/field/fe.go
+++ /dev/null
@@ -1,420 +0,0 @@
-// Copyright (c) 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package field implements fast arithmetic modulo 2^255-19.
-package field
-
-import (
- "crypto/subtle"
- "encoding/binary"
- "errors"
- "math/bits"
-)
-
-// Element represents an element of the field GF(2^255-19). Note that this
-// is not a cryptographically secure group, and should only be used to interact
-// with edwards25519.Point coordinates.
-//
-// This type works similarly to math/big.Int, and all arguments and receivers
-// are allowed to alias.
-//
-// The zero value is a valid zero element.
-type Element struct {
- // An element t represents the integer
- // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204
- //
- // Between operations, all limbs are expected to be lower than 2^52.
- l0 uint64
- l1 uint64
- l2 uint64
- l3 uint64
- l4 uint64
-}
-
-const maskLow51Bits uint64 = (1 << 51) - 1
-
-var feZero = &Element{0, 0, 0, 0, 0}
-
-// Zero sets v = 0, and returns v.
-func (v *Element) Zero() *Element {
- *v = *feZero
- return v
-}
-
-var feOne = &Element{1, 0, 0, 0, 0}
-
-// One sets v = 1, and returns v.
-func (v *Element) One() *Element {
- *v = *feOne
- return v
-}
-
-// reduce reduces v modulo 2^255 - 19 and returns it.
-func (v *Element) reduce() *Element {
- v.carryPropagate()
-
- // After the light reduction we now have a field element representation
- // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19.
-
- // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1,
- // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise.
- c := (v.l0 + 19) >> 51
- c = (v.l1 + c) >> 51
- c = (v.l2 + c) >> 51
- c = (v.l3 + c) >> 51
- c = (v.l4 + c) >> 51
-
- // If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's
- // effectively applying the reduction identity to the carry.
- v.l0 += 19 * c
-
- v.l1 += v.l0 >> 51
- v.l0 = v.l0 & maskLow51Bits
- v.l2 += v.l1 >> 51
- v.l1 = v.l1 & maskLow51Bits
- v.l3 += v.l2 >> 51
- v.l2 = v.l2 & maskLow51Bits
- v.l4 += v.l3 >> 51
- v.l3 = v.l3 & maskLow51Bits
- // no additional carry
- v.l4 = v.l4 & maskLow51Bits
-
- return v
-}
-
-// Add sets v = a + b, and returns v.
-func (v *Element) Add(a, b *Element) *Element {
- v.l0 = a.l0 + b.l0
- v.l1 = a.l1 + b.l1
- v.l2 = a.l2 + b.l2
- v.l3 = a.l3 + b.l3
- v.l4 = a.l4 + b.l4
- // Using the generic implementation here is actually faster than the
- // assembly. Probably because the body of this function is so simple that
- // the compiler can figure out better optimizations by inlining the carry
- // propagation.
- return v.carryPropagateGeneric()
-}
-
-// Subtract sets v = a - b, and returns v.
-func (v *Element) Subtract(a, b *Element) *Element {
- // We first add 2 * p, to guarantee the subtraction won't underflow, and
- // then subtract b (which can be up to 2^255 + 2^13 * 19).
- v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0
- v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1
- v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2
- v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3
- v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4
- return v.carryPropagate()
-}
-
-// Negate sets v = -a, and returns v.
-func (v *Element) Negate(a *Element) *Element {
- return v.Subtract(feZero, a)
-}
-
-// Invert sets v = 1/z mod p, and returns v.
-//
-// If z == 0, Invert returns v = 0.
-func (v *Element) Invert(z *Element) *Element {
- // Inversion is implemented as exponentiation with exponent p − 2. It uses the
- // same sequence of 255 squarings and 11 multiplications as [Curve25519].
- var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element
-
- z2.Square(z) // 2
- t.Square(&z2) // 4
- t.Square(&t) // 8
- z9.Multiply(&t, z) // 9
- z11.Multiply(&z9, &z2) // 11
- t.Square(&z11) // 22
- z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0
-
- t.Square(&z2_5_0) // 2^6 - 2^1
- for i := 0; i < 4; i++ {
- t.Square(&t) // 2^10 - 2^5
- }
- z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0
-
- t.Square(&z2_10_0) // 2^11 - 2^1
- for i := 0; i < 9; i++ {
- t.Square(&t) // 2^20 - 2^10
- }
- z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0
-
- t.Square(&z2_20_0) // 2^21 - 2^1
- for i := 0; i < 19; i++ {
- t.Square(&t) // 2^40 - 2^20
- }
- t.Multiply(&t, &z2_20_0) // 2^40 - 2^0
-
- t.Square(&t) // 2^41 - 2^1
- for i := 0; i < 9; i++ {
- t.Square(&t) // 2^50 - 2^10
- }
- z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0
-
- t.Square(&z2_50_0) // 2^51 - 2^1
- for i := 0; i < 49; i++ {
- t.Square(&t) // 2^100 - 2^50
- }
- z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0
-
- t.Square(&z2_100_0) // 2^101 - 2^1
- for i := 0; i < 99; i++ {
- t.Square(&t) // 2^200 - 2^100
- }
- t.Multiply(&t, &z2_100_0) // 2^200 - 2^0
-
- t.Square(&t) // 2^201 - 2^1
- for i := 0; i < 49; i++ {
- t.Square(&t) // 2^250 - 2^50
- }
- t.Multiply(&t, &z2_50_0) // 2^250 - 2^0
-
- t.Square(&t) // 2^251 - 2^1
- t.Square(&t) // 2^252 - 2^2
- t.Square(&t) // 2^253 - 2^3
- t.Square(&t) // 2^254 - 2^4
- t.Square(&t) // 2^255 - 2^5
-
- return v.Multiply(&t, &z11) // 2^255 - 21
-}
-
-// Set sets v = a, and returns v.
-func (v *Element) Set(a *Element) *Element {
- *v = *a
- return v
-}
-
-// SetBytes sets v to x, where x is a 32-byte little-endian encoding. If x is
-// not of the right length, SetBytes returns nil and an error, and the
-// receiver is unchanged.
-//
-// Consistent with RFC 7748, the most significant bit (the high bit of the
-// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
-// are accepted. Note that this is laxer than specified by RFC 8032, but
-// consistent with most Ed25519 implementations.
-func (v *Element) SetBytes(x []byte) (*Element, error) {
- if len(x) != 32 {
- return nil, errors.New("edwards25519: invalid field element input size")
- }
-
- // Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
- v.l0 = binary.LittleEndian.Uint64(x[0:8])
- v.l0 &= maskLow51Bits
- // Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51).
- v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3
- v.l1 &= maskLow51Bits
- // Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51).
- v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6
- v.l2 &= maskLow51Bits
- // Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
- v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
- v.l3 &= maskLow51Bits
- // Bits 204:255 (bytes 24:32, bits 192:256, shift 12, mask 51).
- // Note: not bytes 25:33, shift 4, to avoid overread.
- v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
- v.l4 &= maskLow51Bits
-
- return v, nil
-}
-
-// Bytes returns the canonical 32-byte little-endian encoding of v.
-func (v *Element) Bytes() []byte {
- // This function is outlined to make the allocations inline in the caller
- // rather than happen on the heap.
- var out [32]byte
- return v.bytes(&out)
-}
-
-func (v *Element) bytes(out *[32]byte) []byte {
- t := *v
- t.reduce()
-
- var buf [8]byte
- for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} {
- bitsOffset := i * 51
- binary.LittleEndian.PutUint64(buf[:], l<= len(out) {
- break
- }
- out[off] |= bb
- }
- }
-
- return out[:]
-}
-
-// Equal returns 1 if v and u are equal, and 0 otherwise.
-func (v *Element) Equal(u *Element) int {
- sa, sv := u.Bytes(), v.Bytes()
- return subtle.ConstantTimeCompare(sa, sv)
-}
-
-// mask64Bits returns 0xffffffff if cond is 1, and 0 otherwise.
-func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) }
-
-// Select sets v to a if cond == 1, and to b if cond == 0.
-func (v *Element) Select(a, b *Element, cond int) *Element {
- m := mask64Bits(cond)
- v.l0 = (m & a.l0) | (^m & b.l0)
- v.l1 = (m & a.l1) | (^m & b.l1)
- v.l2 = (m & a.l2) | (^m & b.l2)
- v.l3 = (m & a.l3) | (^m & b.l3)
- v.l4 = (m & a.l4) | (^m & b.l4)
- return v
-}
-
-// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0, and returns v.
-func (v *Element) Swap(u *Element, cond int) {
- m := mask64Bits(cond)
- t := m & (v.l0 ^ u.l0)
- v.l0 ^= t
- u.l0 ^= t
- t = m & (v.l1 ^ u.l1)
- v.l1 ^= t
- u.l1 ^= t
- t = m & (v.l2 ^ u.l2)
- v.l2 ^= t
- u.l2 ^= t
- t = m & (v.l3 ^ u.l3)
- v.l3 ^= t
- u.l3 ^= t
- t = m & (v.l4 ^ u.l4)
- v.l4 ^= t
- u.l4 ^= t
-}
-
-// IsNegative returns 1 if v is negative, and 0 otherwise.
-func (v *Element) IsNegative() int {
- return int(v.Bytes()[0] & 1)
-}
-
-// Absolute sets v to |u|, and returns v.
-func (v *Element) Absolute(u *Element) *Element {
- return v.Select(new(Element).Negate(u), u, u.IsNegative())
-}
-
-// Multiply sets v = x * y, and returns v.
-func (v *Element) Multiply(x, y *Element) *Element {
- feMul(v, x, y)
- return v
-}
-
-// Square sets v = x * x, and returns v.
-func (v *Element) Square(x *Element) *Element {
- feSquare(v, x)
- return v
-}
-
-// Mult32 sets v = x * y, and returns v.
-func (v *Element) Mult32(x *Element, y uint32) *Element {
- x0lo, x0hi := mul51(x.l0, y)
- x1lo, x1hi := mul51(x.l1, y)
- x2lo, x2hi := mul51(x.l2, y)
- x3lo, x3hi := mul51(x.l3, y)
- x4lo, x4hi := mul51(x.l4, y)
- v.l0 = x0lo + 19*x4hi // carried over per the reduction identity
- v.l1 = x1lo + x0hi
- v.l2 = x2lo + x1hi
- v.l3 = x3lo + x2hi
- v.l4 = x4lo + x3hi
- // The hi portions are going to be only 32 bits, plus any previous excess,
- // so we can skip the carry propagation.
- return v
-}
-
-// mul51 returns lo + hi * 2⁵¹ = a * b.
-func mul51(a uint64, b uint32) (lo uint64, hi uint64) {
- mh, ml := bits.Mul64(a, uint64(b))
- lo = ml & maskLow51Bits
- hi = (mh << 13) | (ml >> 51)
- return
-}
-
-// Pow22523 set v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3.
-func (v *Element) Pow22523(x *Element) *Element {
- var t0, t1, t2 Element
-
- t0.Square(x) // x^2
- t1.Square(&t0) // x^4
- t1.Square(&t1) // x^8
- t1.Multiply(x, &t1) // x^9
- t0.Multiply(&t0, &t1) // x^11
- t0.Square(&t0) // x^22
- t0.Multiply(&t1, &t0) // x^31
- t1.Square(&t0) // x^62
- for i := 1; i < 5; i++ { // x^992
- t1.Square(&t1)
- }
- t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1
- t1.Square(&t0) // 2^11 - 2
- for i := 1; i < 10; i++ { // 2^20 - 2^10
- t1.Square(&t1)
- }
- t1.Multiply(&t1, &t0) // 2^20 - 1
- t2.Square(&t1) // 2^21 - 2
- for i := 1; i < 20; i++ { // 2^40 - 2^20
- t2.Square(&t2)
- }
- t1.Multiply(&t2, &t1) // 2^40 - 1
- t1.Square(&t1) // 2^41 - 2
- for i := 1; i < 10; i++ { // 2^50 - 2^10
- t1.Square(&t1)
- }
- t0.Multiply(&t1, &t0) // 2^50 - 1
- t1.Square(&t0) // 2^51 - 2
- for i := 1; i < 50; i++ { // 2^100 - 2^50
- t1.Square(&t1)
- }
- t1.Multiply(&t1, &t0) // 2^100 - 1
- t2.Square(&t1) // 2^101 - 2
- for i := 1; i < 100; i++ { // 2^200 - 2^100
- t2.Square(&t2)
- }
- t1.Multiply(&t2, &t1) // 2^200 - 1
- t1.Square(&t1) // 2^201 - 2
- for i := 1; i < 50; i++ { // 2^250 - 2^50
- t1.Square(&t1)
- }
- t0.Multiply(&t1, &t0) // 2^250 - 1
- t0.Square(&t0) // 2^251 - 2
- t0.Square(&t0) // 2^252 - 4
- return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3)
-}
-
-// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion.
-var sqrtM1 = &Element{1718705420411056, 234908883556509,
- 2233514472574048, 2117202627021982, 765476049583133}
-
-// SqrtRatio sets r to the non-negative square root of the ratio of u and v.
-//
-// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio
-// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00,
-// and returns r and 0.
-func (r *Element) SqrtRatio(u, v *Element) (R *Element, wasSquare int) {
- t0 := new(Element)
-
- // r = (u * v3) * (u * v7)^((p-5)/8)
- v2 := new(Element).Square(v)
- uv3 := new(Element).Multiply(u, t0.Multiply(v2, v))
- uv7 := new(Element).Multiply(uv3, t0.Square(v2))
- rr := new(Element).Multiply(uv3, t0.Pow22523(uv7))
-
- check := new(Element).Multiply(v, t0.Square(rr)) // check = v * r^2
-
- uNeg := new(Element).Negate(u)
- correctSignSqrt := check.Equal(u)
- flippedSignSqrt := check.Equal(uNeg)
- flippedSignSqrtI := check.Equal(t0.Multiply(uNeg, sqrtM1))
-
- rPrime := new(Element).Multiply(rr, sqrtM1) // r_prime = SQRT_M1 * r
- // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r)
- rr.Select(rPrime, rr, flippedSignSqrt|flippedSignSqrtI)
-
- r.Absolute(rr) // Choose the nonnegative square root.
- return r, correctSignSqrt | flippedSignSqrt
-}
diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64.go b/vendor/filippo.io/edwards25519/field/fe_amd64.go
deleted file mode 100644
index edcf163c..00000000
--- a/vendor/filippo.io/edwards25519/field/fe_amd64.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
-
-//go:build amd64 && gc && !purego
-// +build amd64,gc,!purego
-
-package field
-
-// feMul sets out = a * b. It works like feMulGeneric.
-//
-//go:noescape
-func feMul(out *Element, a *Element, b *Element)
-
-// feSquare sets out = a * a. It works like feSquareGeneric.
-//
-//go:noescape
-func feSquare(out *Element, a *Element)
diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64.s b/vendor/filippo.io/edwards25519/field/fe_amd64.s
deleted file mode 100644
index 293f013c..00000000
--- a/vendor/filippo.io/edwards25519/field/fe_amd64.s
+++ /dev/null
@@ -1,379 +0,0 @@
-// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
-
-//go:build amd64 && gc && !purego
-// +build amd64,gc,!purego
-
-#include "textflag.h"
-
-// func feMul(out *Element, a *Element, b *Element)
-TEXT ·feMul(SB), NOSPLIT, $0-24
- MOVQ a+8(FP), CX
- MOVQ b+16(FP), BX
-
- // r0 = a0×b0
- MOVQ (CX), AX
- MULQ (BX)
- MOVQ AX, DI
- MOVQ DX, SI
-
- // r0 += 19×a1×b4
- MOVQ 8(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 32(BX)
- ADDQ AX, DI
- ADCQ DX, SI
-
- // r0 += 19×a2×b3
- MOVQ 16(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 24(BX)
- ADDQ AX, DI
- ADCQ DX, SI
-
- // r0 += 19×a3×b2
- MOVQ 24(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 16(BX)
- ADDQ AX, DI
- ADCQ DX, SI
-
- // r0 += 19×a4×b1
- MOVQ 32(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 8(BX)
- ADDQ AX, DI
- ADCQ DX, SI
-
- // r1 = a0×b1
- MOVQ (CX), AX
- MULQ 8(BX)
- MOVQ AX, R9
- MOVQ DX, R8
-
- // r1 += a1×b0
- MOVQ 8(CX), AX
- MULQ (BX)
- ADDQ AX, R9
- ADCQ DX, R8
-
- // r1 += 19×a2×b4
- MOVQ 16(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 32(BX)
- ADDQ AX, R9
- ADCQ DX, R8
-
- // r1 += 19×a3×b3
- MOVQ 24(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 24(BX)
- ADDQ AX, R9
- ADCQ DX, R8
-
- // r1 += 19×a4×b2
- MOVQ 32(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 16(BX)
- ADDQ AX, R9
- ADCQ DX, R8
-
- // r2 = a0×b2
- MOVQ (CX), AX
- MULQ 16(BX)
- MOVQ AX, R11
- MOVQ DX, R10
-
- // r2 += a1×b1
- MOVQ 8(CX), AX
- MULQ 8(BX)
- ADDQ AX, R11
- ADCQ DX, R10
-
- // r2 += a2×b0
- MOVQ 16(CX), AX
- MULQ (BX)
- ADDQ AX, R11
- ADCQ DX, R10
-
- // r2 += 19×a3×b4
- MOVQ 24(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 32(BX)
- ADDQ AX, R11
- ADCQ DX, R10
-
- // r2 += 19×a4×b3
- MOVQ 32(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 24(BX)
- ADDQ AX, R11
- ADCQ DX, R10
-
- // r3 = a0×b3
- MOVQ (CX), AX
- MULQ 24(BX)
- MOVQ AX, R13
- MOVQ DX, R12
-
- // r3 += a1×b2
- MOVQ 8(CX), AX
- MULQ 16(BX)
- ADDQ AX, R13
- ADCQ DX, R12
-
- // r3 += a2×b1
- MOVQ 16(CX), AX
- MULQ 8(BX)
- ADDQ AX, R13
- ADCQ DX, R12
-
- // r3 += a3×b0
- MOVQ 24(CX), AX
- MULQ (BX)
- ADDQ AX, R13
- ADCQ DX, R12
-
- // r3 += 19×a4×b4
- MOVQ 32(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 32(BX)
- ADDQ AX, R13
- ADCQ DX, R12
-
- // r4 = a0×b4
- MOVQ (CX), AX
- MULQ 32(BX)
- MOVQ AX, R15
- MOVQ DX, R14
-
- // r4 += a1×b3
- MOVQ 8(CX), AX
- MULQ 24(BX)
- ADDQ AX, R15
- ADCQ DX, R14
-
- // r4 += a2×b2
- MOVQ 16(CX), AX
- MULQ 16(BX)
- ADDQ AX, R15
- ADCQ DX, R14
-
- // r4 += a3×b1
- MOVQ 24(CX), AX
- MULQ 8(BX)
- ADDQ AX, R15
- ADCQ DX, R14
-
- // r4 += a4×b0
- MOVQ 32(CX), AX
- MULQ (BX)
- ADDQ AX, R15
- ADCQ DX, R14
-
- // First reduction chain
- MOVQ $0x0007ffffffffffff, AX
- SHLQ $0x0d, DI, SI
- SHLQ $0x0d, R9, R8
- SHLQ $0x0d, R11, R10
- SHLQ $0x0d, R13, R12
- SHLQ $0x0d, R15, R14
- ANDQ AX, DI
- IMUL3Q $0x13, R14, R14
- ADDQ R14, DI
- ANDQ AX, R9
- ADDQ SI, R9
- ANDQ AX, R11
- ADDQ R8, R11
- ANDQ AX, R13
- ADDQ R10, R13
- ANDQ AX, R15
- ADDQ R12, R15
-
- // Second reduction chain (carryPropagate)
- MOVQ DI, SI
- SHRQ $0x33, SI
- MOVQ R9, R8
- SHRQ $0x33, R8
- MOVQ R11, R10
- SHRQ $0x33, R10
- MOVQ R13, R12
- SHRQ $0x33, R12
- MOVQ R15, R14
- SHRQ $0x33, R14
- ANDQ AX, DI
- IMUL3Q $0x13, R14, R14
- ADDQ R14, DI
- ANDQ AX, R9
- ADDQ SI, R9
- ANDQ AX, R11
- ADDQ R8, R11
- ANDQ AX, R13
- ADDQ R10, R13
- ANDQ AX, R15
- ADDQ R12, R15
-
- // Store output
- MOVQ out+0(FP), AX
- MOVQ DI, (AX)
- MOVQ R9, 8(AX)
- MOVQ R11, 16(AX)
- MOVQ R13, 24(AX)
- MOVQ R15, 32(AX)
- RET
-
-// func feSquare(out *Element, a *Element)
-TEXT ·feSquare(SB), NOSPLIT, $0-16
- MOVQ a+8(FP), CX
-
- // r0 = l0×l0
- MOVQ (CX), AX
- MULQ (CX)
- MOVQ AX, SI
- MOVQ DX, BX
-
- // r0 += 38×l1×l4
- MOVQ 8(CX), AX
- IMUL3Q $0x26, AX, AX
- MULQ 32(CX)
- ADDQ AX, SI
- ADCQ DX, BX
-
- // r0 += 38×l2×l3
- MOVQ 16(CX), AX
- IMUL3Q $0x26, AX, AX
- MULQ 24(CX)
- ADDQ AX, SI
- ADCQ DX, BX
-
- // r1 = 2×l0×l1
- MOVQ (CX), AX
- SHLQ $0x01, AX
- MULQ 8(CX)
- MOVQ AX, R8
- MOVQ DX, DI
-
- // r1 += 38×l2×l4
- MOVQ 16(CX), AX
- IMUL3Q $0x26, AX, AX
- MULQ 32(CX)
- ADDQ AX, R8
- ADCQ DX, DI
-
- // r1 += 19×l3×l3
- MOVQ 24(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 24(CX)
- ADDQ AX, R8
- ADCQ DX, DI
-
- // r2 = 2×l0×l2
- MOVQ (CX), AX
- SHLQ $0x01, AX
- MULQ 16(CX)
- MOVQ AX, R10
- MOVQ DX, R9
-
- // r2 += l1×l1
- MOVQ 8(CX), AX
- MULQ 8(CX)
- ADDQ AX, R10
- ADCQ DX, R9
-
- // r2 += 38×l3×l4
- MOVQ 24(CX), AX
- IMUL3Q $0x26, AX, AX
- MULQ 32(CX)
- ADDQ AX, R10
- ADCQ DX, R9
-
- // r3 = 2×l0×l3
- MOVQ (CX), AX
- SHLQ $0x01, AX
- MULQ 24(CX)
- MOVQ AX, R12
- MOVQ DX, R11
-
- // r3 += 2×l1×l2
- MOVQ 8(CX), AX
- IMUL3Q $0x02, AX, AX
- MULQ 16(CX)
- ADDQ AX, R12
- ADCQ DX, R11
-
- // r3 += 19×l4×l4
- MOVQ 32(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 32(CX)
- ADDQ AX, R12
- ADCQ DX, R11
-
- // r4 = 2×l0×l4
- MOVQ (CX), AX
- SHLQ $0x01, AX
- MULQ 32(CX)
- MOVQ AX, R14
- MOVQ DX, R13
-
- // r4 += 2×l1×l3
- MOVQ 8(CX), AX
- IMUL3Q $0x02, AX, AX
- MULQ 24(CX)
- ADDQ AX, R14
- ADCQ DX, R13
-
- // r4 += l2×l2
- MOVQ 16(CX), AX
- MULQ 16(CX)
- ADDQ AX, R14
- ADCQ DX, R13
-
- // First reduction chain
- MOVQ $0x0007ffffffffffff, AX
- SHLQ $0x0d, SI, BX
- SHLQ $0x0d, R8, DI
- SHLQ $0x0d, R10, R9
- SHLQ $0x0d, R12, R11
- SHLQ $0x0d, R14, R13
- ANDQ AX, SI
- IMUL3Q $0x13, R13, R13
- ADDQ R13, SI
- ANDQ AX, R8
- ADDQ BX, R8
- ANDQ AX, R10
- ADDQ DI, R10
- ANDQ AX, R12
- ADDQ R9, R12
- ANDQ AX, R14
- ADDQ R11, R14
-
- // Second reduction chain (carryPropagate)
- MOVQ SI, BX
- SHRQ $0x33, BX
- MOVQ R8, DI
- SHRQ $0x33, DI
- MOVQ R10, R9
- SHRQ $0x33, R9
- MOVQ R12, R11
- SHRQ $0x33, R11
- MOVQ R14, R13
- SHRQ $0x33, R13
- ANDQ AX, SI
- IMUL3Q $0x13, R13, R13
- ADDQ R13, SI
- ANDQ AX, R8
- ADDQ BX, R8
- ANDQ AX, R10
- ADDQ DI, R10
- ANDQ AX, R12
- ADDQ R9, R12
- ANDQ AX, R14
- ADDQ R11, R14
-
- // Store output
- MOVQ out+0(FP), AX
- MOVQ SI, (AX)
- MOVQ R8, 8(AX)
- MOVQ R10, 16(AX)
- MOVQ R12, 24(AX)
- MOVQ R14, 32(AX)
- RET
diff --git a/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go b/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go
deleted file mode 100644
index ddb6c9b8..00000000
--- a/vendor/filippo.io/edwards25519/field/fe_amd64_noasm.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright (c) 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !amd64 || !gc || purego
-// +build !amd64 !gc purego
-
-package field
-
-func feMul(v, x, y *Element) { feMulGeneric(v, x, y) }
-
-func feSquare(v, x *Element) { feSquareGeneric(v, x) }
diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64.go b/vendor/filippo.io/edwards25519/field/fe_arm64.go
deleted file mode 100644
index af459ef5..00000000
--- a/vendor/filippo.io/edwards25519/field/fe_arm64.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright (c) 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build arm64 && gc && !purego
-// +build arm64,gc,!purego
-
-package field
-
-//go:noescape
-func carryPropagate(v *Element)
-
-func (v *Element) carryPropagate() *Element {
- carryPropagate(v)
- return v
-}
diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64.s b/vendor/filippo.io/edwards25519/field/fe_arm64.s
deleted file mode 100644
index 3126a434..00000000
--- a/vendor/filippo.io/edwards25519/field/fe_arm64.s
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (c) 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build arm64 && gc && !purego
-
-#include "textflag.h"
-
-// carryPropagate works exactly like carryPropagateGeneric and uses the
-// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but
-// avoids loading R0-R4 twice and uses LDP and STP.
-//
-// See https://golang.org/issues/43145 for the main compiler issue.
-//
-// func carryPropagate(v *Element)
-TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8
- MOVD v+0(FP), R20
-
- LDP 0(R20), (R0, R1)
- LDP 16(R20), (R2, R3)
- MOVD 32(R20), R4
-
- AND $0x7ffffffffffff, R0, R10
- AND $0x7ffffffffffff, R1, R11
- AND $0x7ffffffffffff, R2, R12
- AND $0x7ffffffffffff, R3, R13
- AND $0x7ffffffffffff, R4, R14
-
- ADD R0>>51, R11, R11
- ADD R1>>51, R12, R12
- ADD R2>>51, R13, R13
- ADD R3>>51, R14, R14
- // R4>>51 * 19 + R10 -> R10
- LSR $51, R4, R21
- MOVD $19, R22
- MADD R22, R10, R21, R10
-
- STP (R10, R11), 0(R20)
- STP (R12, R13), 16(R20)
- MOVD R14, 32(R20)
-
- RET
diff --git a/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go b/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go
deleted file mode 100644
index 234a5b2e..00000000
--- a/vendor/filippo.io/edwards25519/field/fe_arm64_noasm.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright (c) 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !arm64 || !gc || purego
-// +build !arm64 !gc purego
-
-package field
-
-func (v *Element) carryPropagate() *Element {
- return v.carryPropagateGeneric()
-}
diff --git a/vendor/filippo.io/edwards25519/field/fe_extra.go b/vendor/filippo.io/edwards25519/field/fe_extra.go
deleted file mode 100644
index 1ef503b9..00000000
--- a/vendor/filippo.io/edwards25519/field/fe_extra.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package field
-
-import "errors"
-
-// This file contains additional functionality that is not included in the
-// upstream crypto/ed25519/edwards25519/field package.
-
-// SetWideBytes sets v to x, where x is a 64-byte little-endian encoding, which
-// is reduced modulo the field order. If x is not of the right length,
-// SetWideBytes returns nil and an error, and the receiver is unchanged.
-//
-// SetWideBytes is not necessary to select a uniformly distributed value, and is
-// only provided for compatibility: SetBytes can be used instead as the chance
-// of bias is less than 2⁻²⁵⁰.
-func (v *Element) SetWideBytes(x []byte) (*Element, error) {
- if len(x) != 64 {
- return nil, errors.New("edwards25519: invalid SetWideBytes input size")
- }
-
- // Split the 64 bytes into two elements, and extract the most significant
- // bit of each, which is ignored by SetBytes.
- lo, _ := new(Element).SetBytes(x[:32])
- loMSB := uint64(x[31] >> 7)
- hi, _ := new(Element).SetBytes(x[32:])
- hiMSB := uint64(x[63] >> 7)
-
- // The output we want is
- //
- // v = lo + loMSB * 2²⁵⁵ + hi * 2²⁵⁶ + hiMSB * 2⁵¹¹
- //
- // which applying the reduction identity comes out to
- //
- // v = lo + loMSB * 19 + hi * 2 * 19 + hiMSB * 2 * 19²
- //
- // l0 will be the sum of a 52 bits value (lo.l0), plus a 5 bits value
- // (loMSB * 19), a 6 bits value (hi.l0 * 2 * 19), and a 10 bits value
- // (hiMSB * 2 * 19²), so it fits in a uint64.
-
- v.l0 = lo.l0 + loMSB*19 + hi.l0*2*19 + hiMSB*2*19*19
- v.l1 = lo.l1 + hi.l1*2*19
- v.l2 = lo.l2 + hi.l2*2*19
- v.l3 = lo.l3 + hi.l3*2*19
- v.l4 = lo.l4 + hi.l4*2*19
-
- return v.carryPropagate(), nil
-}
diff --git a/vendor/filippo.io/edwards25519/field/fe_generic.go b/vendor/filippo.io/edwards25519/field/fe_generic.go
deleted file mode 100644
index 86f5fd95..00000000
--- a/vendor/filippo.io/edwards25519/field/fe_generic.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright (c) 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package field
-
-import "math/bits"
-
-// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
-// bits.Mul64 and bits.Add64 intrinsics.
-type uint128 struct {
- lo, hi uint64
-}
-
-// mul64 returns a * b.
-func mul64(a, b uint64) uint128 {
- hi, lo := bits.Mul64(a, b)
- return uint128{lo, hi}
-}
-
-// addMul64 returns v + a * b.
-func addMul64(v uint128, a, b uint64) uint128 {
- hi, lo := bits.Mul64(a, b)
- lo, c := bits.Add64(lo, v.lo, 0)
- hi, _ = bits.Add64(hi, v.hi, c)
- return uint128{lo, hi}
-}
-
-// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits.
-func shiftRightBy51(a uint128) uint64 {
- return (a.hi << (64 - 51)) | (a.lo >> 51)
-}
-
-func feMulGeneric(v, a, b *Element) {
- a0 := a.l0
- a1 := a.l1
- a2 := a.l2
- a3 := a.l3
- a4 := a.l4
-
- b0 := b.l0
- b1 := b.l1
- b2 := b.l2
- b3 := b.l3
- b4 := b.l4
-
- // Limb multiplication works like pen-and-paper columnar multiplication, but
- // with 51-bit limbs instead of digits.
- //
- // a4 a3 a2 a1 a0 x
- // b4 b3 b2 b1 b0 =
- // ------------------------
- // a4b0 a3b0 a2b0 a1b0 a0b0 +
- // a4b1 a3b1 a2b1 a1b1 a0b1 +
- // a4b2 a3b2 a2b2 a1b2 a0b2 +
- // a4b3 a3b3 a2b3 a1b3 a0b3 +
- // a4b4 a3b4 a2b4 a1b4 a0b4 =
- // ----------------------------------------------
- // r8 r7 r6 r5 r4 r3 r2 r1 r0
- //
- // We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to
- // reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5,
- // r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc.
- //
- // Reduction can be carried out simultaneously to multiplication. For
- // example, we do not compute r5: whenever the result of a multiplication
- // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0.
- //
- // a4b0 a3b0 a2b0 a1b0 a0b0 +
- // a3b1 a2b1 a1b1 a0b1 19×a4b1 +
- // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 +
- // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 +
- // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 =
- // --------------------------------------
- // r4 r3 r2 r1 r0
- //
- // Finally we add up the columns into wide, overlapping limbs.
-
- a1_19 := a1 * 19
- a2_19 := a2 * 19
- a3_19 := a3 * 19
- a4_19 := a4 * 19
-
- // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
- r0 := mul64(a0, b0)
- r0 = addMul64(r0, a1_19, b4)
- r0 = addMul64(r0, a2_19, b3)
- r0 = addMul64(r0, a3_19, b2)
- r0 = addMul64(r0, a4_19, b1)
-
- // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2)
- r1 := mul64(a0, b1)
- r1 = addMul64(r1, a1, b0)
- r1 = addMul64(r1, a2_19, b4)
- r1 = addMul64(r1, a3_19, b3)
- r1 = addMul64(r1, a4_19, b2)
-
- // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3)
- r2 := mul64(a0, b2)
- r2 = addMul64(r2, a1, b1)
- r2 = addMul64(r2, a2, b0)
- r2 = addMul64(r2, a3_19, b4)
- r2 = addMul64(r2, a4_19, b3)
-
- // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4
- r3 := mul64(a0, b3)
- r3 = addMul64(r3, a1, b2)
- r3 = addMul64(r3, a2, b1)
- r3 = addMul64(r3, a3, b0)
- r3 = addMul64(r3, a4_19, b4)
-
- // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
- r4 := mul64(a0, b4)
- r4 = addMul64(r4, a1, b3)
- r4 = addMul64(r4, a2, b2)
- r4 = addMul64(r4, a3, b1)
- r4 = addMul64(r4, a4, b0)
-
- // After the multiplication, we need to reduce (carry) the five coefficients
- // to obtain a result with limbs that are at most slightly larger than 2⁵¹,
- // to respect the Element invariant.
- //
- // Overall, the reduction works the same as carryPropagate, except with
- // wider inputs: we take the carry for each coefficient by shifting it right
- // by 51, and add it to the limb above it. The top carry is multiplied by 19
- // according to the reduction identity and added to the lowest limb.
- //
- // The largest coefficient (r0) will be at most 111 bits, which guarantees
- // that all carries are at most 111 - 51 = 60 bits, which fits in a uint64.
- //
- // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
- // r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²)
- // r0 < (1 + 19 × 4) × 2⁵² × 2⁵²
- // r0 < 2⁷ × 2⁵² × 2⁵²
- // r0 < 2¹¹¹
- //
- // Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most
- // 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and
- // allows us to easily apply the reduction identity.
- //
- // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
- // r4 < 5 × 2⁵² × 2⁵²
- // r4 < 2¹⁰⁷
- //
-
- c0 := shiftRightBy51(r0)
- c1 := shiftRightBy51(r1)
- c2 := shiftRightBy51(r2)
- c3 := shiftRightBy51(r3)
- c4 := shiftRightBy51(r4)
-
- rr0 := r0.lo&maskLow51Bits + c4*19
- rr1 := r1.lo&maskLow51Bits + c0
- rr2 := r2.lo&maskLow51Bits + c1
- rr3 := r3.lo&maskLow51Bits + c2
- rr4 := r4.lo&maskLow51Bits + c3
-
- // Now all coefficients fit into 64-bit registers but are still too large to
- // be passed around as an Element. We therefore do one last carry chain,
- // where the carries will be small enough to fit in the wiggle room above 2⁵¹.
- *v = Element{rr0, rr1, rr2, rr3, rr4}
- v.carryPropagate()
-}
-
-func feSquareGeneric(v, a *Element) {
- l0 := a.l0
- l1 := a.l1
- l2 := a.l2
- l3 := a.l3
- l4 := a.l4
-
- // Squaring works precisely like multiplication above, but thanks to its
- // symmetry we get to group a few terms together.
- //
- // l4 l3 l2 l1 l0 x
- // l4 l3 l2 l1 l0 =
- // ------------------------
- // l4l0 l3l0 l2l0 l1l0 l0l0 +
- // l4l1 l3l1 l2l1 l1l1 l0l1 +
- // l4l2 l3l2 l2l2 l1l2 l0l2 +
- // l4l3 l3l3 l2l3 l1l3 l0l3 +
- // l4l4 l3l4 l2l4 l1l4 l0l4 =
- // ----------------------------------------------
- // r8 r7 r6 r5 r4 r3 r2 r1 r0
- //
- // l4l0 l3l0 l2l0 l1l0 l0l0 +
- // l3l1 l2l1 l1l1 l0l1 19×l4l1 +
- // l2l2 l1l2 l0l2 19×l4l2 19×l3l2 +
- // l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 +
- // l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 =
- // --------------------------------------
- // r4 r3 r2 r1 r0
- //
- // With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with
- // only three Mul64 and four Add64, instead of five and eight.
-
- l0_2 := l0 * 2
- l1_2 := l1 * 2
-
- l1_38 := l1 * 38
- l2_38 := l2 * 38
- l3_38 := l3 * 38
-
- l3_19 := l3 * 19
- l4_19 := l4 * 19
-
- // r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3)
- r0 := mul64(l0, l0)
- r0 = addMul64(r0, l1_38, l4)
- r0 = addMul64(r0, l2_38, l3)
-
- // r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
- r1 := mul64(l0_2, l1)
- r1 = addMul64(r1, l2_38, l4)
- r1 = addMul64(r1, l3_19, l3)
-
- // r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4
- r2 := mul64(l0_2, l2)
- r2 = addMul64(r2, l1, l1)
- r2 = addMul64(r2, l3_38, l4)
-
- // r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
- r3 := mul64(l0_2, l3)
- r3 = addMul64(r3, l1_2, l2)
- r3 = addMul64(r3, l4_19, l4)
-
- // r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2
- r4 := mul64(l0_2, l4)
- r4 = addMul64(r4, l1_2, l3)
- r4 = addMul64(r4, l2, l2)
-
- c0 := shiftRightBy51(r0)
- c1 := shiftRightBy51(r1)
- c2 := shiftRightBy51(r2)
- c3 := shiftRightBy51(r3)
- c4 := shiftRightBy51(r4)
-
- rr0 := r0.lo&maskLow51Bits + c4*19
- rr1 := r1.lo&maskLow51Bits + c0
- rr2 := r2.lo&maskLow51Bits + c1
- rr3 := r3.lo&maskLow51Bits + c2
- rr4 := r4.lo&maskLow51Bits + c3
-
- *v = Element{rr0, rr1, rr2, rr3, rr4}
- v.carryPropagate()
-}
-
-// carryPropagateGeneric brings the limbs below 52 bits by applying the reduction
-// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry.
-func (v *Element) carryPropagateGeneric() *Element {
- c0 := v.l0 >> 51
- c1 := v.l1 >> 51
- c2 := v.l2 >> 51
- c3 := v.l3 >> 51
- c4 := v.l4 >> 51
-
- // c4 is at most 64 - 51 = 13 bits, so c4*19 is at most 18 bits, and
- // the final l0 will be at most 52 bits. Similarly for the rest.
- v.l0 = v.l0&maskLow51Bits + c4*19
- v.l1 = v.l1&maskLow51Bits + c0
- v.l2 = v.l2&maskLow51Bits + c1
- v.l3 = v.l3&maskLow51Bits + c2
- v.l4 = v.l4&maskLow51Bits + c3
-
- return v
-}
diff --git a/vendor/filippo.io/edwards25519/scalar.go b/vendor/filippo.io/edwards25519/scalar.go
deleted file mode 100644
index 3fd16538..00000000
--- a/vendor/filippo.io/edwards25519/scalar.go
+++ /dev/null
@@ -1,343 +0,0 @@
-// Copyright (c) 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package edwards25519
-
-import (
- "encoding/binary"
- "errors"
-)
-
-// A Scalar is an integer modulo
-//
-// l = 2^252 + 27742317777372353535851937790883648493
-//
-// which is the prime order of the edwards25519 group.
-//
-// This type works similarly to math/big.Int, and all arguments and
-// receivers are allowed to alias.
-//
-// The zero value is a valid zero element.
-type Scalar struct {
- // s is the scalar in the Montgomery domain, in the format of the
- // fiat-crypto implementation.
- s fiatScalarMontgomeryDomainFieldElement
-}
-
-// The field implementation in scalar_fiat.go is generated by the fiat-crypto
-// project (https://github.com/mit-plv/fiat-crypto) at version v0.0.9 (23d2dbc)
-// from a formally verified model.
-//
-// fiat-crypto code comes under the following license.
-//
-// Copyright (c) 2015-2020 The fiat-crypto Authors. All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-//
-// THIS SOFTWARE IS PROVIDED BY the fiat-crypto authors "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Berkeley Software Design,
-// Inc. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-// NewScalar returns a new zero Scalar.
-func NewScalar() *Scalar {
- return &Scalar{}
-}
-
-// MultiplyAdd sets s = x * y + z mod l, and returns s. It is equivalent to
-// using Multiply and then Add.
-func (s *Scalar) MultiplyAdd(x, y, z *Scalar) *Scalar {
- // Make a copy of z in case it aliases s.
- zCopy := new(Scalar).Set(z)
- return s.Multiply(x, y).Add(s, zCopy)
-}
-
-// Add sets s = x + y mod l, and returns s.
-func (s *Scalar) Add(x, y *Scalar) *Scalar {
- // s = 1 * x + y mod l
- fiatScalarAdd(&s.s, &x.s, &y.s)
- return s
-}
-
-// Subtract sets s = x - y mod l, and returns s.
-func (s *Scalar) Subtract(x, y *Scalar) *Scalar {
- // s = -1 * y + x mod l
- fiatScalarSub(&s.s, &x.s, &y.s)
- return s
-}
-
-// Negate sets s = -x mod l, and returns s.
-func (s *Scalar) Negate(x *Scalar) *Scalar {
- // s = -1 * x + 0 mod l
- fiatScalarOpp(&s.s, &x.s)
- return s
-}
-
-// Multiply sets s = x * y mod l, and returns s.
-func (s *Scalar) Multiply(x, y *Scalar) *Scalar {
- // s = x * y + 0 mod l
- fiatScalarMul(&s.s, &x.s, &y.s)
- return s
-}
-
-// Set sets s = x, and returns s.
-func (s *Scalar) Set(x *Scalar) *Scalar {
- *s = *x
- return s
-}
-
-// SetUniformBytes sets s = x mod l, where x is a 64-byte little-endian integer.
-// If x is not of the right length, SetUniformBytes returns nil and an error,
-// and the receiver is unchanged.
-//
-// SetUniformBytes can be used to set s to a uniformly distributed value given
-// 64 uniformly distributed random bytes.
-func (s *Scalar) SetUniformBytes(x []byte) (*Scalar, error) {
- if len(x) != 64 {
- return nil, errors.New("edwards25519: invalid SetUniformBytes input length")
- }
-
- // We have a value x of 512 bits, but our fiatScalarFromBytes function
- // expects an input lower than l, which is a little over 252 bits.
- //
- // Instead of writing a reduction function that operates on wider inputs, we
- // can interpret x as the sum of three shorter values a, b, and c.
- //
- // x = a + b * 2^168 + c * 2^336 mod l
- //
- // We then precompute 2^168 and 2^336 modulo l, and perform the reduction
- // with two multiplications and two additions.
-
- s.setShortBytes(x[:21])
- t := new(Scalar).setShortBytes(x[21:42])
- s.Add(s, t.Multiply(t, scalarTwo168))
- t.setShortBytes(x[42:])
- s.Add(s, t.Multiply(t, scalarTwo336))
-
- return s, nil
-}
-
-// scalarTwo168 and scalarTwo336 are 2^168 and 2^336 modulo l, encoded as a
-// fiatScalarMontgomeryDomainFieldElement, which is a little-endian 4-limb value
-// in the 2^256 Montgomery domain.
-var scalarTwo168 = &Scalar{s: [4]uint64{0x5b8ab432eac74798, 0x38afddd6de59d5d7,
- 0xa2c131b399411b7c, 0x6329a7ed9ce5a30}}
-var scalarTwo336 = &Scalar{s: [4]uint64{0xbd3d108e2b35ecc5, 0x5c3a3718bdf9c90b,
- 0x63aa97a331b4f2ee, 0x3d217f5be65cb5c}}
-
-// setShortBytes sets s = x mod l, where x is a little-endian integer shorter
-// than 32 bytes.
-func (s *Scalar) setShortBytes(x []byte) *Scalar {
- if len(x) >= 32 {
- panic("edwards25519: internal error: setShortBytes called with a long string")
- }
- var buf [32]byte
- copy(buf[:], x)
- fiatScalarFromBytes((*[4]uint64)(&s.s), &buf)
- fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s))
- return s
-}
-
-// SetCanonicalBytes sets s = x, where x is a 32-byte little-endian encoding of
-// s, and returns s. If x is not a canonical encoding of s, SetCanonicalBytes
-// returns nil and an error, and the receiver is unchanged.
-func (s *Scalar) SetCanonicalBytes(x []byte) (*Scalar, error) {
- if len(x) != 32 {
- return nil, errors.New("invalid scalar length")
- }
- if !isReduced(x) {
- return nil, errors.New("invalid scalar encoding")
- }
-
- fiatScalarFromBytes((*[4]uint64)(&s.s), (*[32]byte)(x))
- fiatScalarToMontgomery(&s.s, (*fiatScalarNonMontgomeryDomainFieldElement)(&s.s))
-
- return s, nil
-}
-
-// scalarMinusOneBytes is l - 1 in little endian.
-var scalarMinusOneBytes = [32]byte{236, 211, 245, 92, 26, 99, 18, 88, 214, 156, 247, 162, 222, 249, 222, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16}
-
-// isReduced returns whether the given scalar in 32-byte little endian encoded
-// form is reduced modulo l.
-func isReduced(s []byte) bool {
- if len(s) != 32 {
- return false
- }
-
- for i := len(s) - 1; i >= 0; i-- {
- switch {
- case s[i] > scalarMinusOneBytes[i]:
- return false
- case s[i] < scalarMinusOneBytes[i]:
- return true
- }
- }
- return true
-}
-
-// SetBytesWithClamping applies the buffer pruning described in RFC 8032,
-// Section 5.1.5 (also known as clamping) and sets s to the result. The input
-// must be 32 bytes, and it is not modified. If x is not of the right length,
-// SetBytesWithClamping returns nil and an error, and the receiver is unchanged.
-//
-// Note that since Scalar values are always reduced modulo the prime order of
-// the curve, the resulting value will not preserve any of the cofactor-clearing
-// properties that clamping is meant to provide. It will however work as
-// expected as long as it is applied to points on the prime order subgroup, like
-// in Ed25519. In fact, it is lost to history why RFC 8032 adopted the
-// irrelevant RFC 7748 clamping, but it is now required for compatibility.
-func (s *Scalar) SetBytesWithClamping(x []byte) (*Scalar, error) {
- // The description above omits the purpose of the high bits of the clamping
- // for brevity, but those are also lost to reductions, and are also
- // irrelevant to edwards25519 as they protect against a specific
- // implementation bug that was once observed in a generic Montgomery ladder.
- if len(x) != 32 {
- return nil, errors.New("edwards25519: invalid SetBytesWithClamping input length")
- }
-
- // We need to use the wide reduction from SetUniformBytes, since clamping
- // sets the 2^254 bit, making the value higher than the order.
- var wideBytes [64]byte
- copy(wideBytes[:], x[:])
- wideBytes[0] &= 248
- wideBytes[31] &= 63
- wideBytes[31] |= 64
- return s.SetUniformBytes(wideBytes[:])
-}
-
-// Bytes returns the canonical 32-byte little-endian encoding of s.
-func (s *Scalar) Bytes() []byte {
- // This function is outlined to make the allocations inline in the caller
- // rather than happen on the heap.
- var encoded [32]byte
- return s.bytes(&encoded)
-}
-
-func (s *Scalar) bytes(out *[32]byte) []byte {
- var ss fiatScalarNonMontgomeryDomainFieldElement
- fiatScalarFromMontgomery(&ss, &s.s)
- fiatScalarToBytes(out, (*[4]uint64)(&ss))
- return out[:]
-}
-
-// Equal returns 1 if s and t are equal, and 0 otherwise.
-func (s *Scalar) Equal(t *Scalar) int {
- var diff fiatScalarMontgomeryDomainFieldElement
- fiatScalarSub(&diff, &s.s, &t.s)
- var nonzero uint64
- fiatScalarNonzero(&nonzero, (*[4]uint64)(&diff))
- nonzero |= nonzero >> 32
- nonzero |= nonzero >> 16
- nonzero |= nonzero >> 8
- nonzero |= nonzero >> 4
- nonzero |= nonzero >> 2
- nonzero |= nonzero >> 1
- return int(^nonzero) & 1
-}
-
-// nonAdjacentForm computes a width-w non-adjacent form for this scalar.
-//
-// w must be between 2 and 8, or nonAdjacentForm will panic.
-func (s *Scalar) nonAdjacentForm(w uint) [256]int8 {
- // This implementation is adapted from the one
- // in curve25519-dalek and is documented there:
- // https://github.com/dalek-cryptography/curve25519-dalek/blob/f630041af28e9a405255f98a8a93adca18e4315b/src/scalar.rs#L800-L871
- b := s.Bytes()
- if b[31] > 127 {
- panic("scalar has high bit set illegally")
- }
- if w < 2 {
- panic("w must be at least 2 by the definition of NAF")
- } else if w > 8 {
- panic("NAF digits must fit in int8")
- }
-
- var naf [256]int8
- var digits [5]uint64
-
- for i := 0; i < 4; i++ {
- digits[i] = binary.LittleEndian.Uint64(b[i*8:])
- }
-
- width := uint64(1 << w)
- windowMask := uint64(width - 1)
-
- pos := uint(0)
- carry := uint64(0)
- for pos < 256 {
- indexU64 := pos / 64
- indexBit := pos % 64
- var bitBuf uint64
- if indexBit < 64-w {
- // This window's bits are contained in a single u64
- bitBuf = digits[indexU64] >> indexBit
- } else {
- // Combine the current 64 bits with bits from the next 64
- bitBuf = (digits[indexU64] >> indexBit) | (digits[1+indexU64] << (64 - indexBit))
- }
-
- // Add carry into the current window
- window := carry + (bitBuf & windowMask)
-
- if window&1 == 0 {
- // If the window value is even, preserve the carry and continue.
- // Why is the carry preserved?
- // If carry == 0 and window & 1 == 0,
- // then the next carry should be 0
- // If carry == 1 and window & 1 == 0,
- // then bit_buf & 1 == 1 so the next carry should be 1
- pos += 1
- continue
- }
-
- if window < width/2 {
- carry = 0
- naf[pos] = int8(window)
- } else {
- carry = 1
- naf[pos] = int8(window) - int8(width)
- }
-
- pos += w
- }
- return naf
-}
-
-func (s *Scalar) signedRadix16() [64]int8 {
- b := s.Bytes()
- if b[31] > 127 {
- panic("scalar has high bit set illegally")
- }
-
- var digits [64]int8
-
- // Compute unsigned radix-16 digits:
- for i := 0; i < 32; i++ {
- digits[2*i] = int8(b[i] & 15)
- digits[2*i+1] = int8((b[i] >> 4) & 15)
- }
-
- // Recenter coefficients:
- for i := 0; i < 63; i++ {
- carry := (digits[i] + 8) >> 4
- digits[i] -= carry << 4
- digits[i+1] += carry
- }
-
- return digits
-}
diff --git a/vendor/filippo.io/edwards25519/scalar_fiat.go b/vendor/filippo.io/edwards25519/scalar_fiat.go
deleted file mode 100644
index 2e5782b6..00000000
--- a/vendor/filippo.io/edwards25519/scalar_fiat.go
+++ /dev/null
@@ -1,1147 +0,0 @@
-// Code generated by Fiat Cryptography. DO NOT EDIT.
-//
-// Autogenerated: word_by_word_montgomery --lang Go --cmovznz-by-mul --relax-primitive-carry-to-bitwidth 32,64 --public-function-case camelCase --public-type-case camelCase --private-function-case camelCase --private-type-case camelCase --doc-text-before-function-name '' --doc-newline-before-package-declaration --doc-prepend-header 'Code generated by Fiat Cryptography. DO NOT EDIT.' --package-name edwards25519 Scalar 64 '2^252 + 27742317777372353535851937790883648493' mul add sub opp nonzero from_montgomery to_montgomery to_bytes from_bytes
-//
-// curve description: Scalar
-//
-// machine_wordsize = 64 (from "64")
-//
-// requested operations: mul, add, sub, opp, nonzero, from_montgomery, to_montgomery, to_bytes, from_bytes
-//
-// m = 0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed (from "2^252 + 27742317777372353535851937790883648493")
-//
-//
-//
-// NOTE: In addition to the bounds specified above each function, all
-//
-// functions synthesized for this Montgomery arithmetic require the
-//
-// input to be strictly less than the prime modulus (m), and also
-//
-// require the input to be in the unique saturated representation.
-//
-// All functions also ensure that these two properties are true of
-//
-// return values.
-//
-//
-//
-// Computed values:
-//
-// eval z = z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192)
-//
-// bytes_eval z = z[0] + (z[1] << 8) + (z[2] << 16) + (z[3] << 24) + (z[4] << 32) + (z[5] << 40) + (z[6] << 48) + (z[7] << 56) + (z[8] << 64) + (z[9] << 72) + (z[10] << 80) + (z[11] << 88) + (z[12] << 96) + (z[13] << 104) + (z[14] << 112) + (z[15] << 120) + (z[16] << 128) + (z[17] << 136) + (z[18] << 144) + (z[19] << 152) + (z[20] << 160) + (z[21] << 168) + (z[22] << 176) + (z[23] << 184) + (z[24] << 192) + (z[25] << 200) + (z[26] << 208) + (z[27] << 216) + (z[28] << 224) + (z[29] << 232) + (z[30] << 240) + (z[31] << 248)
-//
-// twos_complement_eval z = let x1 := z[0] + (z[1] << 64) + (z[2] << 128) + (z[3] << 192) in
-//
-// if x1 & (2^256-1) < 2^255 then x1 & (2^256-1) else (x1 & (2^256-1)) - 2^256
-
-package edwards25519
-
-import "math/bits"
-
-type fiatScalarUint1 uint64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
-type fiatScalarInt1 int64 // We use uint64 instead of a more narrow type for performance reasons; see https://github.com/mit-plv/fiat-crypto/pull/1006#issuecomment-892625927
-
-// The type fiatScalarMontgomeryDomainFieldElement is a field element in the Montgomery domain.
-//
-// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
-type fiatScalarMontgomeryDomainFieldElement [4]uint64
-
-// The type fiatScalarNonMontgomeryDomainFieldElement is a field element NOT in the Montgomery domain.
-//
-// Bounds: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
-type fiatScalarNonMontgomeryDomainFieldElement [4]uint64
-
-// fiatScalarCmovznzU64 is a single-word conditional move.
-//
-// Postconditions:
-//
-// out1 = (if arg1 = 0 then arg2 else arg3)
-//
-// Input Bounds:
-//
-// arg1: [0x0 ~> 0x1]
-// arg2: [0x0 ~> 0xffffffffffffffff]
-// arg3: [0x0 ~> 0xffffffffffffffff]
-//
-// Output Bounds:
-//
-// out1: [0x0 ~> 0xffffffffffffffff]
-func fiatScalarCmovznzU64(out1 *uint64, arg1 fiatScalarUint1, arg2 uint64, arg3 uint64) {
- x1 := (uint64(arg1) * 0xffffffffffffffff)
- x2 := ((x1 & arg3) | ((^x1) & arg2))
- *out1 = x2
-}
-
-// fiatScalarMul multiplies two field elements in the Montgomery domain.
-//
-// Preconditions:
-//
-// 0 ≤ eval arg1 < m
-// 0 ≤ eval arg2 < m
-//
-// Postconditions:
-//
-// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) * eval (from_montgomery arg2)) mod m
-// 0 ≤ eval out1 < m
-func fiatScalarMul(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
- x1 := arg1[1]
- x2 := arg1[2]
- x3 := arg1[3]
- x4 := arg1[0]
- var x5 uint64
- var x6 uint64
- x6, x5 = bits.Mul64(x4, arg2[3])
- var x7 uint64
- var x8 uint64
- x8, x7 = bits.Mul64(x4, arg2[2])
- var x9 uint64
- var x10 uint64
- x10, x9 = bits.Mul64(x4, arg2[1])
- var x11 uint64
- var x12 uint64
- x12, x11 = bits.Mul64(x4, arg2[0])
- var x13 uint64
- var x14 uint64
- x13, x14 = bits.Add64(x12, x9, uint64(0x0))
- var x15 uint64
- var x16 uint64
- x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14)))
- var x17 uint64
- var x18 uint64
- x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16)))
- x19 := (uint64(fiatScalarUint1(x18)) + x6)
- var x20 uint64
- _, x20 = bits.Mul64(x11, 0xd2b51da312547e1b)
- var x22 uint64
- var x23 uint64
- x23, x22 = bits.Mul64(x20, 0x1000000000000000)
- var x24 uint64
- var x25 uint64
- x25, x24 = bits.Mul64(x20, 0x14def9dea2f79cd6)
- var x26 uint64
- var x27 uint64
- x27, x26 = bits.Mul64(x20, 0x5812631a5cf5d3ed)
- var x28 uint64
- var x29 uint64
- x28, x29 = bits.Add64(x27, x24, uint64(0x0))
- x30 := (uint64(fiatScalarUint1(x29)) + x25)
- var x32 uint64
- _, x32 = bits.Add64(x11, x26, uint64(0x0))
- var x33 uint64
- var x34 uint64
- x33, x34 = bits.Add64(x13, x28, uint64(fiatScalarUint1(x32)))
- var x35 uint64
- var x36 uint64
- x35, x36 = bits.Add64(x15, x30, uint64(fiatScalarUint1(x34)))
- var x37 uint64
- var x38 uint64
- x37, x38 = bits.Add64(x17, x22, uint64(fiatScalarUint1(x36)))
- var x39 uint64
- var x40 uint64
- x39, x40 = bits.Add64(x19, x23, uint64(fiatScalarUint1(x38)))
- var x41 uint64
- var x42 uint64
- x42, x41 = bits.Mul64(x1, arg2[3])
- var x43 uint64
- var x44 uint64
- x44, x43 = bits.Mul64(x1, arg2[2])
- var x45 uint64
- var x46 uint64
- x46, x45 = bits.Mul64(x1, arg2[1])
- var x47 uint64
- var x48 uint64
- x48, x47 = bits.Mul64(x1, arg2[0])
- var x49 uint64
- var x50 uint64
- x49, x50 = bits.Add64(x48, x45, uint64(0x0))
- var x51 uint64
- var x52 uint64
- x51, x52 = bits.Add64(x46, x43, uint64(fiatScalarUint1(x50)))
- var x53 uint64
- var x54 uint64
- x53, x54 = bits.Add64(x44, x41, uint64(fiatScalarUint1(x52)))
- x55 := (uint64(fiatScalarUint1(x54)) + x42)
- var x56 uint64
- var x57 uint64
- x56, x57 = bits.Add64(x33, x47, uint64(0x0))
- var x58 uint64
- var x59 uint64
- x58, x59 = bits.Add64(x35, x49, uint64(fiatScalarUint1(x57)))
- var x60 uint64
- var x61 uint64
- x60, x61 = bits.Add64(x37, x51, uint64(fiatScalarUint1(x59)))
- var x62 uint64
- var x63 uint64
- x62, x63 = bits.Add64(x39, x53, uint64(fiatScalarUint1(x61)))
- var x64 uint64
- var x65 uint64
- x64, x65 = bits.Add64(uint64(fiatScalarUint1(x40)), x55, uint64(fiatScalarUint1(x63)))
- var x66 uint64
- _, x66 = bits.Mul64(x56, 0xd2b51da312547e1b)
- var x68 uint64
- var x69 uint64
- x69, x68 = bits.Mul64(x66, 0x1000000000000000)
- var x70 uint64
- var x71 uint64
- x71, x70 = bits.Mul64(x66, 0x14def9dea2f79cd6)
- var x72 uint64
- var x73 uint64
- x73, x72 = bits.Mul64(x66, 0x5812631a5cf5d3ed)
- var x74 uint64
- var x75 uint64
- x74, x75 = bits.Add64(x73, x70, uint64(0x0))
- x76 := (uint64(fiatScalarUint1(x75)) + x71)
- var x78 uint64
- _, x78 = bits.Add64(x56, x72, uint64(0x0))
- var x79 uint64
- var x80 uint64
- x79, x80 = bits.Add64(x58, x74, uint64(fiatScalarUint1(x78)))
- var x81 uint64
- var x82 uint64
- x81, x82 = bits.Add64(x60, x76, uint64(fiatScalarUint1(x80)))
- var x83 uint64
- var x84 uint64
- x83, x84 = bits.Add64(x62, x68, uint64(fiatScalarUint1(x82)))
- var x85 uint64
- var x86 uint64
- x85, x86 = bits.Add64(x64, x69, uint64(fiatScalarUint1(x84)))
- x87 := (uint64(fiatScalarUint1(x86)) + uint64(fiatScalarUint1(x65)))
- var x88 uint64
- var x89 uint64
- x89, x88 = bits.Mul64(x2, arg2[3])
- var x90 uint64
- var x91 uint64
- x91, x90 = bits.Mul64(x2, arg2[2])
- var x92 uint64
- var x93 uint64
- x93, x92 = bits.Mul64(x2, arg2[1])
- var x94 uint64
- var x95 uint64
- x95, x94 = bits.Mul64(x2, arg2[0])
- var x96 uint64
- var x97 uint64
- x96, x97 = bits.Add64(x95, x92, uint64(0x0))
- var x98 uint64
- var x99 uint64
- x98, x99 = bits.Add64(x93, x90, uint64(fiatScalarUint1(x97)))
- var x100 uint64
- var x101 uint64
- x100, x101 = bits.Add64(x91, x88, uint64(fiatScalarUint1(x99)))
- x102 := (uint64(fiatScalarUint1(x101)) + x89)
- var x103 uint64
- var x104 uint64
- x103, x104 = bits.Add64(x79, x94, uint64(0x0))
- var x105 uint64
- var x106 uint64
- x105, x106 = bits.Add64(x81, x96, uint64(fiatScalarUint1(x104)))
- var x107 uint64
- var x108 uint64
- x107, x108 = bits.Add64(x83, x98, uint64(fiatScalarUint1(x106)))
- var x109 uint64
- var x110 uint64
- x109, x110 = bits.Add64(x85, x100, uint64(fiatScalarUint1(x108)))
- var x111 uint64
- var x112 uint64
- x111, x112 = bits.Add64(x87, x102, uint64(fiatScalarUint1(x110)))
- var x113 uint64
- _, x113 = bits.Mul64(x103, 0xd2b51da312547e1b)
- var x115 uint64
- var x116 uint64
- x116, x115 = bits.Mul64(x113, 0x1000000000000000)
- var x117 uint64
- var x118 uint64
- x118, x117 = bits.Mul64(x113, 0x14def9dea2f79cd6)
- var x119 uint64
- var x120 uint64
- x120, x119 = bits.Mul64(x113, 0x5812631a5cf5d3ed)
- var x121 uint64
- var x122 uint64
- x121, x122 = bits.Add64(x120, x117, uint64(0x0))
- x123 := (uint64(fiatScalarUint1(x122)) + x118)
- var x125 uint64
- _, x125 = bits.Add64(x103, x119, uint64(0x0))
- var x126 uint64
- var x127 uint64
- x126, x127 = bits.Add64(x105, x121, uint64(fiatScalarUint1(x125)))
- var x128 uint64
- var x129 uint64
- x128, x129 = bits.Add64(x107, x123, uint64(fiatScalarUint1(x127)))
- var x130 uint64
- var x131 uint64
- x130, x131 = bits.Add64(x109, x115, uint64(fiatScalarUint1(x129)))
- var x132 uint64
- var x133 uint64
- x132, x133 = bits.Add64(x111, x116, uint64(fiatScalarUint1(x131)))
- x134 := (uint64(fiatScalarUint1(x133)) + uint64(fiatScalarUint1(x112)))
- var x135 uint64
- var x136 uint64
- x136, x135 = bits.Mul64(x3, arg2[3])
- var x137 uint64
- var x138 uint64
- x138, x137 = bits.Mul64(x3, arg2[2])
- var x139 uint64
- var x140 uint64
- x140, x139 = bits.Mul64(x3, arg2[1])
- var x141 uint64
- var x142 uint64
- x142, x141 = bits.Mul64(x3, arg2[0])
- var x143 uint64
- var x144 uint64
- x143, x144 = bits.Add64(x142, x139, uint64(0x0))
- var x145 uint64
- var x146 uint64
- x145, x146 = bits.Add64(x140, x137, uint64(fiatScalarUint1(x144)))
- var x147 uint64
- var x148 uint64
- x147, x148 = bits.Add64(x138, x135, uint64(fiatScalarUint1(x146)))
- x149 := (uint64(fiatScalarUint1(x148)) + x136)
- var x150 uint64
- var x151 uint64
- x150, x151 = bits.Add64(x126, x141, uint64(0x0))
- var x152 uint64
- var x153 uint64
- x152, x153 = bits.Add64(x128, x143, uint64(fiatScalarUint1(x151)))
- var x154 uint64
- var x155 uint64
- x154, x155 = bits.Add64(x130, x145, uint64(fiatScalarUint1(x153)))
- var x156 uint64
- var x157 uint64
- x156, x157 = bits.Add64(x132, x147, uint64(fiatScalarUint1(x155)))
- var x158 uint64
- var x159 uint64
- x158, x159 = bits.Add64(x134, x149, uint64(fiatScalarUint1(x157)))
- var x160 uint64
- _, x160 = bits.Mul64(x150, 0xd2b51da312547e1b)
- var x162 uint64
- var x163 uint64
- x163, x162 = bits.Mul64(x160, 0x1000000000000000)
- var x164 uint64
- var x165 uint64
- x165, x164 = bits.Mul64(x160, 0x14def9dea2f79cd6)
- var x166 uint64
- var x167 uint64
- x167, x166 = bits.Mul64(x160, 0x5812631a5cf5d3ed)
- var x168 uint64
- var x169 uint64
- x168, x169 = bits.Add64(x167, x164, uint64(0x0))
- x170 := (uint64(fiatScalarUint1(x169)) + x165)
- var x172 uint64
- _, x172 = bits.Add64(x150, x166, uint64(0x0))
- var x173 uint64
- var x174 uint64
- x173, x174 = bits.Add64(x152, x168, uint64(fiatScalarUint1(x172)))
- var x175 uint64
- var x176 uint64
- x175, x176 = bits.Add64(x154, x170, uint64(fiatScalarUint1(x174)))
- var x177 uint64
- var x178 uint64
- x177, x178 = bits.Add64(x156, x162, uint64(fiatScalarUint1(x176)))
- var x179 uint64
- var x180 uint64
- x179, x180 = bits.Add64(x158, x163, uint64(fiatScalarUint1(x178)))
- x181 := (uint64(fiatScalarUint1(x180)) + uint64(fiatScalarUint1(x159)))
- var x182 uint64
- var x183 uint64
- x182, x183 = bits.Sub64(x173, 0x5812631a5cf5d3ed, uint64(0x0))
- var x184 uint64
- var x185 uint64
- x184, x185 = bits.Sub64(x175, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x183)))
- var x186 uint64
- var x187 uint64
- x186, x187 = bits.Sub64(x177, uint64(0x0), uint64(fiatScalarUint1(x185)))
- var x188 uint64
- var x189 uint64
- x188, x189 = bits.Sub64(x179, 0x1000000000000000, uint64(fiatScalarUint1(x187)))
- var x191 uint64
- _, x191 = bits.Sub64(x181, uint64(0x0), uint64(fiatScalarUint1(x189)))
- var x192 uint64
- fiatScalarCmovznzU64(&x192, fiatScalarUint1(x191), x182, x173)
- var x193 uint64
- fiatScalarCmovznzU64(&x193, fiatScalarUint1(x191), x184, x175)
- var x194 uint64
- fiatScalarCmovznzU64(&x194, fiatScalarUint1(x191), x186, x177)
- var x195 uint64
- fiatScalarCmovznzU64(&x195, fiatScalarUint1(x191), x188, x179)
- out1[0] = x192
- out1[1] = x193
- out1[2] = x194
- out1[3] = x195
-}
-
-// fiatScalarAdd adds two field elements in the Montgomery domain.
-//
-// Preconditions:
-//
-// 0 ≤ eval arg1 < m
-// 0 ≤ eval arg2 < m
-//
-// Postconditions:
-//
-// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) + eval (from_montgomery arg2)) mod m
-// 0 ≤ eval out1 < m
-func fiatScalarAdd(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
- var x1 uint64
- var x2 uint64
- x1, x2 = bits.Add64(arg1[0], arg2[0], uint64(0x0))
- var x3 uint64
- var x4 uint64
- x3, x4 = bits.Add64(arg1[1], arg2[1], uint64(fiatScalarUint1(x2)))
- var x5 uint64
- var x6 uint64
- x5, x6 = bits.Add64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4)))
- var x7 uint64
- var x8 uint64
- x7, x8 = bits.Add64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6)))
- var x9 uint64
- var x10 uint64
- x9, x10 = bits.Sub64(x1, 0x5812631a5cf5d3ed, uint64(0x0))
- var x11 uint64
- var x12 uint64
- x11, x12 = bits.Sub64(x3, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x10)))
- var x13 uint64
- var x14 uint64
- x13, x14 = bits.Sub64(x5, uint64(0x0), uint64(fiatScalarUint1(x12)))
- var x15 uint64
- var x16 uint64
- x15, x16 = bits.Sub64(x7, 0x1000000000000000, uint64(fiatScalarUint1(x14)))
- var x18 uint64
- _, x18 = bits.Sub64(uint64(fiatScalarUint1(x8)), uint64(0x0), uint64(fiatScalarUint1(x16)))
- var x19 uint64
- fiatScalarCmovznzU64(&x19, fiatScalarUint1(x18), x9, x1)
- var x20 uint64
- fiatScalarCmovznzU64(&x20, fiatScalarUint1(x18), x11, x3)
- var x21 uint64
- fiatScalarCmovznzU64(&x21, fiatScalarUint1(x18), x13, x5)
- var x22 uint64
- fiatScalarCmovznzU64(&x22, fiatScalarUint1(x18), x15, x7)
- out1[0] = x19
- out1[1] = x20
- out1[2] = x21
- out1[3] = x22
-}
-
-// fiatScalarSub subtracts two field elements in the Montgomery domain.
-//
-// Preconditions:
-//
-// 0 ≤ eval arg1 < m
-// 0 ≤ eval arg2 < m
-//
-// Postconditions:
-//
-// eval (from_montgomery out1) mod m = (eval (from_montgomery arg1) - eval (from_montgomery arg2)) mod m
-// 0 ≤ eval out1 < m
-func fiatScalarSub(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement, arg2 *fiatScalarMontgomeryDomainFieldElement) {
- var x1 uint64
- var x2 uint64
- x1, x2 = bits.Sub64(arg1[0], arg2[0], uint64(0x0))
- var x3 uint64
- var x4 uint64
- x3, x4 = bits.Sub64(arg1[1], arg2[1], uint64(fiatScalarUint1(x2)))
- var x5 uint64
- var x6 uint64
- x5, x6 = bits.Sub64(arg1[2], arg2[2], uint64(fiatScalarUint1(x4)))
- var x7 uint64
- var x8 uint64
- x7, x8 = bits.Sub64(arg1[3], arg2[3], uint64(fiatScalarUint1(x6)))
- var x9 uint64
- fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff)
- var x10 uint64
- var x11 uint64
- x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0))
- var x12 uint64
- var x13 uint64
- x12, x13 = bits.Add64(x3, (x9 & 0x14def9dea2f79cd6), uint64(fiatScalarUint1(x11)))
- var x14 uint64
- var x15 uint64
- x14, x15 = bits.Add64(x5, uint64(0x0), uint64(fiatScalarUint1(x13)))
- var x16 uint64
- x16, _ = bits.Add64(x7, (x9 & 0x1000000000000000), uint64(fiatScalarUint1(x15)))
- out1[0] = x10
- out1[1] = x12
- out1[2] = x14
- out1[3] = x16
-}
-
-// fiatScalarOpp negates a field element in the Montgomery domain.
-//
-// Preconditions:
-//
-// 0 ≤ eval arg1 < m
-//
-// Postconditions:
-//
-// eval (from_montgomery out1) mod m = -eval (from_montgomery arg1) mod m
-// 0 ≤ eval out1 < m
-func fiatScalarOpp(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement) {
- var x1 uint64
- var x2 uint64
- x1, x2 = bits.Sub64(uint64(0x0), arg1[0], uint64(0x0))
- var x3 uint64
- var x4 uint64
- x3, x4 = bits.Sub64(uint64(0x0), arg1[1], uint64(fiatScalarUint1(x2)))
- var x5 uint64
- var x6 uint64
- x5, x6 = bits.Sub64(uint64(0x0), arg1[2], uint64(fiatScalarUint1(x4)))
- var x7 uint64
- var x8 uint64
- x7, x8 = bits.Sub64(uint64(0x0), arg1[3], uint64(fiatScalarUint1(x6)))
- var x9 uint64
- fiatScalarCmovznzU64(&x9, fiatScalarUint1(x8), uint64(0x0), 0xffffffffffffffff)
- var x10 uint64
- var x11 uint64
- x10, x11 = bits.Add64(x1, (x9 & 0x5812631a5cf5d3ed), uint64(0x0))
- var x12 uint64
- var x13 uint64
- x12, x13 = bits.Add64(x3, (x9 & 0x14def9dea2f79cd6), uint64(fiatScalarUint1(x11)))
- var x14 uint64
- var x15 uint64
- x14, x15 = bits.Add64(x5, uint64(0x0), uint64(fiatScalarUint1(x13)))
- var x16 uint64
- x16, _ = bits.Add64(x7, (x9 & 0x1000000000000000), uint64(fiatScalarUint1(x15)))
- out1[0] = x10
- out1[1] = x12
- out1[2] = x14
- out1[3] = x16
-}
-
-// fiatScalarNonzero outputs a single non-zero word if the input is non-zero and zero otherwise.
-//
-// Preconditions:
-//
-// 0 ≤ eval arg1 < m
-//
-// Postconditions:
-//
-// out1 = 0 ↔ eval (from_montgomery arg1) mod m = 0
-//
-// Input Bounds:
-//
-// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff]]
-//
-// Output Bounds:
-//
-// out1: [0x0 ~> 0xffffffffffffffff]
-func fiatScalarNonzero(out1 *uint64, arg1 *[4]uint64) {
- x1 := (arg1[0] | (arg1[1] | (arg1[2] | arg1[3])))
- *out1 = x1
-}
-
-// fiatScalarFromMontgomery translates a field element out of the Montgomery domain.
-//
-// Preconditions:
-//
-// 0 ≤ eval arg1 < m
-//
-// Postconditions:
-//
-// eval out1 mod m = (eval arg1 * ((2^64)⁻¹ mod m)^4) mod m
-// 0 ≤ eval out1 < m
-func fiatScalarFromMontgomery(out1 *fiatScalarNonMontgomeryDomainFieldElement, arg1 *fiatScalarMontgomeryDomainFieldElement) {
- x1 := arg1[0]
- var x2 uint64
- _, x2 = bits.Mul64(x1, 0xd2b51da312547e1b)
- var x4 uint64
- var x5 uint64
- x5, x4 = bits.Mul64(x2, 0x1000000000000000)
- var x6 uint64
- var x7 uint64
- x7, x6 = bits.Mul64(x2, 0x14def9dea2f79cd6)
- var x8 uint64
- var x9 uint64
- x9, x8 = bits.Mul64(x2, 0x5812631a5cf5d3ed)
- var x10 uint64
- var x11 uint64
- x10, x11 = bits.Add64(x9, x6, uint64(0x0))
- var x13 uint64
- _, x13 = bits.Add64(x1, x8, uint64(0x0))
- var x14 uint64
- var x15 uint64
- x14, x15 = bits.Add64(uint64(0x0), x10, uint64(fiatScalarUint1(x13)))
- var x16 uint64
- var x17 uint64
- x16, x17 = bits.Add64(x14, arg1[1], uint64(0x0))
- var x18 uint64
- _, x18 = bits.Mul64(x16, 0xd2b51da312547e1b)
- var x20 uint64
- var x21 uint64
- x21, x20 = bits.Mul64(x18, 0x1000000000000000)
- var x22 uint64
- var x23 uint64
- x23, x22 = bits.Mul64(x18, 0x14def9dea2f79cd6)
- var x24 uint64
- var x25 uint64
- x25, x24 = bits.Mul64(x18, 0x5812631a5cf5d3ed)
- var x26 uint64
- var x27 uint64
- x26, x27 = bits.Add64(x25, x22, uint64(0x0))
- var x29 uint64
- _, x29 = bits.Add64(x16, x24, uint64(0x0))
- var x30 uint64
- var x31 uint64
- x30, x31 = bits.Add64((uint64(fiatScalarUint1(x17)) + (uint64(fiatScalarUint1(x15)) + (uint64(fiatScalarUint1(x11)) + x7))), x26, uint64(fiatScalarUint1(x29)))
- var x32 uint64
- var x33 uint64
- x32, x33 = bits.Add64(x4, (uint64(fiatScalarUint1(x27)) + x23), uint64(fiatScalarUint1(x31)))
- var x34 uint64
- var x35 uint64
- x34, x35 = bits.Add64(x5, x20, uint64(fiatScalarUint1(x33)))
- var x36 uint64
- var x37 uint64
- x36, x37 = bits.Add64(x30, arg1[2], uint64(0x0))
- var x38 uint64
- var x39 uint64
- x38, x39 = bits.Add64(x32, uint64(0x0), uint64(fiatScalarUint1(x37)))
- var x40 uint64
- var x41 uint64
- x40, x41 = bits.Add64(x34, uint64(0x0), uint64(fiatScalarUint1(x39)))
- var x42 uint64
- _, x42 = bits.Mul64(x36, 0xd2b51da312547e1b)
- var x44 uint64
- var x45 uint64
- x45, x44 = bits.Mul64(x42, 0x1000000000000000)
- var x46 uint64
- var x47 uint64
- x47, x46 = bits.Mul64(x42, 0x14def9dea2f79cd6)
- var x48 uint64
- var x49 uint64
- x49, x48 = bits.Mul64(x42, 0x5812631a5cf5d3ed)
- var x50 uint64
- var x51 uint64
- x50, x51 = bits.Add64(x49, x46, uint64(0x0))
- var x53 uint64
- _, x53 = bits.Add64(x36, x48, uint64(0x0))
- var x54 uint64
- var x55 uint64
- x54, x55 = bits.Add64(x38, x50, uint64(fiatScalarUint1(x53)))
- var x56 uint64
- var x57 uint64
- x56, x57 = bits.Add64(x40, (uint64(fiatScalarUint1(x51)) + x47), uint64(fiatScalarUint1(x55)))
- var x58 uint64
- var x59 uint64
- x58, x59 = bits.Add64((uint64(fiatScalarUint1(x41)) + (uint64(fiatScalarUint1(x35)) + x21)), x44, uint64(fiatScalarUint1(x57)))
- var x60 uint64
- var x61 uint64
- x60, x61 = bits.Add64(x54, arg1[3], uint64(0x0))
- var x62 uint64
- var x63 uint64
- x62, x63 = bits.Add64(x56, uint64(0x0), uint64(fiatScalarUint1(x61)))
- var x64 uint64
- var x65 uint64
- x64, x65 = bits.Add64(x58, uint64(0x0), uint64(fiatScalarUint1(x63)))
- var x66 uint64
- _, x66 = bits.Mul64(x60, 0xd2b51da312547e1b)
- var x68 uint64
- var x69 uint64
- x69, x68 = bits.Mul64(x66, 0x1000000000000000)
- var x70 uint64
- var x71 uint64
- x71, x70 = bits.Mul64(x66, 0x14def9dea2f79cd6)
- var x72 uint64
- var x73 uint64
- x73, x72 = bits.Mul64(x66, 0x5812631a5cf5d3ed)
- var x74 uint64
- var x75 uint64
- x74, x75 = bits.Add64(x73, x70, uint64(0x0))
- var x77 uint64
- _, x77 = bits.Add64(x60, x72, uint64(0x0))
- var x78 uint64
- var x79 uint64
- x78, x79 = bits.Add64(x62, x74, uint64(fiatScalarUint1(x77)))
- var x80 uint64
- var x81 uint64
- x80, x81 = bits.Add64(x64, (uint64(fiatScalarUint1(x75)) + x71), uint64(fiatScalarUint1(x79)))
- var x82 uint64
- var x83 uint64
- x82, x83 = bits.Add64((uint64(fiatScalarUint1(x65)) + (uint64(fiatScalarUint1(x59)) + x45)), x68, uint64(fiatScalarUint1(x81)))
- x84 := (uint64(fiatScalarUint1(x83)) + x69)
- var x85 uint64
- var x86 uint64
- x85, x86 = bits.Sub64(x78, 0x5812631a5cf5d3ed, uint64(0x0))
- var x87 uint64
- var x88 uint64
- x87, x88 = bits.Sub64(x80, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x86)))
- var x89 uint64
- var x90 uint64
- x89, x90 = bits.Sub64(x82, uint64(0x0), uint64(fiatScalarUint1(x88)))
- var x91 uint64
- var x92 uint64
- x91, x92 = bits.Sub64(x84, 0x1000000000000000, uint64(fiatScalarUint1(x90)))
- var x94 uint64
- _, x94 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(fiatScalarUint1(x92)))
- var x95 uint64
- fiatScalarCmovznzU64(&x95, fiatScalarUint1(x94), x85, x78)
- var x96 uint64
- fiatScalarCmovznzU64(&x96, fiatScalarUint1(x94), x87, x80)
- var x97 uint64
- fiatScalarCmovznzU64(&x97, fiatScalarUint1(x94), x89, x82)
- var x98 uint64
- fiatScalarCmovznzU64(&x98, fiatScalarUint1(x94), x91, x84)
- out1[0] = x95
- out1[1] = x96
- out1[2] = x97
- out1[3] = x98
-}
-
-// fiatScalarToMontgomery translates a field element into the Montgomery domain.
-//
-// Preconditions:
-//
-// 0 ≤ eval arg1 < m
-//
-// Postconditions:
-//
-// eval (from_montgomery out1) mod m = eval arg1 mod m
-// 0 ≤ eval out1 < m
-func fiatScalarToMontgomery(out1 *fiatScalarMontgomeryDomainFieldElement, arg1 *fiatScalarNonMontgomeryDomainFieldElement) {
- x1 := arg1[1]
- x2 := arg1[2]
- x3 := arg1[3]
- x4 := arg1[0]
- var x5 uint64
- var x6 uint64
- x6, x5 = bits.Mul64(x4, 0x399411b7c309a3d)
- var x7 uint64
- var x8 uint64
- x8, x7 = bits.Mul64(x4, 0xceec73d217f5be65)
- var x9 uint64
- var x10 uint64
- x10, x9 = bits.Mul64(x4, 0xd00e1ba768859347)
- var x11 uint64
- var x12 uint64
- x12, x11 = bits.Mul64(x4, 0xa40611e3449c0f01)
- var x13 uint64
- var x14 uint64
- x13, x14 = bits.Add64(x12, x9, uint64(0x0))
- var x15 uint64
- var x16 uint64
- x15, x16 = bits.Add64(x10, x7, uint64(fiatScalarUint1(x14)))
- var x17 uint64
- var x18 uint64
- x17, x18 = bits.Add64(x8, x5, uint64(fiatScalarUint1(x16)))
- var x19 uint64
- _, x19 = bits.Mul64(x11, 0xd2b51da312547e1b)
- var x21 uint64
- var x22 uint64
- x22, x21 = bits.Mul64(x19, 0x1000000000000000)
- var x23 uint64
- var x24 uint64
- x24, x23 = bits.Mul64(x19, 0x14def9dea2f79cd6)
- var x25 uint64
- var x26 uint64
- x26, x25 = bits.Mul64(x19, 0x5812631a5cf5d3ed)
- var x27 uint64
- var x28 uint64
- x27, x28 = bits.Add64(x26, x23, uint64(0x0))
- var x30 uint64
- _, x30 = bits.Add64(x11, x25, uint64(0x0))
- var x31 uint64
- var x32 uint64
- x31, x32 = bits.Add64(x13, x27, uint64(fiatScalarUint1(x30)))
- var x33 uint64
- var x34 uint64
- x33, x34 = bits.Add64(x15, (uint64(fiatScalarUint1(x28)) + x24), uint64(fiatScalarUint1(x32)))
- var x35 uint64
- var x36 uint64
- x35, x36 = bits.Add64(x17, x21, uint64(fiatScalarUint1(x34)))
- var x37 uint64
- var x38 uint64
- x38, x37 = bits.Mul64(x1, 0x399411b7c309a3d)
- var x39 uint64
- var x40 uint64
- x40, x39 = bits.Mul64(x1, 0xceec73d217f5be65)
- var x41 uint64
- var x42 uint64
- x42, x41 = bits.Mul64(x1, 0xd00e1ba768859347)
- var x43 uint64
- var x44 uint64
- x44, x43 = bits.Mul64(x1, 0xa40611e3449c0f01)
- var x45 uint64
- var x46 uint64
- x45, x46 = bits.Add64(x44, x41, uint64(0x0))
- var x47 uint64
- var x48 uint64
- x47, x48 = bits.Add64(x42, x39, uint64(fiatScalarUint1(x46)))
- var x49 uint64
- var x50 uint64
- x49, x50 = bits.Add64(x40, x37, uint64(fiatScalarUint1(x48)))
- var x51 uint64
- var x52 uint64
- x51, x52 = bits.Add64(x31, x43, uint64(0x0))
- var x53 uint64
- var x54 uint64
- x53, x54 = bits.Add64(x33, x45, uint64(fiatScalarUint1(x52)))
- var x55 uint64
- var x56 uint64
- x55, x56 = bits.Add64(x35, x47, uint64(fiatScalarUint1(x54)))
- var x57 uint64
- var x58 uint64
- x57, x58 = bits.Add64(((uint64(fiatScalarUint1(x36)) + (uint64(fiatScalarUint1(x18)) + x6)) + x22), x49, uint64(fiatScalarUint1(x56)))
- var x59 uint64
- _, x59 = bits.Mul64(x51, 0xd2b51da312547e1b)
- var x61 uint64
- var x62 uint64
- x62, x61 = bits.Mul64(x59, 0x1000000000000000)
- var x63 uint64
- var x64 uint64
- x64, x63 = bits.Mul64(x59, 0x14def9dea2f79cd6)
- var x65 uint64
- var x66 uint64
- x66, x65 = bits.Mul64(x59, 0x5812631a5cf5d3ed)
- var x67 uint64
- var x68 uint64
- x67, x68 = bits.Add64(x66, x63, uint64(0x0))
- var x70 uint64
- _, x70 = bits.Add64(x51, x65, uint64(0x0))
- var x71 uint64
- var x72 uint64
- x71, x72 = bits.Add64(x53, x67, uint64(fiatScalarUint1(x70)))
- var x73 uint64
- var x74 uint64
- x73, x74 = bits.Add64(x55, (uint64(fiatScalarUint1(x68)) + x64), uint64(fiatScalarUint1(x72)))
- var x75 uint64
- var x76 uint64
- x75, x76 = bits.Add64(x57, x61, uint64(fiatScalarUint1(x74)))
- var x77 uint64
- var x78 uint64
- x78, x77 = bits.Mul64(x2, 0x399411b7c309a3d)
- var x79 uint64
- var x80 uint64
- x80, x79 = bits.Mul64(x2, 0xceec73d217f5be65)
- var x81 uint64
- var x82 uint64
- x82, x81 = bits.Mul64(x2, 0xd00e1ba768859347)
- var x83 uint64
- var x84 uint64
- x84, x83 = bits.Mul64(x2, 0xa40611e3449c0f01)
- var x85 uint64
- var x86 uint64
- x85, x86 = bits.Add64(x84, x81, uint64(0x0))
- var x87 uint64
- var x88 uint64
- x87, x88 = bits.Add64(x82, x79, uint64(fiatScalarUint1(x86)))
- var x89 uint64
- var x90 uint64
- x89, x90 = bits.Add64(x80, x77, uint64(fiatScalarUint1(x88)))
- var x91 uint64
- var x92 uint64
- x91, x92 = bits.Add64(x71, x83, uint64(0x0))
- var x93 uint64
- var x94 uint64
- x93, x94 = bits.Add64(x73, x85, uint64(fiatScalarUint1(x92)))
- var x95 uint64
- var x96 uint64
- x95, x96 = bits.Add64(x75, x87, uint64(fiatScalarUint1(x94)))
- var x97 uint64
- var x98 uint64
- x97, x98 = bits.Add64(((uint64(fiatScalarUint1(x76)) + (uint64(fiatScalarUint1(x58)) + (uint64(fiatScalarUint1(x50)) + x38))) + x62), x89, uint64(fiatScalarUint1(x96)))
- var x99 uint64
- _, x99 = bits.Mul64(x91, 0xd2b51da312547e1b)
- var x101 uint64
- var x102 uint64
- x102, x101 = bits.Mul64(x99, 0x1000000000000000)
- var x103 uint64
- var x104 uint64
- x104, x103 = bits.Mul64(x99, 0x14def9dea2f79cd6)
- var x105 uint64
- var x106 uint64
- x106, x105 = bits.Mul64(x99, 0x5812631a5cf5d3ed)
- var x107 uint64
- var x108 uint64
- x107, x108 = bits.Add64(x106, x103, uint64(0x0))
- var x110 uint64
- _, x110 = bits.Add64(x91, x105, uint64(0x0))
- var x111 uint64
- var x112 uint64
- x111, x112 = bits.Add64(x93, x107, uint64(fiatScalarUint1(x110)))
- var x113 uint64
- var x114 uint64
- x113, x114 = bits.Add64(x95, (uint64(fiatScalarUint1(x108)) + x104), uint64(fiatScalarUint1(x112)))
- var x115 uint64
- var x116 uint64
- x115, x116 = bits.Add64(x97, x101, uint64(fiatScalarUint1(x114)))
- var x117 uint64
- var x118 uint64
- x118, x117 = bits.Mul64(x3, 0x399411b7c309a3d)
- var x119 uint64
- var x120 uint64
- x120, x119 = bits.Mul64(x3, 0xceec73d217f5be65)
- var x121 uint64
- var x122 uint64
- x122, x121 = bits.Mul64(x3, 0xd00e1ba768859347)
- var x123 uint64
- var x124 uint64
- x124, x123 = bits.Mul64(x3, 0xa40611e3449c0f01)
- var x125 uint64
- var x126 uint64
- x125, x126 = bits.Add64(x124, x121, uint64(0x0))
- var x127 uint64
- var x128 uint64
- x127, x128 = bits.Add64(x122, x119, uint64(fiatScalarUint1(x126)))
- var x129 uint64
- var x130 uint64
- x129, x130 = bits.Add64(x120, x117, uint64(fiatScalarUint1(x128)))
- var x131 uint64
- var x132 uint64
- x131, x132 = bits.Add64(x111, x123, uint64(0x0))
- var x133 uint64
- var x134 uint64
- x133, x134 = bits.Add64(x113, x125, uint64(fiatScalarUint1(x132)))
- var x135 uint64
- var x136 uint64
- x135, x136 = bits.Add64(x115, x127, uint64(fiatScalarUint1(x134)))
- var x137 uint64
- var x138 uint64
- x137, x138 = bits.Add64(((uint64(fiatScalarUint1(x116)) + (uint64(fiatScalarUint1(x98)) + (uint64(fiatScalarUint1(x90)) + x78))) + x102), x129, uint64(fiatScalarUint1(x136)))
- var x139 uint64
- _, x139 = bits.Mul64(x131, 0xd2b51da312547e1b)
- var x141 uint64
- var x142 uint64
- x142, x141 = bits.Mul64(x139, 0x1000000000000000)
- var x143 uint64
- var x144 uint64
- x144, x143 = bits.Mul64(x139, 0x14def9dea2f79cd6)
- var x145 uint64
- var x146 uint64
- x146, x145 = bits.Mul64(x139, 0x5812631a5cf5d3ed)
- var x147 uint64
- var x148 uint64
- x147, x148 = bits.Add64(x146, x143, uint64(0x0))
- var x150 uint64
- _, x150 = bits.Add64(x131, x145, uint64(0x0))
- var x151 uint64
- var x152 uint64
- x151, x152 = bits.Add64(x133, x147, uint64(fiatScalarUint1(x150)))
- var x153 uint64
- var x154 uint64
- x153, x154 = bits.Add64(x135, (uint64(fiatScalarUint1(x148)) + x144), uint64(fiatScalarUint1(x152)))
- var x155 uint64
- var x156 uint64
- x155, x156 = bits.Add64(x137, x141, uint64(fiatScalarUint1(x154)))
- x157 := ((uint64(fiatScalarUint1(x156)) + (uint64(fiatScalarUint1(x138)) + (uint64(fiatScalarUint1(x130)) + x118))) + x142)
- var x158 uint64
- var x159 uint64
- x158, x159 = bits.Sub64(x151, 0x5812631a5cf5d3ed, uint64(0x0))
- var x160 uint64
- var x161 uint64
- x160, x161 = bits.Sub64(x153, 0x14def9dea2f79cd6, uint64(fiatScalarUint1(x159)))
- var x162 uint64
- var x163 uint64
- x162, x163 = bits.Sub64(x155, uint64(0x0), uint64(fiatScalarUint1(x161)))
- var x164 uint64
- var x165 uint64
- x164, x165 = bits.Sub64(x157, 0x1000000000000000, uint64(fiatScalarUint1(x163)))
- var x167 uint64
- _, x167 = bits.Sub64(uint64(0x0), uint64(0x0), uint64(fiatScalarUint1(x165)))
- var x168 uint64
- fiatScalarCmovznzU64(&x168, fiatScalarUint1(x167), x158, x151)
- var x169 uint64
- fiatScalarCmovznzU64(&x169, fiatScalarUint1(x167), x160, x153)
- var x170 uint64
- fiatScalarCmovznzU64(&x170, fiatScalarUint1(x167), x162, x155)
- var x171 uint64
- fiatScalarCmovznzU64(&x171, fiatScalarUint1(x167), x164, x157)
- out1[0] = x168
- out1[1] = x169
- out1[2] = x170
- out1[3] = x171
-}
-
-// fiatScalarToBytes serializes a field element NOT in the Montgomery domain to bytes in little-endian order.
-//
-// Preconditions:
-//
-// 0 ≤ eval arg1 < m
-//
-// Postconditions:
-//
-// out1 = map (λ x, ⌊((eval arg1 mod m) mod 2^(8 * (x + 1))) / 2^(8 * x)⌋) [0..31]
-//
-// Input Bounds:
-//
-// arg1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1fffffffffffffff]]
-//
-// Output Bounds:
-//
-// out1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1f]]
-func fiatScalarToBytes(out1 *[32]uint8, arg1 *[4]uint64) {
- x1 := arg1[3]
- x2 := arg1[2]
- x3 := arg1[1]
- x4 := arg1[0]
- x5 := (uint8(x4) & 0xff)
- x6 := (x4 >> 8)
- x7 := (uint8(x6) & 0xff)
- x8 := (x6 >> 8)
- x9 := (uint8(x8) & 0xff)
- x10 := (x8 >> 8)
- x11 := (uint8(x10) & 0xff)
- x12 := (x10 >> 8)
- x13 := (uint8(x12) & 0xff)
- x14 := (x12 >> 8)
- x15 := (uint8(x14) & 0xff)
- x16 := (x14 >> 8)
- x17 := (uint8(x16) & 0xff)
- x18 := uint8((x16 >> 8))
- x19 := (uint8(x3) & 0xff)
- x20 := (x3 >> 8)
- x21 := (uint8(x20) & 0xff)
- x22 := (x20 >> 8)
- x23 := (uint8(x22) & 0xff)
- x24 := (x22 >> 8)
- x25 := (uint8(x24) & 0xff)
- x26 := (x24 >> 8)
- x27 := (uint8(x26) & 0xff)
- x28 := (x26 >> 8)
- x29 := (uint8(x28) & 0xff)
- x30 := (x28 >> 8)
- x31 := (uint8(x30) & 0xff)
- x32 := uint8((x30 >> 8))
- x33 := (uint8(x2) & 0xff)
- x34 := (x2 >> 8)
- x35 := (uint8(x34) & 0xff)
- x36 := (x34 >> 8)
- x37 := (uint8(x36) & 0xff)
- x38 := (x36 >> 8)
- x39 := (uint8(x38) & 0xff)
- x40 := (x38 >> 8)
- x41 := (uint8(x40) & 0xff)
- x42 := (x40 >> 8)
- x43 := (uint8(x42) & 0xff)
- x44 := (x42 >> 8)
- x45 := (uint8(x44) & 0xff)
- x46 := uint8((x44 >> 8))
- x47 := (uint8(x1) & 0xff)
- x48 := (x1 >> 8)
- x49 := (uint8(x48) & 0xff)
- x50 := (x48 >> 8)
- x51 := (uint8(x50) & 0xff)
- x52 := (x50 >> 8)
- x53 := (uint8(x52) & 0xff)
- x54 := (x52 >> 8)
- x55 := (uint8(x54) & 0xff)
- x56 := (x54 >> 8)
- x57 := (uint8(x56) & 0xff)
- x58 := (x56 >> 8)
- x59 := (uint8(x58) & 0xff)
- x60 := uint8((x58 >> 8))
- out1[0] = x5
- out1[1] = x7
- out1[2] = x9
- out1[3] = x11
- out1[4] = x13
- out1[5] = x15
- out1[6] = x17
- out1[7] = x18
- out1[8] = x19
- out1[9] = x21
- out1[10] = x23
- out1[11] = x25
- out1[12] = x27
- out1[13] = x29
- out1[14] = x31
- out1[15] = x32
- out1[16] = x33
- out1[17] = x35
- out1[18] = x37
- out1[19] = x39
- out1[20] = x41
- out1[21] = x43
- out1[22] = x45
- out1[23] = x46
- out1[24] = x47
- out1[25] = x49
- out1[26] = x51
- out1[27] = x53
- out1[28] = x55
- out1[29] = x57
- out1[30] = x59
- out1[31] = x60
-}
-
-// fiatScalarFromBytes deserializes a field element NOT in the Montgomery domain from bytes in little-endian order.
-//
-// Preconditions:
-//
-// 0 ≤ bytes_eval arg1 < m
-//
-// Postconditions:
-//
-// eval out1 mod m = bytes_eval arg1 mod m
-// 0 ≤ eval out1 < m
-//
-// Input Bounds:
-//
-// arg1: [[0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0xff], [0x0 ~> 0x1f]]
-//
-// Output Bounds:
-//
-// out1: [[0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0xffffffffffffffff], [0x0 ~> 0x1fffffffffffffff]]
-func fiatScalarFromBytes(out1 *[4]uint64, arg1 *[32]uint8) {
- x1 := (uint64(arg1[31]) << 56)
- x2 := (uint64(arg1[30]) << 48)
- x3 := (uint64(arg1[29]) << 40)
- x4 := (uint64(arg1[28]) << 32)
- x5 := (uint64(arg1[27]) << 24)
- x6 := (uint64(arg1[26]) << 16)
- x7 := (uint64(arg1[25]) << 8)
- x8 := arg1[24]
- x9 := (uint64(arg1[23]) << 56)
- x10 := (uint64(arg1[22]) << 48)
- x11 := (uint64(arg1[21]) << 40)
- x12 := (uint64(arg1[20]) << 32)
- x13 := (uint64(arg1[19]) << 24)
- x14 := (uint64(arg1[18]) << 16)
- x15 := (uint64(arg1[17]) << 8)
- x16 := arg1[16]
- x17 := (uint64(arg1[15]) << 56)
- x18 := (uint64(arg1[14]) << 48)
- x19 := (uint64(arg1[13]) << 40)
- x20 := (uint64(arg1[12]) << 32)
- x21 := (uint64(arg1[11]) << 24)
- x22 := (uint64(arg1[10]) << 16)
- x23 := (uint64(arg1[9]) << 8)
- x24 := arg1[8]
- x25 := (uint64(arg1[7]) << 56)
- x26 := (uint64(arg1[6]) << 48)
- x27 := (uint64(arg1[5]) << 40)
- x28 := (uint64(arg1[4]) << 32)
- x29 := (uint64(arg1[3]) << 24)
- x30 := (uint64(arg1[2]) << 16)
- x31 := (uint64(arg1[1]) << 8)
- x32 := arg1[0]
- x33 := (x31 + uint64(x32))
- x34 := (x30 + x33)
- x35 := (x29 + x34)
- x36 := (x28 + x35)
- x37 := (x27 + x36)
- x38 := (x26 + x37)
- x39 := (x25 + x38)
- x40 := (x23 + uint64(x24))
- x41 := (x22 + x40)
- x42 := (x21 + x41)
- x43 := (x20 + x42)
- x44 := (x19 + x43)
- x45 := (x18 + x44)
- x46 := (x17 + x45)
- x47 := (x15 + uint64(x16))
- x48 := (x14 + x47)
- x49 := (x13 + x48)
- x50 := (x12 + x49)
- x51 := (x11 + x50)
- x52 := (x10 + x51)
- x53 := (x9 + x52)
- x54 := (x7 + uint64(x8))
- x55 := (x6 + x54)
- x56 := (x5 + x55)
- x57 := (x4 + x56)
- x58 := (x3 + x57)
- x59 := (x2 + x58)
- x60 := (x1 + x59)
- out1[0] = x39
- out1[1] = x46
- out1[2] = x53
- out1[3] = x60
-}
diff --git a/vendor/filippo.io/edwards25519/scalarmult.go b/vendor/filippo.io/edwards25519/scalarmult.go
deleted file mode 100644
index f7ca3cef..00000000
--- a/vendor/filippo.io/edwards25519/scalarmult.go
+++ /dev/null
@@ -1,214 +0,0 @@
-// Copyright (c) 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package edwards25519
-
-import "sync"
-
-// basepointTable is a set of 32 affineLookupTables, where table i is generated
-// from 256i * basepoint. It is precomputed the first time it's used.
-func basepointTable() *[32]affineLookupTable {
- basepointTablePrecomp.initOnce.Do(func() {
- p := NewGeneratorPoint()
- for i := 0; i < 32; i++ {
- basepointTablePrecomp.table[i].FromP3(p)
- for j := 0; j < 8; j++ {
- p.Add(p, p)
- }
- }
- })
- return &basepointTablePrecomp.table
-}
-
-var basepointTablePrecomp struct {
- table [32]affineLookupTable
- initOnce sync.Once
-}
-
-// ScalarBaseMult sets v = x * B, where B is the canonical generator, and
-// returns v.
-//
-// The scalar multiplication is done in constant time.
-func (v *Point) ScalarBaseMult(x *Scalar) *Point {
- basepointTable := basepointTable()
-
- // Write x = sum(x_i * 16^i) so x*B = sum( B*x_i*16^i )
- // as described in the Ed25519 paper
- //
- // Group even and odd coefficients
- // x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B
- // + x_1*16^1*B + x_3*16^3*B + ... + x_63*16^63*B
- // x*B = x_0*16^0*B + x_2*16^2*B + ... + x_62*16^62*B
- // + 16*( x_1*16^0*B + x_3*16^2*B + ... + x_63*16^62*B)
- //
- // We use a lookup table for each i to get x_i*16^(2*i)*B
- // and do four doublings to multiply by 16.
- digits := x.signedRadix16()
-
- multiple := &affineCached{}
- tmp1 := &projP1xP1{}
- tmp2 := &projP2{}
-
- // Accumulate the odd components first
- v.Set(NewIdentityPoint())
- for i := 1; i < 64; i += 2 {
- basepointTable[i/2].SelectInto(multiple, digits[i])
- tmp1.AddAffine(v, multiple)
- v.fromP1xP1(tmp1)
- }
-
- // Multiply by 16
- tmp2.FromP3(v) // tmp2 = v in P2 coords
- tmp1.Double(tmp2) // tmp1 = 2*v in P1xP1 coords
- tmp2.FromP1xP1(tmp1) // tmp2 = 2*v in P2 coords
- tmp1.Double(tmp2) // tmp1 = 4*v in P1xP1 coords
- tmp2.FromP1xP1(tmp1) // tmp2 = 4*v in P2 coords
- tmp1.Double(tmp2) // tmp1 = 8*v in P1xP1 coords
- tmp2.FromP1xP1(tmp1) // tmp2 = 8*v in P2 coords
- tmp1.Double(tmp2) // tmp1 = 16*v in P1xP1 coords
- v.fromP1xP1(tmp1) // now v = 16*(odd components)
-
- // Accumulate the even components
- for i := 0; i < 64; i += 2 {
- basepointTable[i/2].SelectInto(multiple, digits[i])
- tmp1.AddAffine(v, multiple)
- v.fromP1xP1(tmp1)
- }
-
- return v
-}
-
-// ScalarMult sets v = x * q, and returns v.
-//
-// The scalar multiplication is done in constant time.
-func (v *Point) ScalarMult(x *Scalar, q *Point) *Point {
- checkInitialized(q)
-
- var table projLookupTable
- table.FromP3(q)
-
- // Write x = sum(x_i * 16^i)
- // so x*Q = sum( Q*x_i*16^i )
- // = Q*x_0 + 16*(Q*x_1 + 16*( ... + Q*x_63) ... )
- // <------compute inside out---------
- //
- // We use the lookup table to get the x_i*Q values
- // and do four doublings to compute 16*Q
- digits := x.signedRadix16()
-
- // Unwrap first loop iteration to save computing 16*identity
- multiple := &projCached{}
- tmp1 := &projP1xP1{}
- tmp2 := &projP2{}
- table.SelectInto(multiple, digits[63])
-
- v.Set(NewIdentityPoint())
- tmp1.Add(v, multiple) // tmp1 = x_63*Q in P1xP1 coords
- for i := 62; i >= 0; i-- {
- tmp2.FromP1xP1(tmp1) // tmp2 = (prev) in P2 coords
- tmp1.Double(tmp2) // tmp1 = 2*(prev) in P1xP1 coords
- tmp2.FromP1xP1(tmp1) // tmp2 = 2*(prev) in P2 coords
- tmp1.Double(tmp2) // tmp1 = 4*(prev) in P1xP1 coords
- tmp2.FromP1xP1(tmp1) // tmp2 = 4*(prev) in P2 coords
- tmp1.Double(tmp2) // tmp1 = 8*(prev) in P1xP1 coords
- tmp2.FromP1xP1(tmp1) // tmp2 = 8*(prev) in P2 coords
- tmp1.Double(tmp2) // tmp1 = 16*(prev) in P1xP1 coords
- v.fromP1xP1(tmp1) // v = 16*(prev) in P3 coords
- table.SelectInto(multiple, digits[i])
- tmp1.Add(v, multiple) // tmp1 = x_i*Q + 16*(prev) in P1xP1 coords
- }
- v.fromP1xP1(tmp1)
- return v
-}
-
-// basepointNafTable is the nafLookupTable8 for the basepoint.
-// It is precomputed the first time it's used.
-func basepointNafTable() *nafLookupTable8 {
- basepointNafTablePrecomp.initOnce.Do(func() {
- basepointNafTablePrecomp.table.FromP3(NewGeneratorPoint())
- })
- return &basepointNafTablePrecomp.table
-}
-
-var basepointNafTablePrecomp struct {
- table nafLookupTable8
- initOnce sync.Once
-}
-
-// VarTimeDoubleScalarBaseMult sets v = a * A + b * B, where B is the canonical
-// generator, and returns v.
-//
-// Execution time depends on the inputs.
-func (v *Point) VarTimeDoubleScalarBaseMult(a *Scalar, A *Point, b *Scalar) *Point {
- checkInitialized(A)
-
- // Similarly to the single variable-base approach, we compute
- // digits and use them with a lookup table. However, because
- // we are allowed to do variable-time operations, we don't
- // need constant-time lookups or constant-time digit
- // computations.
- //
- // So we use a non-adjacent form of some width w instead of
- // radix 16. This is like a binary representation (one digit
- // for each binary place) but we allow the digits to grow in
- // magnitude up to 2^{w-1} so that the nonzero digits are as
- // sparse as possible. Intuitively, this "condenses" the
- // "mass" of the scalar onto sparse coefficients (meaning
- // fewer additions).
-
- basepointNafTable := basepointNafTable()
- var aTable nafLookupTable5
- aTable.FromP3(A)
- // Because the basepoint is fixed, we can use a wider NAF
- // corresponding to a bigger table.
- aNaf := a.nonAdjacentForm(5)
- bNaf := b.nonAdjacentForm(8)
-
- // Find the first nonzero coefficient.
- i := 255
- for j := i; j >= 0; j-- {
- if aNaf[j] != 0 || bNaf[j] != 0 {
- break
- }
- }
-
- multA := &projCached{}
- multB := &affineCached{}
- tmp1 := &projP1xP1{}
- tmp2 := &projP2{}
- tmp2.Zero()
-
- // Move from high to low bits, doubling the accumulator
- // at each iteration and checking whether there is a nonzero
- // coefficient to look up a multiple of.
- for ; i >= 0; i-- {
- tmp1.Double(tmp2)
-
- // Only update v if we have a nonzero coeff to add in.
- if aNaf[i] > 0 {
- v.fromP1xP1(tmp1)
- aTable.SelectInto(multA, aNaf[i])
- tmp1.Add(v, multA)
- } else if aNaf[i] < 0 {
- v.fromP1xP1(tmp1)
- aTable.SelectInto(multA, -aNaf[i])
- tmp1.Sub(v, multA)
- }
-
- if bNaf[i] > 0 {
- v.fromP1xP1(tmp1)
- basepointNafTable.SelectInto(multB, bNaf[i])
- tmp1.AddAffine(v, multB)
- } else if bNaf[i] < 0 {
- v.fromP1xP1(tmp1)
- basepointNafTable.SelectInto(multB, -bNaf[i])
- tmp1.SubAffine(v, multB)
- }
-
- tmp2.FromP1xP1(tmp1)
- }
-
- v.fromP2(tmp2)
- return v
-}
diff --git a/vendor/filippo.io/edwards25519/tables.go b/vendor/filippo.io/edwards25519/tables.go
deleted file mode 100644
index 83234bbc..00000000
--- a/vendor/filippo.io/edwards25519/tables.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright (c) 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package edwards25519
-
-import (
- "crypto/subtle"
-)
-
-// A dynamic lookup table for variable-base, constant-time scalar muls.
-type projLookupTable struct {
- points [8]projCached
-}
-
-// A precomputed lookup table for fixed-base, constant-time scalar muls.
-type affineLookupTable struct {
- points [8]affineCached
-}
-
-// A dynamic lookup table for variable-base, variable-time scalar muls.
-type nafLookupTable5 struct {
- points [8]projCached
-}
-
-// A precomputed lookup table for fixed-base, variable-time scalar muls.
-type nafLookupTable8 struct {
- points [64]affineCached
-}
-
-// Constructors.
-
-// Builds a lookup table at runtime. Fast.
-func (v *projLookupTable) FromP3(q *Point) {
- // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q
- // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q
- v.points[0].FromP3(q)
- tmpP3 := Point{}
- tmpP1xP1 := projP1xP1{}
- for i := 0; i < 7; i++ {
- // Compute (i+1)*Q as Q + i*Q and convert to a projCached
- // This is needlessly complicated because the API has explicit
- // receivers instead of creating stack objects and relying on RVO
- v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(q, &v.points[i])))
- }
-}
-
-// This is not optimised for speed; fixed-base tables should be precomputed.
-func (v *affineLookupTable) FromP3(q *Point) {
- // Goal: v.points[i] = (i+1)*Q, i.e., Q, 2Q, ..., 8Q
- // This allows lookup of -8Q, ..., -Q, 0, Q, ..., 8Q
- v.points[0].FromP3(q)
- tmpP3 := Point{}
- tmpP1xP1 := projP1xP1{}
- for i := 0; i < 7; i++ {
- // Compute (i+1)*Q as Q + i*Q and convert to affineCached
- v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(q, &v.points[i])))
- }
-}
-
-// Builds a lookup table at runtime. Fast.
-func (v *nafLookupTable5) FromP3(q *Point) {
- // Goal: v.points[i] = (2*i+1)*Q, i.e., Q, 3Q, 5Q, ..., 15Q
- // This allows lookup of -15Q, ..., -3Q, -Q, 0, Q, 3Q, ..., 15Q
- v.points[0].FromP3(q)
- q2 := Point{}
- q2.Add(q, q)
- tmpP3 := Point{}
- tmpP1xP1 := projP1xP1{}
- for i := 0; i < 7; i++ {
- v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.Add(&q2, &v.points[i])))
- }
-}
-
-// This is not optimised for speed; fixed-base tables should be precomputed.
-func (v *nafLookupTable8) FromP3(q *Point) {
- v.points[0].FromP3(q)
- q2 := Point{}
- q2.Add(q, q)
- tmpP3 := Point{}
- tmpP1xP1 := projP1xP1{}
- for i := 0; i < 63; i++ {
- v.points[i+1].FromP3(tmpP3.fromP1xP1(tmpP1xP1.AddAffine(&q2, &v.points[i])))
- }
-}
-
-// Selectors.
-
-// Set dest to x*Q, where -8 <= x <= 8, in constant time.
-func (v *projLookupTable) SelectInto(dest *projCached, x int8) {
- // Compute xabs = |x|
- xmask := x >> 7
- xabs := uint8((x + xmask) ^ xmask)
-
- dest.Zero()
- for j := 1; j <= 8; j++ {
- // Set dest = j*Q if |x| = j
- cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
- dest.Select(&v.points[j-1], dest, cond)
- }
- // Now dest = |x|*Q, conditionally negate to get x*Q
- dest.CondNeg(int(xmask & 1))
-}
-
-// Set dest to x*Q, where -8 <= x <= 8, in constant time.
-func (v *affineLookupTable) SelectInto(dest *affineCached, x int8) {
- // Compute xabs = |x|
- xmask := x >> 7
- xabs := uint8((x + xmask) ^ xmask)
-
- dest.Zero()
- for j := 1; j <= 8; j++ {
- // Set dest = j*Q if |x| = j
- cond := subtle.ConstantTimeByteEq(xabs, uint8(j))
- dest.Select(&v.points[j-1], dest, cond)
- }
- // Now dest = |x|*Q, conditionally negate to get x*Q
- dest.CondNeg(int(xmask & 1))
-}
-
-// Given odd x with 0 < x < 2^4, return x*Q (in variable time).
-func (v *nafLookupTable5) SelectInto(dest *projCached, x int8) {
- *dest = v.points[x/2]
-}
-
-// Given odd x with 0 < x < 2^7, return x*Q (in variable time).
-func (v *nafLookupTable8) SelectInto(dest *affineCached, x int8) {
- *dest = v.points[x/2]
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
deleted file mode 100644
index af095f1d..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
+++ /dev/null
@@ -1,801 +0,0 @@
-# Release History
-
-## 1.12.0 (2024-06-06)
-
-### Features Added
-
-* Added field `StatusCodes` to `runtime.FetcherForNextLinkOptions` allowing for additional HTTP status codes indicating success.
-* Added func `NewUUID` to the `runtime` package for generating UUIDs.
-
-### Bugs Fixed
-
-* Fixed an issue that prevented pollers using the `Operation-Location` strategy from unmarshaling the final result in some cases.
-
-### Other Changes
-
-* Updated dependencies.
-
-## 1.11.1 (2024-04-02)
-
-### Bugs Fixed
-
-* Pollers that use the `Location` header won't consider `http.StatusRequestTimeout` a terminal failure.
-* `runtime.Poller[T].Result` won't consider non-terminal error responses as terminal.
-
-## 1.11.0 (2024-04-01)
-
-### Features Added
-
-* Added `StatusCodes` to `arm/policy.RegistrationOptions` to allow supporting non-standard HTTP status codes during registration.
-* Added field `InsecureAllowCredentialWithHTTP` to `azcore.ClientOptions` and dependent authentication pipeline policies.
-* Added type `MultipartContent` to the `streaming` package to support multipart/form payloads with custom Content-Type and file name.
-
-### Bugs Fixed
-
-* `runtime.SetMultipartFormData` won't try to stringify `[]byte` values.
-* Pollers that use the `Location` header won't consider `http.StatusTooManyRequests` a terminal failure.
-
-### Other Changes
-
-* Update dependencies.
-
-## 1.10.0 (2024-02-29)
-
-### Features Added
-
-* Added logging event `log.EventResponseError` that will contain the contents of `ResponseError.Error()` whenever an `azcore.ResponseError` is created.
-* Added `runtime.NewResponseErrorWithErrorCode` for creating an `azcore.ResponseError` with a caller-supplied error code.
-* Added type `MatchConditions` for use in conditional requests.
-
-### Bugs Fixed
-
-* Fixed a potential race condition between `NullValue` and `IsNullValue`.
-* `runtime.EncodeQueryParams` will escape semicolons before calling `url.ParseQuery`.
-
-### Other Changes
-
-* Update dependencies.
-
-## 1.9.2 (2024-02-06)
-
-### Bugs Fixed
-
-* `runtime.MarshalAsByteArray` and `runtime.MarshalAsJSON` will preserve the preexisting value of the `Content-Type` header.
-
-### Other Changes
-
-* Update to latest version of `internal`.
-
-## 1.9.1 (2023-12-11)
-
-### Bugs Fixed
-
-* The `retry-after-ms` and `x-ms-retry-after-ms` headers weren't being checked during retries.
-
-### Other Changes
-
-* Update dependencies.
-
-## 1.9.0 (2023-11-06)
-
-### Breaking Changes
-> These changes affect only code written against previous beta versions of `v1.7.0` and `v1.8.0`
-* The function `NewTokenCredential` has been removed from the `fake` package. Use a literal `&fake.TokenCredential{}` instead.
-* The field `TracingNamespace` in `runtime.PipelineOptions` has been replaced by `TracingOptions`.
-
-### Bugs Fixed
-
-* Fixed an issue that could cause some allowed HTTP header values to not show up in logs.
-* Include error text instead of error type in traces when the transport returns an error.
-* Fixed an issue that could cause an HTTP/2 request to hang when the TCP connection becomes unresponsive.
-* Block key and SAS authentication for non TLS protected endpoints.
-* Passing a `nil` credential value will no longer cause a panic. Instead, the authentication is skipped.
-* Calling `Error` on a zero-value `azcore.ResponseError` will no longer panic.
-* Fixed an issue in `fake.PagerResponder[T]` that would cause a trailing error to be omitted when iterating over pages.
-* Context values created by `azcore` will no longer flow across disjoint HTTP requests.
-
-### Other Changes
-
-* Skip generating trace info for no-op tracers.
-* The `clientName` paramater in client constructors has been renamed to `moduleName`.
-
-## 1.9.0-beta.1 (2023-10-05)
-
-### Other Changes
-
-* The beta features for tracing and fakes have been reinstated.
-
-## 1.8.0 (2023-10-05)
-
-### Features Added
-
-* This includes the following features from `v1.8.0-beta.N` releases.
- * Claims and CAE for authentication.
- * New `messaging` package.
- * Various helpers in the `runtime` package.
- * Deprecation of `runtime.With*` funcs and their replacements in the `policy` package.
-* Added types `KeyCredential` and `SASCredential` to the `azcore` package.
- * Includes their respective constructor functions.
-* Added types `KeyCredentialPolicy` and `SASCredentialPolicy` to the `azcore/runtime` package.
- * Includes their respective constructor functions and options types.
-
-### Breaking Changes
-> These changes affect only code written against beta versions of `v1.8.0`
-* The beta features for tracing and fakes have been omitted for this release.
-
-### Bugs Fixed
-
-* Fixed an issue that could cause some ARM RPs to not be automatically registered.
-* Block bearer token authentication for non TLS protected endpoints.
-
-### Other Changes
-
-* Updated dependencies.
-
-## 1.8.0-beta.3 (2023-09-07)
-
-### Features Added
-
-* Added function `FetcherForNextLink` and `FetcherForNextLinkOptions` to the `runtime` package to centralize creation of `Pager[T].Fetcher` from a next link URL.
-
-### Bugs Fixed
-
-* Suppress creating spans for nested SDK API calls. The HTTP span will be a child of the outer API span.
-
-### Other Changes
-
-* The following functions in the `runtime` package are now exposed from the `policy` package, and the `runtime` versions have been deprecated.
- * `WithCaptureResponse`
- * `WithHTTPHeader`
- * `WithRetryOptions`
-
-## 1.7.2 (2023-09-06)
-
-### Bugs Fixed
-
-* Fix default HTTP transport to work in WASM modules.
-
-## 1.8.0-beta.2 (2023-08-14)
-
-### Features Added
-
-* Added function `SanitizePagerPollerPath` to the `server` package to centralize sanitization and formalize the contract.
-* Added `TokenRequestOptions.EnableCAE` to indicate whether to request a CAE token.
-
-### Breaking Changes
-
-> This change affects only code written against beta version `v1.8.0-beta.1`.
-* `messaging.CloudEvent` deserializes JSON objects as `[]byte`, instead of `json.RawMessage`. See the documentation for CloudEvent.Data for more information.
-
-> This change affects only code written against beta versions `v1.7.0-beta.2` and `v1.8.0-beta.1`.
-* Removed parameter from method `Span.End()` and its type `tracing.SpanEndOptions`. This API GA'ed in `v1.2.0` so we cannot change it.
-
-### Bugs Fixed
-
-* Propagate any query parameters when constructing a fake poller and/or injecting next links.
-
-## 1.7.1 (2023-08-14)
-
-## Bugs Fixed
-
-* Enable TLS renegotiation in the default transport policy.
-
-## 1.8.0-beta.1 (2023-07-12)
-
-### Features Added
-
-- `messaging/CloudEvent` allows you to serialize/deserialize CloudEvents, as described in the CloudEvents 1.0 specification: [link](https://github.com/cloudevents/spec)
-
-### Other Changes
-
-* The beta features for CAE, tracing, and fakes have been reinstated.
-
-## 1.7.0 (2023-07-12)
-
-### Features Added
-* Added method `WithClientName()` to type `azcore.Client` to support shallow cloning of a client with a new name used for tracing.
-
-### Breaking Changes
-> These changes affect only code written against beta versions v1.7.0-beta.1 or v1.7.0-beta.2
-* The beta features for CAE, tracing, and fakes have been omitted for this release.
-
-## 1.7.0-beta.2 (2023-06-06)
-
-### Breaking Changes
-> These changes affect only code written against beta version v1.7.0-beta.1
-* Method `SpanFromContext()` on type `tracing.Tracer` had the `bool` return value removed.
- * This includes the field `SpanFromContext` in supporting type `tracing.TracerOptions`.
-* Method `AddError()` has been removed from type `tracing.Span`.
-* Method `Span.End()` now requires an argument of type `*tracing.SpanEndOptions`.
-
-## 1.6.1 (2023-06-06)
-
-### Bugs Fixed
-* Fixed an issue in `azcore.NewClient()` and `arm.NewClient()` that could cause an incorrect module name to be used in telemetry.
-
-### Other Changes
-* This version contains all bug fixes from `v1.7.0-beta.1`
-
-## 1.7.0-beta.1 (2023-05-24)
-
-### Features Added
-* Restored CAE support for ARM clients.
-* Added supporting features to enable distributed tracing.
- * Added func `runtime.StartSpan()` for use by SDKs to start spans.
- * Added method `WithContext()` to `runtime.Request` to support shallow cloning with a new context.
- * Added field `TracingNamespace` to `runtime.PipelineOptions`.
- * Added field `Tracer` to `runtime.NewPollerOptions` and `runtime.NewPollerFromResumeTokenOptions` types.
- * Added field `SpanFromContext` to `tracing.TracerOptions`.
- * Added methods `Enabled()`, `SetAttributes()`, and `SpanFromContext()` to `tracing.Tracer`.
- * Added supporting pipeline policies to include HTTP spans when creating clients.
-* Added package `fake` to support generated fakes packages in SDKs.
- * The package contains public surface area exposed by fake servers and supporting APIs intended only for use by the fake server implementations.
- * Added an internal fake poller implementation.
-
-### Bugs Fixed
-* Retry policy always clones the underlying `*http.Request` before invoking the next policy.
-* Added some non-standard error codes to the list of error codes for unregistered resource providers.
-
-## 1.6.0 (2023-05-04)
-
-### Features Added
-* Added support for ARM cross-tenant authentication. Set the `AuxiliaryTenants` field of `arm.ClientOptions` to enable.
-* Added `TenantID` field to `policy.TokenRequestOptions`.
-
-## 1.5.0 (2023-04-06)
-
-### Features Added
-* Added `ShouldRetry` to `policy.RetryOptions` for finer-grained control over when to retry.
-
-### Breaking Changes
-> These changes affect only code written against a beta version such as v1.5.0-beta.1
-> These features will return in v1.6.0-beta.1.
-* Removed `TokenRequestOptions.Claims` and `.TenantID`
-* Removed ARM client support for CAE and cross-tenant auth.
-
-### Bugs Fixed
-* Added non-conformant LRO terminal states `Cancelled` and `Completed`.
-
-### Other Changes
-* Updated to latest `internal` module.
-
-## 1.5.0-beta.1 (2023-03-02)
-
-### Features Added
-* This release includes the features added in v1.4.0-beta.1
-
-## 1.4.0 (2023-03-02)
-> This release doesn't include features added in v1.4.0-beta.1. They will return in v1.5.0-beta.1.
-
-### Features Added
-* Add `Clone()` method for `arm/policy.ClientOptions`.
-
-### Bugs Fixed
-* ARM's RP registration policy will no longer swallow unrecognized errors.
-* Fixed an issue in `runtime.NewPollerFromResumeToken()` when resuming a `Poller` with a custom `PollingHandler`.
-* Fixed wrong policy copy in `arm/runtime.NewPipeline()`.
-
-## 1.4.0-beta.1 (2023-02-02)
-
-### Features Added
-* Added support for ARM cross-tenant authentication. Set the `AuxiliaryTenants` field of `arm.ClientOptions` to enable.
-* Added `Claims` and `TenantID` fields to `policy.TokenRequestOptions`.
-* ARM bearer token policy handles CAE challenges.
-
-## 1.3.1 (2023-02-02)
-
-### Other Changes
-* Update dependencies to latest versions.
-
-## 1.3.0 (2023-01-06)
-
-### Features Added
-* Added `BearerTokenOptions.AuthorizationHandler` to enable extending `runtime.BearerTokenPolicy`
- with custom authorization logic
-* Added `Client` types and matching constructors to the `azcore` and `arm` packages. These represent a basic client for HTTP and ARM respectively.
-
-### Other Changes
-* Updated `internal` module to latest version.
-* `policy/Request.SetBody()` allows replacing a request's body with an empty one
-
-## 1.2.0 (2022-11-04)
-
-### Features Added
-* Added `ClientOptions.APIVersion` field, which overrides the default version a client
- requests of the service, if the client supports this (all ARM clients do).
-* Added package `tracing` that contains the building blocks for distributed tracing.
-* Added field `TracingProvider` to type `policy.ClientOptions` that will be used to set the per-client tracing implementation.
-
-### Bugs Fixed
-* Fixed an issue in `runtime.SetMultipartFormData` to properly handle slices of `io.ReadSeekCloser`.
-* Fixed the MaxRetryDelay default to be 60s.
-* Failure to poll the state of an LRO will now return an `*azcore.ResponseError` for poller types that require this behavior.
-* Fixed a bug in `runtime.NewPipeline` that would cause pipeline-specified allowed headers and query parameters to be lost.
-
-### Other Changes
-* Retain contents of read-only fields when sending requests.
-
-## 1.1.4 (2022-10-06)
-
-### Bugs Fixed
-* Don't retry a request if the `Retry-After` delay is greater than the configured `RetryOptions.MaxRetryDelay`.
-* `runtime.JoinPaths`: do not unconditionally add a forward slash before the query string
-
-### Other Changes
-* Removed logging URL from retry policy as it's redundant.
-* Retry policy logs when it exits due to a non-retriable status code.
-
-## 1.1.3 (2022-09-01)
-
-### Bugs Fixed
-* Adjusted the initial retry delay to 800ms per the Azure SDK guidelines.
-
-## 1.1.2 (2022-08-09)
-
-### Other Changes
-* Fixed various doc bugs.
-
-## 1.1.1 (2022-06-30)
-
-### Bugs Fixed
-* Avoid polling when a RELO LRO synchronously terminates.
-
-## 1.1.0 (2022-06-03)
-
-### Other Changes
-* The one-second floor for `Frequency` when calling `PollUntilDone()` has been removed when running tests.
-
-## 1.0.0 (2022-05-12)
-
-### Features Added
-* Added interface `runtime.PollingHandler` to support custom poller implementations.
- * Added field `PollingHandler` of this type to `runtime.NewPollerOptions[T]` and `runtime.NewPollerFromResumeTokenOptions[T]`.
-
-### Breaking Changes
-* Renamed `cloud.Configuration.LoginEndpoint` to `.ActiveDirectoryAuthorityHost`
-* Renamed `cloud.AzurePublicCloud` to `cloud.AzurePublic`
-* Removed `AuxiliaryTenants` field from `arm/ClientOptions` and `arm/policy/BearerTokenOptions`
-* Removed `TokenRequestOptions.TenantID`
-* `Poller[T].PollUntilDone()` now takes an `options *PollUntilDoneOptions` param instead of `freq time.Duration`
-* Removed `arm/runtime.Poller[T]`, `arm/runtime.NewPoller[T]()` and `arm/runtime.NewPollerFromResumeToken[T]()`
-* Removed `arm/runtime.FinalStateVia` and related `const` values
-* Renamed `runtime.PageProcessor` to `runtime.PagingHandler`
-* The `arm/runtime.ProviderRepsonse` and `arm/runtime.Provider` types are no longer exported.
-* Renamed `NewRequestIdPolicy()` to `NewRequestIDPolicy()`
-* `TokenCredential.GetToken` now returns `AccessToken` by value.
-
-### Bugs Fixed
-* When per-try timeouts are enabled, only cancel the context after the body has been read and closed.
-* The `Operation-Location` poller now properly handles `final-state-via` values.
-* Improvements in `runtime.Poller[T]`
- * `Poll()` shouldn't cache errors, allowing for additional retries when in a non-terminal state.
- * `Result()` will cache the terminal result or error but not transient errors, allowing for additional retries.
-
-### Other Changes
-* Updated to latest `internal` module and absorbed breaking changes.
- * Use `temporal.Resource` and deleted copy.
-* The internal poller implementation has been refactored.
- * The implementation in `internal/pollers/poller.go` has been merged into `runtime/poller.go` with some slight modification.
- * The internal poller types had their methods updated to conform to the `runtime.PollingHandler` interface.
- * The creation of resume tokens has been refactored so that implementers of `runtime.PollingHandler` don't need to know about it.
-* `NewPipeline()` places policies from `ClientOptions` after policies from `PipelineOptions`
-* Default User-Agent headers no longer include `azcore` version information
-
-## 0.23.1 (2022-04-14)
-
-### Bugs Fixed
-* Include XML header when marshalling XML content.
-* Handle XML namespaces when searching for error code.
-* Handle `odata.error` when searching for error code.
-
-## 0.23.0 (2022-04-04)
-
-### Features Added
-* Added `runtime.Pager[T any]` and `runtime.Poller[T any]` supporting types for central, generic, implementations.
-* Added `cloud` package with a new API for cloud configuration
-* Added `FinalStateVia` field to `runtime.NewPollerOptions[T any]` type.
-
-### Breaking Changes
-* Removed the `Poller` type-alias to the internal poller implementation.
-* Added `Ptr[T any]` and `SliceOfPtrs[T any]` in the `to` package and removed all non-generic implementations.
-* `NullValue` and `IsNullValue` now take a generic type parameter instead of an interface func parameter.
-* Replaced `arm.Endpoint` with `cloud` API
- * Removed the `endpoint` parameter from `NewRPRegistrationPolicy()`
- * `arm/runtime.NewPipeline()` and `.NewRPRegistrationPolicy()` now return an `error`
-* Refactored `NewPoller` and `NewPollerFromResumeToken` funcs in `arm/runtime` and `runtime` packages.
- * Removed the `pollerID` parameter as it's no longer required.
- * Created optional parameter structs and moved optional parameters into them.
-* Changed `FinalStateVia` field to a `const` type.
-
-### Other Changes
-* Converted expiring resource and dependent types to use generics.
-
-## 0.22.0 (2022-03-03)
-
-### Features Added
-* Added header `WWW-Authenticate` to the default allow-list of headers for logging.
-* Added a pipeline policy that enables the retrieval of HTTP responses from API calls.
- * Added `runtime.WithCaptureResponse` to enable the policy at the API level (off by default).
-
-### Breaking Changes
-* Moved `WithHTTPHeader` and `WithRetryOptions` from the `policy` package to the `runtime` package.
-
-## 0.21.1 (2022-02-04)
-
-### Bugs Fixed
-* Restore response body after reading in `Poller.FinalResponse()`. (#16911)
-* Fixed bug in `NullValue` that could lead to incorrect comparisons for empty maps/slices (#16969)
-
-### Other Changes
-* `BearerTokenPolicy` is more resilient to transient authentication failures. (#16789)
-
-## 0.21.0 (2022-01-11)
-
-### Features Added
-* Added `AllowedHeaders` and `AllowedQueryParams` to `policy.LogOptions` to control which headers and query parameters are written to the logger.
-* Added `azcore.ResponseError` type which is returned from APIs when a non-success HTTP status code is received.
-
-### Breaking Changes
-* Moved `[]policy.Policy` parameters of `arm/runtime.NewPipeline` and `runtime.NewPipeline` into a new struct, `runtime.PipelineOptions`
-* Renamed `arm/ClientOptions.Host` to `.Endpoint`
-* Moved `Request.SkipBodyDownload` method to function `runtime.SkipBodyDownload`
-* Removed `azcore.HTTPResponse` interface type
-* `arm.NewPoller()` and `runtime.NewPoller()` no longer require an `eu` parameter
-* `runtime.NewResponseError()` no longer requires an `error` parameter
-
-## 0.20.0 (2021-10-22)
-
-### Breaking Changes
-* Removed `arm.Connection`
-* Removed `azcore.Credential` and `.NewAnonymousCredential()`
- * `NewRPRegistrationPolicy` now requires an `azcore.TokenCredential`
-* `runtime.NewPipeline` has a new signature that simplifies implementing custom authentication
-* `arm/runtime.RegistrationOptions` embeds `policy.ClientOptions`
-* Contents in the `log` package have been slightly renamed.
-* Removed `AuthenticationOptions` in favor of `policy.BearerTokenOptions`
-* Changed parameters for `NewBearerTokenPolicy()`
-* Moved policy config options out of `arm/runtime` and into `arm/policy`
-
-### Features Added
-* Updating Documentation
-* Added string typdef `arm.Endpoint` to provide a hint toward expected ARM client endpoints
-* `azcore.ClientOptions` contains common pipeline configuration settings
-* Added support for multi-tenant authorization in `arm/runtime`
-* Require one second minimum when calling `PollUntilDone()`
-
-### Bug Fixes
-* Fixed a potential panic when creating the default Transporter.
-* Close LRO initial response body when creating a poller.
-* Fixed a panic when recursively cloning structs that contain time.Time.
-
-## 0.19.0 (2021-08-25)
-
-### Breaking Changes
-* Split content out of `azcore` into various packages. The intent is to separate content based on its usage (common, uncommon, SDK authors).
- * `azcore` has all core functionality.
- * `log` contains facilities for configuring in-box logging.
- * `policy` is used for configuring pipeline options and creating custom pipeline policies.
- * `runtime` contains various helpers used by SDK authors and generated content.
- * `streaming` has helpers for streaming IO operations.
-* `NewTelemetryPolicy()` now requires module and version parameters and the `Value` option has been removed.
- * As a result, the `Request.Telemetry()` method has been removed.
-* The telemetry policy now includes the SDK prefix `azsdk-go-` so callers no longer need to provide it.
-* The `*http.Request` in `runtime.Request` is no longer anonymously embedded. Use the `Raw()` method to access it.
-* The `UserAgent` and `Version` constants have been made internal, `Module` and `Version` respectively.
-
-### Bug Fixes
-* Fixed an issue in the retry policy where the request body could be overwritten after a rewind.
-
-### Other Changes
-* Moved modules `armcore` and `to` content into `arm` and `to` packages respectively.
- * The `Pipeline()` method on `armcore.Connection` has been replaced by `NewPipeline()` in `arm.Connection`. It takes module and version parameters used by the telemetry policy.
-* Poller logic has been consolidated across ARM and core implementations.
- * This required some changes to the internal interfaces for core pollers.
-* The core poller types have been improved, including more logging and test coverage.
-
-## 0.18.1 (2021-08-20)
-
-### Features Added
-* Adds an `ETag` type for comparing etags and handling etags on requests
-* Simplifies the `requestBodyProgess` and `responseBodyProgress` into a single `progress` object
-
-### Bugs Fixed
-* `JoinPaths` will preserve query parameters encoded in the `root` url.
-
-### Other Changes
-* Bumps dependency on `internal` module to the latest version (v0.7.0)
-
-## 0.18.0 (2021-07-29)
-### Features Added
-* Replaces methods from Logger type with two package methods for interacting with the logging functionality.
-* `azcore.SetClassifications` replaces `azcore.Logger().SetClassifications`
-* `azcore.SetListener` replaces `azcore.Logger().SetListener`
-
-### Breaking Changes
-* Removes `Logger` type from `azcore`
-
-
-## 0.17.0 (2021-07-27)
-### Features Added
-* Adding TenantID to TokenRequestOptions (https://github.com/Azure/azure-sdk-for-go/pull/14879)
-* Adding AuxiliaryTenants to AuthenticationOptions (https://github.com/Azure/azure-sdk-for-go/pull/15123)
-
-### Breaking Changes
-* Rename `AnonymousCredential` to `NewAnonymousCredential` (https://github.com/Azure/azure-sdk-for-go/pull/15104)
-* rename `AuthenticationPolicyOptions` to `AuthenticationOptions` (https://github.com/Azure/azure-sdk-for-go/pull/15103)
-* Make Header constants private (https://github.com/Azure/azure-sdk-for-go/pull/15038)
-
-
-## 0.16.2 (2021-05-26)
-### Features Added
-* Improved support for byte arrays [#14715](https://github.com/Azure/azure-sdk-for-go/pull/14715)
-
-
-## 0.16.1 (2021-05-19)
-### Features Added
-* Add license.txt to azcore module [#14682](https://github.com/Azure/azure-sdk-for-go/pull/14682)
-
-
-## 0.16.0 (2021-05-07)
-### Features Added
-* Remove extra `*` in UnmarshalAsByteArray() [#14642](https://github.com/Azure/azure-sdk-for-go/pull/14642)
-
-
-## 0.15.1 (2021-05-06)
-### Features Added
-* Cache the original request body on Request [#14634](https://github.com/Azure/azure-sdk-for-go/pull/14634)
-
-
-## 0.15.0 (2021-05-05)
-### Features Added
-* Add support for null map and slice
-* Export `Response.Payload` method
-
-### Breaking Changes
-* remove `Response.UnmarshalError` as it's no longer required
-
-
-## 0.14.5 (2021-04-23)
-### Features Added
-* Add `UnmarshalError()` on `azcore.Response`
-
-
-## 0.14.4 (2021-04-22)
-### Features Added
-* Support for basic LRO polling
-* Added type `LROPoller` and supporting types for basic polling on long running operations.
-* rename poller param and added doc comment
-
-### Bugs Fixed
-* Fixed content type detection bug in logging.
-
-
-## 0.14.3 (2021-03-29)
-### Features Added
-* Add support for multi-part form data
-* Added method `WriteMultipartFormData()` to Request.
-
-
-## 0.14.2 (2021-03-17)
-### Features Added
-* Add support for encoding JSON null values
-* Adds `NullValue()` and `IsNullValue()` functions for setting and detecting sentinel values used for encoding a JSON null.
-* Documentation fixes
-
-### Bugs Fixed
-* Fixed improper error wrapping
-
-
-## 0.14.1 (2021-02-08)
-### Features Added
-* Add `Pager` and `Poller` interfaces to azcore
-
-
-## 0.14.0 (2021-01-12)
-### Features Added
-* Accept zero-value options for default values
-* Specify zero-value options structs to accept default values.
-* Remove `DefaultXxxOptions()` methods.
-* Do not silently change TryTimeout on negative values
-* make per-try timeout opt-in
-
-
-## 0.13.4 (2020-11-20)
-### Features Added
-* Include telemetry string in User Agent
-
-
-## 0.13.3 (2020-11-20)
-### Features Added
-* Updating response body handling on `azcore.Response`
-
-
-## 0.13.2 (2020-11-13)
-### Features Added
-* Remove implementation of stateless policies as first-class functions.
-
-
-## 0.13.1 (2020-11-05)
-### Features Added
-* Add `Telemetry()` method to `azcore.Request()`
-
-
-## 0.13.0 (2020-10-14)
-### Features Added
-* Rename `log` to `logger` to avoid name collision with the log package.
-* Documentation improvements
-* Simplified `DefaultHTTPClientTransport()` implementation
-
-
-## 0.12.1 (2020-10-13)
-### Features Added
-* Update `internal` module dependence to `v0.5.0`
-
-
-## 0.12.0 (2020-10-08)
-### Features Added
-* Removed storage specific content
-* Removed internal content to prevent API clutter
-* Refactored various policy options to conform with our options pattern
-
-
-## 0.11.0 (2020-09-22)
-### Features Added
-
-* Removed `LogError` and `LogSlowResponse`.
-* Renamed `options` in `RequestLogOptions`.
-* Updated `NewRequestLogPolicy()` to follow standard pattern for options.
-* Refactored `requestLogPolicy.Do()` per above changes.
-* Cleaned up/added logging in retry policy.
-* Export `NewResponseError()`
-* Fix `RequestLogOptions` comment
-
-
-## 0.10.1 (2020-09-17)
-### Features Added
-* Add default console logger
-* Default console logger writes to stderr. To enable it, set env var `AZURE_SDK_GO_LOGGING` to the value 'all'.
-* Added `Logger.Writef()` to reduce the need for `ShouldLog()` checks.
-* Add `LogLongRunningOperation`
-
-
-## 0.10.0 (2020-09-10)
-### Features Added
-* The `request` and `transport` interfaces have been refactored to align with the patterns in the standard library.
-* `NewRequest()` now uses `http.NewRequestWithContext()` and performs additional validation, it also requires a context parameter.
-* The `Policy` and `Transport` interfaces have had their context parameter removed as the context is associated with the underlying `http.Request`.
-* `Pipeline.Do()` will validate the HTTP request before sending it through the pipeline, avoiding retries on a malformed request.
-* The `Retrier` interface has been replaced with the `NonRetriableError` interface, and the retry policy updated to test for this.
-* `Request.SetBody()` now requires a content type parameter for setting the request's MIME type.
-* moved path concatenation into `JoinPaths()` func
-
-
-## 0.9.6 (2020-08-18)
-### Features Added
-* Improvements to body download policy
-* Always download the response body for error responses, i.e. HTTP status codes >= 400.
-* Simplify variable declarations
-
-
-## 0.9.5 (2020-08-11)
-### Features Added
-* Set the Content-Length header in `Request.SetBody`
-
-
-## 0.9.4 (2020-08-03)
-### Features Added
-* Fix cancellation of per try timeout
-* Per try timeout is used to ensure that an HTTP operation doesn't take too long, e.g. that a GET on some URL doesn't take an inordinant amount of time.
-* Once the HTTP request returns, the per try timeout should be cancelled, not when the response has been read to completion.
-* Do not drain response body if there are no more retries
-* Do not retry non-idempotent operations when body download fails
-
-
-## 0.9.3 (2020-07-28)
-### Features Added
-* Add support for custom HTTP request headers
-* Inserts an internal policy into the pipeline that can extract HTTP header values from the caller's context, adding them to the request.
-* Use `azcore.WithHTTPHeader` to add HTTP headers to a context.
-* Remove method specific to Go 1.14
-
-
-## 0.9.2 (2020-07-28)
-### Features Added
-* Omit read-only content from request payloads
-* If any field in a payload's object graph contains `azure:"ro"`, make a clone of the object graph, omitting all fields with this annotation.
-* Verify no fields were dropped
-* Handle embedded struct types
-* Added test for cloning by value
-* Add messages to failures
-
-
-## 0.9.1 (2020-07-22)
-### Features Added
-* Updated dependency on internal module to fix race condition.
-
-
-## 0.9.0 (2020-07-09)
-### Features Added
-* Add `HTTPResponse` interface to be used by callers to access the raw HTTP response from an error in the event of an API call failure.
-* Updated `sdk/internal` dependency to latest version.
-* Rename package alias
-
-
-## 0.8.2 (2020-06-29)
-### Features Added
-* Added missing documentation comments
-
-### Bugs Fixed
-* Fixed a bug in body download policy.
-
-
-## 0.8.1 (2020-06-26)
-### Features Added
-* Miscellaneous clean-up reported by linters
-
-
-## 0.8.0 (2020-06-01)
-### Features Added
-* Differentiate between standard and URL encoding.
-
-
-## 0.7.1 (2020-05-27)
-### Features Added
-* Add support for for base64 encoding and decoding of payloads.
-
-
-## 0.7.0 (2020-05-12)
-### Features Added
-* Change `RetryAfter()` to a function.
-
-
-## 0.6.0 (2020-04-29)
-### Features Added
-* Updating `RetryAfter` to only return the detaion in the RetryAfter header
-
-
-## 0.5.0 (2020-03-23)
-### Features Added
-* Export `TransportFunc`
-
-### Breaking Changes
-* Removed `IterationDone`
-
-
-## 0.4.1 (2020-02-25)
-### Features Added
-* Ensure per-try timeout is properly cancelled
-* Explicitly call cancel the per-try timeout when the response body has been read/closed by the body download policy.
-* When the response body is returned to the caller for reading/closing, wrap it in a `responseBodyReader` that will cancel the timeout when the body is closed.
-* `Logger.Should()` will return false if no listener is set.
-
-
-## 0.4.0 (2020-02-18)
-### Features Added
-* Enable custom `RetryOptions` to be specified per API call
-* Added `WithRetryOptions()` that adds a custom `RetryOptions` to the provided context, allowing custom settings per API call.
-* Remove 429 from the list of default HTTP status codes for retry.
-* Change StatusCodesForRetry to a slice so consumers can append to it.
-* Added support for retry-after in HTTP-date format.
-* Cleaned up some comments specific to storage.
-* Remove `Request.SetQueryParam()`
-* Renamed `MaxTries` to `MaxRetries`
-
-## 0.3.0 (2020-01-16)
-### Features Added
-* Added `DefaultRetryOptions` to create initialized default options.
-
-### Breaking Changes
-* Removed `Response.CheckStatusCode()`
-
-
-## 0.2.0 (2020-01-15)
-### Features Added
-* Add support for marshalling and unmarshalling JSON
-* Removed `Response.Payload` field
-* Exit early when unmarsahlling if there is no payload
-
-
-## 0.1.0 (2020-01-10)
-### Features Added
-* Initial release
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt
deleted file mode 100644
index 48ea6616..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) Microsoft Corporation.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md
deleted file mode 100644
index 35a74e18..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# Azure Core Client Module for Go
-
-[](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore)
-[](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=1843&branchName=main)
-[](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main)
-
-The `azcore` module provides a set of common interfaces and types for Go SDK client modules.
-These modules follow the [Azure SDK Design Guidelines for Go](https://azure.github.io/azure-sdk/golang_introduction.html).
-
-## Getting started
-
-This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management.
-
-Typically, you will not need to explicitly install `azcore` as it will be installed as a client module dependency.
-To add the latest version to your `go.mod` file, execute the following command.
-
-```bash
-go get github.com/Azure/azure-sdk-for-go/sdk/azcore
-```
-
-General documentation and examples can be found on [pkg.go.dev](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore).
-
-## Contributing
-This project welcomes contributions and suggestions. Most contributions require
-you to agree to a Contributor License Agreement (CLA) declaring that you have
-the right to, and actually do, grant us the rights to use your contribution.
-For details, visit [https://cla.microsoft.com](https://cla.microsoft.com).
-
-When you submit a pull request, a CLA-bot will automatically determine whether
-you need to provide a CLA and decorate the PR appropriately (e.g., label,
-comment). Simply follow the instructions provided by the bot. You will only
-need to do this once across all repos using our CLA.
-
-This project has adopted the
-[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
-For more information, see the
-[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
-or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any
-additional questions or comments.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go
deleted file mode 100644
index 187fe82b..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go
+++ /dev/null
@@ -1,224 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package resource
-
-import (
- "fmt"
- "strings"
-)
-
-const (
- providersKey = "providers"
- subscriptionsKey = "subscriptions"
- resourceGroupsLowerKey = "resourcegroups"
- locationsKey = "locations"
- builtInResourceNamespace = "Microsoft.Resources"
-)
-
-// RootResourceID defines the tenant as the root parent of all other ResourceID.
-var RootResourceID = &ResourceID{
- Parent: nil,
- ResourceType: TenantResourceType,
- Name: "",
-}
-
-// ResourceID represents a resource ID such as `/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg`.
-// Don't create this type directly, use ParseResourceID instead.
-type ResourceID struct {
- // Parent is the parent ResourceID of this instance.
- // Can be nil if there is no parent.
- Parent *ResourceID
-
- // SubscriptionID is the subscription ID in this resource ID.
- // The value can be empty if the resource ID does not contain a subscription ID.
- SubscriptionID string
-
- // ResourceGroupName is the resource group name in this resource ID.
- // The value can be empty if the resource ID does not contain a resource group name.
- ResourceGroupName string
-
- // Provider represents the provider name in this resource ID.
- // This is only valid when the resource ID represents a resource provider.
- // Example: `/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Insights`
- Provider string
-
- // Location is the location in this resource ID.
- // The value can be empty if the resource ID does not contain a location name.
- Location string
-
- // ResourceType represents the type of this resource ID.
- ResourceType ResourceType
-
- // Name is the resource name of this resource ID.
- Name string
-
- isChild bool
- stringValue string
-}
-
-// ParseResourceID parses a string to an instance of ResourceID
-func ParseResourceID(id string) (*ResourceID, error) {
- if len(id) == 0 {
- return nil, fmt.Errorf("invalid resource ID: id cannot be empty")
- }
-
- if !strings.HasPrefix(id, "/") {
- return nil, fmt.Errorf("invalid resource ID: resource id '%s' must start with '/'", id)
- }
-
- parts := splitStringAndOmitEmpty(id, "/")
-
- if len(parts) < 2 {
- return nil, fmt.Errorf("invalid resource ID: %s", id)
- }
-
- if !strings.EqualFold(parts[0], subscriptionsKey) && !strings.EqualFold(parts[0], providersKey) {
- return nil, fmt.Errorf("invalid resource ID: %s", id)
- }
-
- return appendNext(RootResourceID, parts, id)
-}
-
-// String returns the string of the ResourceID
-func (id *ResourceID) String() string {
- if len(id.stringValue) > 0 {
- return id.stringValue
- }
-
- if id.Parent == nil {
- return ""
- }
-
- builder := strings.Builder{}
- builder.WriteString(id.Parent.String())
-
- if id.isChild {
- builder.WriteString(fmt.Sprintf("/%s", id.ResourceType.lastType()))
- if len(id.Name) > 0 {
- builder.WriteString(fmt.Sprintf("/%s", id.Name))
- }
- } else {
- builder.WriteString(fmt.Sprintf("/providers/%s/%s/%s", id.ResourceType.Namespace, id.ResourceType.Type, id.Name))
- }
-
- id.stringValue = builder.String()
-
- return id.stringValue
-}
-
-func newResourceID(parent *ResourceID, resourceTypeName string, resourceName string) *ResourceID {
- id := &ResourceID{}
- id.init(parent, chooseResourceType(resourceTypeName, parent), resourceName, true)
- return id
-}
-
-func newResourceIDWithResourceType(parent *ResourceID, resourceType ResourceType, resourceName string) *ResourceID {
- id := &ResourceID{}
- id.init(parent, resourceType, resourceName, true)
- return id
-}
-
-func newResourceIDWithProvider(parent *ResourceID, providerNamespace, resourceTypeName, resourceName string) *ResourceID {
- id := &ResourceID{}
- id.init(parent, NewResourceType(providerNamespace, resourceTypeName), resourceName, false)
- return id
-}
-
-func chooseResourceType(resourceTypeName string, parent *ResourceID) ResourceType {
- if strings.EqualFold(resourceTypeName, resourceGroupsLowerKey) {
- return ResourceGroupResourceType
- } else if strings.EqualFold(resourceTypeName, subscriptionsKey) && parent != nil && parent.ResourceType.String() == TenantResourceType.String() {
- return SubscriptionResourceType
- }
-
- return parent.ResourceType.AppendChild(resourceTypeName)
-}
-
-func (id *ResourceID) init(parent *ResourceID, resourceType ResourceType, name string, isChild bool) {
- if parent != nil {
- id.Provider = parent.Provider
- id.SubscriptionID = parent.SubscriptionID
- id.ResourceGroupName = parent.ResourceGroupName
- id.Location = parent.Location
- }
-
- if resourceType.String() == SubscriptionResourceType.String() {
- id.SubscriptionID = name
- }
-
- if resourceType.lastType() == locationsKey {
- id.Location = name
- }
-
- if resourceType.String() == ResourceGroupResourceType.String() {
- id.ResourceGroupName = name
- }
-
- if resourceType.String() == ProviderResourceType.String() {
- id.Provider = name
- }
-
- if parent == nil {
- id.Parent = RootResourceID
- } else {
- id.Parent = parent
- }
- id.isChild = isChild
- id.ResourceType = resourceType
- id.Name = name
-}
-
-func appendNext(parent *ResourceID, parts []string, id string) (*ResourceID, error) {
- if len(parts) == 0 {
- return parent, nil
- }
-
- if len(parts) == 1 {
- // subscriptions and resourceGroups are not valid ids without their names
- if strings.EqualFold(parts[0], subscriptionsKey) || strings.EqualFold(parts[0], resourceGroupsLowerKey) {
- return nil, fmt.Errorf("invalid resource ID: %s", id)
- }
-
- // resourceGroup must contain either child or provider resource type
- if parent.ResourceType.String() == ResourceGroupResourceType.String() {
- return nil, fmt.Errorf("invalid resource ID: %s", id)
- }
-
- return newResourceID(parent, parts[0], ""), nil
- }
-
- if strings.EqualFold(parts[0], providersKey) && (len(parts) == 2 || strings.EqualFold(parts[2], providersKey)) {
- //provider resource can only be on a tenant or a subscription parent
- if parent.ResourceType.String() != SubscriptionResourceType.String() && parent.ResourceType.String() != TenantResourceType.String() {
- return nil, fmt.Errorf("invalid resource ID: %s", id)
- }
-
- return appendNext(newResourceIDWithResourceType(parent, ProviderResourceType, parts[1]), parts[2:], id)
- }
-
- if len(parts) > 3 && strings.EqualFold(parts[0], providersKey) {
- return appendNext(newResourceIDWithProvider(parent, parts[1], parts[2], parts[3]), parts[4:], id)
- }
-
- if len(parts) > 1 && !strings.EqualFold(parts[0], providersKey) {
- return appendNext(newResourceID(parent, parts[0], parts[1]), parts[2:], id)
- }
-
- return nil, fmt.Errorf("invalid resource ID: %s", id)
-}
-
-func splitStringAndOmitEmpty(v, sep string) []string {
- r := make([]string, 0)
- for _, s := range strings.Split(v, sep) {
- if len(s) == 0 {
- continue
- }
- r = append(r, s)
- }
-
- return r
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_type.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_type.go
deleted file mode 100644
index ca03ac97..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_type.go
+++ /dev/null
@@ -1,114 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package resource
-
-import (
- "fmt"
- "strings"
-)
-
-// SubscriptionResourceType is the ResourceType of a subscription
-var SubscriptionResourceType = NewResourceType(builtInResourceNamespace, "subscriptions")
-
-// ResourceGroupResourceType is the ResourceType of a resource group
-var ResourceGroupResourceType = NewResourceType(builtInResourceNamespace, "resourceGroups")
-
-// TenantResourceType is the ResourceType of a tenant
-var TenantResourceType = NewResourceType(builtInResourceNamespace, "tenants")
-
-// ProviderResourceType is the ResourceType of a provider
-var ProviderResourceType = NewResourceType(builtInResourceNamespace, "providers")
-
-// ResourceType represents an Azure resource type, e.g. "Microsoft.Network/virtualNetworks/subnets".
-// Don't create this type directly, use ParseResourceType or NewResourceType instead.
-type ResourceType struct {
- // Namespace is the namespace of the resource type.
- // e.g. "Microsoft.Network" in resource type "Microsoft.Network/virtualNetworks/subnets"
- Namespace string
-
- // Type is the full type name of the resource type.
- // e.g. "virtualNetworks/subnets" in resource type "Microsoft.Network/virtualNetworks/subnets"
- Type string
-
- // Types is the slice of all the sub-types of this resource type.
- // e.g. ["virtualNetworks", "subnets"] in resource type "Microsoft.Network/virtualNetworks/subnets"
- Types []string
-
- stringValue string
-}
-
-// String returns the string of the ResourceType
-func (t ResourceType) String() string {
- return t.stringValue
-}
-
-// IsParentOf returns true when the receiver is the parent resource type of the child.
-func (t ResourceType) IsParentOf(child ResourceType) bool {
- if !strings.EqualFold(t.Namespace, child.Namespace) {
- return false
- }
- if len(t.Types) >= len(child.Types) {
- return false
- }
- for i := range t.Types {
- if !strings.EqualFold(t.Types[i], child.Types[i]) {
- return false
- }
- }
-
- return true
-}
-
-// AppendChild creates an instance of ResourceType using the receiver as the parent with childType appended to it.
-func (t ResourceType) AppendChild(childType string) ResourceType {
- return NewResourceType(t.Namespace, fmt.Sprintf("%s/%s", t.Type, childType))
-}
-
-// NewResourceType creates an instance of ResourceType using a provider namespace
-// such as "Microsoft.Network" and type such as "virtualNetworks/subnets".
-func NewResourceType(providerNamespace, typeName string) ResourceType {
- return ResourceType{
- Namespace: providerNamespace,
- Type: typeName,
- Types: splitStringAndOmitEmpty(typeName, "/"),
- stringValue: fmt.Sprintf("%s/%s", providerNamespace, typeName),
- }
-}
-
-// ParseResourceType parses the ResourceType from a resource type string (e.g. Microsoft.Network/virtualNetworks/subsets)
-// or a resource identifier string.
-// e.g. /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg/providers/Microsoft.Network/virtualNetworks/vnet/subnets/mySubnet)
-func ParseResourceType(resourceIDOrType string) (ResourceType, error) {
- // split the path into segments
- parts := splitStringAndOmitEmpty(resourceIDOrType, "/")
-
- // There must be at least a namespace and type name
- if len(parts) < 1 {
- return ResourceType{}, fmt.Errorf("invalid resource ID or type: %s", resourceIDOrType)
- }
-
- // if the type is just subscriptions, it is a built-in type in the Microsoft.Resources namespace
- if len(parts) == 1 {
- // Simple resource type
- return NewResourceType(builtInResourceNamespace, parts[0]), nil
- } else if strings.Contains(parts[0], ".") {
- // Handle resource types (Microsoft.Compute/virtualMachines, Microsoft.Network/virtualNetworks/subnets)
- // it is a full type name
- return NewResourceType(parts[0], strings.Join(parts[1:], "/")), nil
- } else {
- // Check if ResourceID
- id, err := ParseResourceID(resourceIDOrType)
- if err != nil {
- return ResourceType{}, err
- }
- return NewResourceType(id.ResourceType.Namespace, id.ResourceType.Type), nil
- }
-}
-
-func (t ResourceType) lastType() string {
- return t.Types[len(t.Types)-1]
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go
deleted file mode 100644
index f18caf84..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go
+++ /dev/null
@@ -1,108 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package policy
-
-import (
- "time"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
-)
-
-// BearerTokenOptions configures the bearer token policy's behavior.
-type BearerTokenOptions struct {
- // AuxiliaryTenants are additional tenant IDs for authenticating cross-tenant requests.
- // The policy will add a token from each of these tenants to every request. The
- // authenticating user or service principal must be a guest in these tenants, and the
- // policy's credential must support multitenant authentication.
- AuxiliaryTenants []string
-
- // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP.
- // By default, authenticated requests to an HTTP endpoint are rejected by the client.
- // WARNING: setting this to true will allow sending the authentication key in clear text. Use with caution.
- InsecureAllowCredentialWithHTTP bool
-
- // Scopes contains the list of permission scopes required for the token.
- Scopes []string
-}
-
-// RegistrationOptions configures the registration policy's behavior.
-// All zero-value fields will be initialized with their default values.
-type RegistrationOptions struct {
- policy.ClientOptions
-
- // MaxAttempts is the total number of times to attempt automatic registration
- // in the event that an attempt fails.
- // The default value is 3.
- // Set to a value less than zero to disable the policy.
- MaxAttempts int
-
- // PollingDelay is the amount of time to sleep between polling intervals.
- // The default value is 15 seconds.
- // A value less than zero means no delay between polling intervals (not recommended).
- PollingDelay time.Duration
-
- // PollingDuration is the amount of time to wait before abandoning polling.
- // The default valule is 5 minutes.
- // NOTE: Setting this to a small value might cause the policy to prematurely fail.
- PollingDuration time.Duration
-
- // StatusCodes contains the slice of custom HTTP status codes to use instead
- // of the default http.StatusConflict. This should only be set if a service
- // returns a non-standard HTTP status code when unregistered.
- StatusCodes []int
-}
-
-// ClientOptions contains configuration settings for a client's pipeline.
-type ClientOptions struct {
- policy.ClientOptions
-
- // AuxiliaryTenants are additional tenant IDs for authenticating cross-tenant requests.
- // The client will add a token from each of these tenants to every request. The
- // authenticating user or service principal must be a guest in these tenants, and the
- // client's credential must support multitenant authentication.
- AuxiliaryTenants []string
-
- // DisableRPRegistration disables the auto-RP registration policy. Defaults to false.
- DisableRPRegistration bool
-}
-
-// Clone return a deep copy of the current options.
-func (o *ClientOptions) Clone() *ClientOptions {
- if o == nil {
- return nil
- }
- copiedOptions := *o
- copiedOptions.Cloud.Services = copyMap(copiedOptions.Cloud.Services)
- copiedOptions.Logging.AllowedHeaders = copyArray(copiedOptions.Logging.AllowedHeaders)
- copiedOptions.Logging.AllowedQueryParams = copyArray(copiedOptions.Logging.AllowedQueryParams)
- copiedOptions.Retry.StatusCodes = copyArray(copiedOptions.Retry.StatusCodes)
- copiedOptions.PerRetryPolicies = copyArray(copiedOptions.PerRetryPolicies)
- copiedOptions.PerCallPolicies = copyArray(copiedOptions.PerCallPolicies)
- return &copiedOptions
-}
-
-// copyMap return a new map with all the key value pair in the src map
-func copyMap[K comparable, V any](src map[K]V) map[K]V {
- if src == nil {
- return nil
- }
- copiedMap := make(map[K]V)
- for k, v := range src {
- copiedMap[k] = v
- }
- return copiedMap
-}
-
-// copyMap return a new array with all the elements in the src array
-func copyArray[T any](src []T) []T {
- if src == nil {
- return nil
- }
- copiedArray := make([]T, len(src))
- copy(copiedArray, src)
- return copiedArray
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go
deleted file mode 100644
index 039b758b..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go
+++ /dev/null
@@ -1,66 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "errors"
- "reflect"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
- azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
-)
-
-// NewPipeline creates a pipeline from connection options. Policies from ClientOptions are
-// placed after policies from PipelineOptions. The telemetry policy, when enabled, will
-// use the specified module and version info.
-func NewPipeline(module, version string, cred azcore.TokenCredential, plOpts azruntime.PipelineOptions, options *armpolicy.ClientOptions) (azruntime.Pipeline, error) {
- if options == nil {
- options = &armpolicy.ClientOptions{}
- }
- conf, err := getConfiguration(&options.ClientOptions)
- if err != nil {
- return azruntime.Pipeline{}, err
- }
- authPolicy := NewBearerTokenPolicy(cred, &armpolicy.BearerTokenOptions{
- AuxiliaryTenants: options.AuxiliaryTenants,
- InsecureAllowCredentialWithHTTP: options.InsecureAllowCredentialWithHTTP,
- Scopes: []string{conf.Audience + "/.default"},
- })
- perRetry := make([]azpolicy.Policy, len(plOpts.PerRetry), len(plOpts.PerRetry)+1)
- copy(perRetry, plOpts.PerRetry)
- plOpts.PerRetry = append(perRetry, authPolicy, exported.PolicyFunc(httpTraceNamespacePolicy))
- if !options.DisableRPRegistration {
- regRPOpts := armpolicy.RegistrationOptions{ClientOptions: options.ClientOptions}
- regPolicy, err := NewRPRegistrationPolicy(cred, ®RPOpts)
- if err != nil {
- return azruntime.Pipeline{}, err
- }
- perCall := make([]azpolicy.Policy, len(plOpts.PerCall), len(plOpts.PerCall)+1)
- copy(perCall, plOpts.PerCall)
- plOpts.PerCall = append(perCall, regPolicy)
- }
- if plOpts.APIVersion.Name == "" {
- plOpts.APIVersion.Name = "api-version"
- }
- return azruntime.NewPipeline(module, version, plOpts, &options.ClientOptions), nil
-}
-
-func getConfiguration(o *azpolicy.ClientOptions) (cloud.ServiceConfiguration, error) {
- c := cloud.AzurePublic
- if !reflect.ValueOf(o.Cloud).IsZero() {
- c = o.Cloud
- }
- if conf, ok := c.Services[cloud.ResourceManager]; ok && conf.Endpoint != "" && conf.Audience != "" {
- return conf, nil
- } else {
- return conf, errors.New("provided Cloud field is missing Azure Resource Manager configuration")
- }
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go
deleted file mode 100644
index 765fbc68..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go
+++ /dev/null
@@ -1,146 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "context"
- "encoding/base64"
- "fmt"
- "net/http"
- "strings"
- "time"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal"
-)
-
-const headerAuxiliaryAuthorization = "x-ms-authorization-auxiliary"
-
-// acquiringResourceState holds data for an auxiliary token request
-type acquiringResourceState struct {
- ctx context.Context
- p *BearerTokenPolicy
- tenant string
-}
-
-// acquireAuxToken acquires a token from an auxiliary tenant. Only one thread/goroutine at a time ever calls this function.
-func acquireAuxToken(state acquiringResourceState) (newResource azcore.AccessToken, newExpiration time.Time, err error) {
- tk, err := state.p.cred.GetToken(state.ctx, azpolicy.TokenRequestOptions{
- EnableCAE: true,
- Scopes: state.p.scopes,
- TenantID: state.tenant,
- })
- if err != nil {
- return azcore.AccessToken{}, time.Time{}, err
- }
- return tk, tk.ExpiresOn, nil
-}
-
-// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential.
-type BearerTokenPolicy struct {
- auxResources map[string]*temporal.Resource[azcore.AccessToken, acquiringResourceState]
- btp *azruntime.BearerTokenPolicy
- cred azcore.TokenCredential
- scopes []string
-}
-
-// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens.
-// cred: an azcore.TokenCredential implementation such as a credential object from azidentity
-// opts: optional settings. Pass nil to accept default values; this is the same as passing a zero-value options.
-func NewBearerTokenPolicy(cred azcore.TokenCredential, opts *armpolicy.BearerTokenOptions) *BearerTokenPolicy {
- if opts == nil {
- opts = &armpolicy.BearerTokenOptions{}
- }
- p := &BearerTokenPolicy{cred: cred}
- p.auxResources = make(map[string]*temporal.Resource[azcore.AccessToken, acquiringResourceState], len(opts.AuxiliaryTenants))
- for _, t := range opts.AuxiliaryTenants {
- p.auxResources[t] = temporal.NewResource(acquireAuxToken)
- }
- p.scopes = make([]string, len(opts.Scopes))
- copy(p.scopes, opts.Scopes)
- p.btp = azruntime.NewBearerTokenPolicy(cred, opts.Scopes, &azpolicy.BearerTokenOptions{
- InsecureAllowCredentialWithHTTP: opts.InsecureAllowCredentialWithHTTP,
- AuthorizationHandler: azpolicy.AuthorizationHandler{
- OnChallenge: p.onChallenge,
- OnRequest: p.onRequest,
- },
- })
- return p
-}
-
-func (b *BearerTokenPolicy) onChallenge(req *azpolicy.Request, res *http.Response, authNZ func(azpolicy.TokenRequestOptions) error) error {
- challenge := res.Header.Get(shared.HeaderWWWAuthenticate)
- claims, err := parseChallenge(challenge)
- if err != nil {
- // the challenge contains claims we can't parse
- return err
- } else if claims != "" {
- // request a new token having the specified claims, send the request again
- return authNZ(azpolicy.TokenRequestOptions{Claims: claims, EnableCAE: true, Scopes: b.scopes})
- }
- // auth challenge didn't include claims, so this is a simple authorization failure
- return azruntime.NewResponseError(res)
-}
-
-// onRequest authorizes requests with one or more bearer tokens
-func (b *BearerTokenPolicy) onRequest(req *azpolicy.Request, authNZ func(azpolicy.TokenRequestOptions) error) error {
- // authorize the request with a token for the primary tenant
- err := authNZ(azpolicy.TokenRequestOptions{EnableCAE: true, Scopes: b.scopes})
- if err != nil || len(b.auxResources) == 0 {
- return err
- }
- // add tokens for auxiliary tenants
- as := acquiringResourceState{
- ctx: req.Raw().Context(),
- p: b,
- }
- auxTokens := make([]string, 0, len(b.auxResources))
- for tenant, er := range b.auxResources {
- as.tenant = tenant
- auxTk, err := er.Get(as)
- if err != nil {
- return err
- }
- auxTokens = append(auxTokens, fmt.Sprintf("%s%s", shared.BearerTokenPrefix, auxTk.Token))
- }
- req.Raw().Header.Set(headerAuxiliaryAuthorization, strings.Join(auxTokens, ", "))
- return nil
-}
-
-// Do authorizes a request with a bearer token
-func (b *BearerTokenPolicy) Do(req *azpolicy.Request) (*http.Response, error) {
- return b.btp.Do(req)
-}
-
-// parseChallenge parses claims from an authentication challenge issued by ARM so a client can request a token
-// that will satisfy conditional access policies. It returns a non-nil error when the given value contains
-// claims it can't parse. If the value contains no claims, it returns an empty string and a nil error.
-func parseChallenge(wwwAuthenticate string) (string, error) {
- claims := ""
- var err error
- for _, param := range strings.Split(wwwAuthenticate, ",") {
- if _, after, found := strings.Cut(param, "claims="); found {
- if claims != "" {
- // The header contains multiple challenges, at least two of which specify claims. The specs allow this
- // but it's unclear what a client should do in this case and there's as yet no concrete example of it.
- err = fmt.Errorf("found multiple claims challenges in %q", wwwAuthenticate)
- break
- }
- // trim stuff that would get an error from RawURLEncoding; claims may or may not be padded
- claims = strings.Trim(after, `\"=`)
- // we don't return this error because it's something unhelpful like "illegal base64 data at input byte 42"
- if b, decErr := base64.RawURLEncoding.DecodeString(claims); decErr == nil {
- claims = string(b)
- } else {
- err = fmt.Errorf("failed to parse claims from %q", wwwAuthenticate)
- break
- }
- }
- }
- return claims, err
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go
deleted file mode 100644
index 810ac9d9..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go
+++ /dev/null
@@ -1,322 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "context"
- "fmt"
- "net/http"
- "net/url"
- "strings"
- "time"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource"
- armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
-)
-
-const (
- // LogRPRegistration entries contain information specific to the automatic registration of an RP.
- // Entries of this classification are written IFF the policy needs to take any action.
- LogRPRegistration log.Event = "RPRegistration"
-)
-
-// init sets any default values
-func setDefaults(r *armpolicy.RegistrationOptions) {
- if r.MaxAttempts == 0 {
- r.MaxAttempts = 3
- } else if r.MaxAttempts < 0 {
- r.MaxAttempts = 0
- }
- if r.PollingDelay == 0 {
- r.PollingDelay = 15 * time.Second
- } else if r.PollingDelay < 0 {
- r.PollingDelay = 0
- }
- if r.PollingDuration == 0 {
- r.PollingDuration = 5 * time.Minute
- }
- if len(r.StatusCodes) == 0 {
- r.StatusCodes = []int{http.StatusConflict}
- }
-}
-
-// NewRPRegistrationPolicy creates a policy object configured using the specified options.
-// The policy controls whether an unregistered resource provider should automatically be
-// registered. See https://aka.ms/rps-not-found for more information.
-func NewRPRegistrationPolicy(cred azcore.TokenCredential, o *armpolicy.RegistrationOptions) (azpolicy.Policy, error) {
- if o == nil {
- o = &armpolicy.RegistrationOptions{}
- }
- conf, err := getConfiguration(&o.ClientOptions)
- if err != nil {
- return nil, err
- }
- authPolicy := NewBearerTokenPolicy(cred, &armpolicy.BearerTokenOptions{Scopes: []string{conf.Audience + "/.default"}})
- p := &rpRegistrationPolicy{
- endpoint: conf.Endpoint,
- pipeline: runtime.NewPipeline(shared.Module, shared.Version, runtime.PipelineOptions{PerRetry: []azpolicy.Policy{authPolicy}}, &o.ClientOptions),
- options: *o,
- }
- // init the copy
- setDefaults(&p.options)
- return p, nil
-}
-
-type rpRegistrationPolicy struct {
- endpoint string
- pipeline runtime.Pipeline
- options armpolicy.RegistrationOptions
-}
-
-func (r *rpRegistrationPolicy) Do(req *azpolicy.Request) (*http.Response, error) {
- if r.options.MaxAttempts == 0 {
- // policy is disabled
- return req.Next()
- }
- const registeredState = "Registered"
- var rp string
- var resp *http.Response
- for attempts := 0; attempts < r.options.MaxAttempts; attempts++ {
- var err error
- // make the original request
- resp, err = req.Next()
- // getting a 409 is the first indication that the RP might need to be registered, check error response
- if err != nil || !runtime.HasStatusCode(resp, r.options.StatusCodes...) {
- return resp, err
- }
- var reqErr requestError
- if err = runtime.UnmarshalAsJSON(resp, &reqErr); err != nil {
- return resp, err
- }
- if reqErr.ServiceError == nil {
- // missing service error info. just return the response
- // to the caller so its error unmarshalling will kick in
- return resp, err
- }
- if !isUnregisteredRPCode(reqErr.ServiceError.Code) {
- // not a 409 due to unregistered RP. just return the response
- // to the caller so its error unmarshalling will kick in
- return resp, err
- }
- res, err := resource.ParseResourceID(req.Raw().URL.Path)
- if err != nil {
- return resp, err
- }
- rp = res.ResourceType.Namespace
- logRegistrationExit := func(v any) {
- log.Writef(LogRPRegistration, "END registration for %s: %v", rp, v)
- }
- log.Writef(LogRPRegistration, "BEGIN registration for %s", rp)
- // create client and make the registration request
- // we use the scheme and host from the original request
- rpOps := &providersOperations{
- p: r.pipeline,
- u: r.endpoint,
- subID: res.SubscriptionID,
- }
- if _, err = rpOps.Register(&shared.ContextWithDeniedValues{Context: req.Raw().Context()}, rp); err != nil {
- logRegistrationExit(err)
- return resp, err
- }
-
- // RP was registered, however we need to wait for the registration to complete
- pollCtx, pollCancel := context.WithTimeout(&shared.ContextWithDeniedValues{Context: req.Raw().Context()}, r.options.PollingDuration)
- var lastRegState string
- for {
- // get the current registration state
- getResp, err := rpOps.Get(pollCtx, rp)
- if err != nil {
- pollCancel()
- logRegistrationExit(err)
- return resp, err
- }
- if getResp.Provider.RegistrationState != nil && !strings.EqualFold(*getResp.Provider.RegistrationState, lastRegState) {
- // registration state has changed, or was updated for the first time
- lastRegState = *getResp.Provider.RegistrationState
- log.Writef(LogRPRegistration, "registration state is %s", lastRegState)
- }
- if strings.EqualFold(lastRegState, registeredState) {
- // registration complete
- pollCancel()
- logRegistrationExit(lastRegState)
- break
- }
- // wait before trying again
- select {
- case <-time.After(r.options.PollingDelay):
- // continue polling
- case <-pollCtx.Done():
- pollCancel()
- logRegistrationExit(pollCtx.Err())
- return resp, pollCtx.Err()
- }
- }
- // RP was successfully registered, retry the original request
- err = req.RewindBody()
- if err != nil {
- return resp, err
- }
- }
- // if we get here it means we exceeded the number of attempts
- return resp, fmt.Errorf("exceeded attempts to register %s", rp)
-}
-
-var unregisteredRPCodes = []string{
- "MissingSubscriptionRegistration",
- "MissingRegistrationForResourceProvider",
- "Subscription Not Registered",
- "SubscriptionNotRegistered",
-}
-
-func isUnregisteredRPCode(errorCode string) bool {
- for _, code := range unregisteredRPCodes {
- if strings.EqualFold(errorCode, code) {
- return true
- }
- }
- return false
-}
-
-// minimal error definitions to simplify detection
-type requestError struct {
- ServiceError *serviceError `json:"error"`
-}
-
-type serviceError struct {
- Code string `json:"code"`
-}
-
-///////////////////////////////////////////////////////////////////////////////////////////////
-// the following code was copied from module armresources, providers.go and models.go
-// only the minimum amount of code was copied to get this working and some edits were made.
-///////////////////////////////////////////////////////////////////////////////////////////////
-
-type providersOperations struct {
- p runtime.Pipeline
- u string
- subID string
-}
-
-// Get - Gets the specified resource provider.
-func (client *providersOperations) Get(ctx context.Context, resourceProviderNamespace string) (providerResponse, error) {
- req, err := client.getCreateRequest(ctx, resourceProviderNamespace)
- if err != nil {
- return providerResponse{}, err
- }
- resp, err := client.p.Do(req)
- if err != nil {
- return providerResponse{}, err
- }
- result, err := client.getHandleResponse(resp)
- if err != nil {
- return providerResponse{}, err
- }
- return result, nil
-}
-
-// getCreateRequest creates the Get request.
-func (client *providersOperations) getCreateRequest(ctx context.Context, resourceProviderNamespace string) (*azpolicy.Request, error) {
- urlPath := "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}"
- urlPath = strings.ReplaceAll(urlPath, "{resourceProviderNamespace}", url.PathEscape(resourceProviderNamespace))
- urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subID))
- req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.u, urlPath))
- if err != nil {
- return nil, err
- }
- query := req.Raw().URL.Query()
- query.Set("api-version", "2019-05-01")
- req.Raw().URL.RawQuery = query.Encode()
- return req, nil
-}
-
-// getHandleResponse handles the Get response.
-func (client *providersOperations) getHandleResponse(resp *http.Response) (providerResponse, error) {
- if !runtime.HasStatusCode(resp, http.StatusOK) {
- return providerResponse{}, exported.NewResponseError(resp)
- }
- result := providerResponse{RawResponse: resp}
- err := runtime.UnmarshalAsJSON(resp, &result.Provider)
- if err != nil {
- return providerResponse{}, err
- }
- return result, err
-}
-
-// Register - Registers a subscription with a resource provider.
-func (client *providersOperations) Register(ctx context.Context, resourceProviderNamespace string) (providerResponse, error) {
- req, err := client.registerCreateRequest(ctx, resourceProviderNamespace)
- if err != nil {
- return providerResponse{}, err
- }
- resp, err := client.p.Do(req)
- if err != nil {
- return providerResponse{}, err
- }
- result, err := client.registerHandleResponse(resp)
- if err != nil {
- return providerResponse{}, err
- }
- return result, nil
-}
-
-// registerCreateRequest creates the Register request.
-func (client *providersOperations) registerCreateRequest(ctx context.Context, resourceProviderNamespace string) (*azpolicy.Request, error) {
- urlPath := "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register"
- urlPath = strings.ReplaceAll(urlPath, "{resourceProviderNamespace}", url.PathEscape(resourceProviderNamespace))
- urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subID))
- req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.u, urlPath))
- if err != nil {
- return nil, err
- }
- query := req.Raw().URL.Query()
- query.Set("api-version", "2019-05-01")
- req.Raw().URL.RawQuery = query.Encode()
- return req, nil
-}
-
-// registerHandleResponse handles the Register response.
-func (client *providersOperations) registerHandleResponse(resp *http.Response) (providerResponse, error) {
- if !runtime.HasStatusCode(resp, http.StatusOK) {
- return providerResponse{}, exported.NewResponseError(resp)
- }
- result := providerResponse{RawResponse: resp}
- err := runtime.UnmarshalAsJSON(resp, &result.Provider)
- if err != nil {
- return providerResponse{}, err
- }
- return result, err
-}
-
-// ProviderResponse is the response envelope for operations that return a Provider type.
-type providerResponse struct {
- // Resource provider information.
- Provider *provider
-
- // RawResponse contains the underlying HTTP response.
- RawResponse *http.Response
-}
-
-// Provider - Resource provider information.
-type provider struct {
- // The provider ID.
- ID *string `json:"id,omitempty"`
-
- // The namespace of the resource provider.
- Namespace *string `json:"namespace,omitempty"`
-
- // The registration policy of the resource provider.
- RegistrationPolicy *string `json:"registrationPolicy,omitempty"`
-
- // The registration state of the resource provider.
- RegistrationState *string `json:"registrationState,omitempty"`
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_trace_namespace.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_trace_namespace.go
deleted file mode 100644
index 6cea1842..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_trace_namespace.go
+++ /dev/null
@@ -1,30 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "net/http"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
-)
-
-// httpTraceNamespacePolicy is a policy that adds the az.namespace attribute to the current Span
-func httpTraceNamespacePolicy(req *policy.Request) (resp *http.Response, err error) {
- rawTracer := req.Raw().Context().Value(shared.CtxWithTracingTracer{})
- if tracer, ok := rawTracer.(tracing.Tracer); ok && tracer.Enabled() {
- rt, err := resource.ParseResourceType(req.Raw().URL.Path)
- if err == nil {
- // add the namespace attribute to the current span
- span := tracer.SpanFromContext(req.Raw().Context())
- span.SetAttributes(tracing.Attribute{Key: shared.TracingNamespaceAttrName, Value: rt.Namespace})
- }
- }
- return req.Next()
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/runtime.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/runtime.go
deleted file mode 100644
index 1400d437..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/runtime.go
+++ /dev/null
@@ -1,24 +0,0 @@
-//go:build go1.16
-// +build go1.16
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
-
-func init() {
- cloud.AzureChina.Services[cloud.ResourceManager] = cloud.ServiceConfiguration{
- Audience: "https://management.core.chinacloudapi.cn",
- Endpoint: "https://management.chinacloudapi.cn",
- }
- cloud.AzureGovernment.Services[cloud.ResourceManager] = cloud.ServiceConfiguration{
- Audience: "https://management.core.usgovcloudapi.net",
- Endpoint: "https://management.usgovcloudapi.net",
- }
- cloud.AzurePublic.Services[cloud.ResourceManager] = cloud.ServiceConfiguration{
- Audience: "https://management.core.windows.net/",
- Endpoint: "https://management.azure.com",
- }
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml
deleted file mode 100644
index 99348527..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file.
-trigger:
- branches:
- include:
- - main
- - feature/*
- - hotfix/*
- - release/*
- paths:
- include:
- - sdk/azcore/
- - eng/
-
-pr:
- branches:
- include:
- - main
- - feature/*
- - hotfix/*
- - release/*
- paths:
- include:
- - sdk/azcore/
- - eng/
-
-extends:
- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
- parameters:
- ServiceDirectory: azcore
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go
deleted file mode 100644
index 9d077a3e..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go
+++ /dev/null
@@ -1,44 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package cloud
-
-var (
- // AzureChina contains configuration for Azure China.
- AzureChina = Configuration{
- ActiveDirectoryAuthorityHost: "https://login.chinacloudapi.cn/", Services: map[ServiceName]ServiceConfiguration{},
- }
- // AzureGovernment contains configuration for Azure Government.
- AzureGovernment = Configuration{
- ActiveDirectoryAuthorityHost: "https://login.microsoftonline.us/", Services: map[ServiceName]ServiceConfiguration{},
- }
- // AzurePublic contains configuration for Azure Public Cloud.
- AzurePublic = Configuration{
- ActiveDirectoryAuthorityHost: "https://login.microsoftonline.com/", Services: map[ServiceName]ServiceConfiguration{},
- }
-)
-
-// ServiceName identifies a cloud service.
-type ServiceName string
-
-// ResourceManager is a global constant identifying Azure Resource Manager.
-const ResourceManager ServiceName = "resourceManager"
-
-// ServiceConfiguration configures a specific cloud service such as Azure Resource Manager.
-type ServiceConfiguration struct {
- // Audience is the audience the client will request for its access tokens.
- Audience string
- // Endpoint is the service's base URL.
- Endpoint string
-}
-
-// Configuration configures a cloud.
-type Configuration struct {
- // ActiveDirectoryAuthorityHost is the base URL of the cloud's Azure Active Directory.
- ActiveDirectoryAuthorityHost string
- // Services contains configuration for the cloud's services.
- Services map[ServiceName]ServiceConfiguration
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go
deleted file mode 100644
index 985b1bde..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go
+++ /dev/null
@@ -1,53 +0,0 @@
-//go:build go1.16
-// +build go1.16
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-/*
-Package cloud implements a configuration API for applications deployed to sovereign or private Azure clouds.
-
-Azure SDK client configuration defaults are appropriate for Azure Public Cloud (sometimes referred to as
-"Azure Commercial" or simply "Microsoft Azure"). This package enables applications deployed to other
-Azure Clouds to configure clients appropriately.
-
-This package contains predefined configuration for well-known sovereign clouds such as Azure Government and
-Azure China. Azure SDK clients accept this configuration via the Cloud field of azcore.ClientOptions. For
-example, configuring a credential and ARM client for Azure Government:
-
- opts := azcore.ClientOptions{Cloud: cloud.AzureGovernment}
- cred, err := azidentity.NewDefaultAzureCredential(
- &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts},
- )
- handle(err)
-
- client, err := armsubscription.NewClient(
- cred, &arm.ClientOptions{ClientOptions: opts},
- )
- handle(err)
-
-Applications deployed to a private cloud such as Azure Stack create a Configuration object with
-appropriate values:
-
- c := cloud.Configuration{
- ActiveDirectoryAuthorityHost: "https://...",
- Services: map[cloud.ServiceName]cloud.ServiceConfiguration{
- cloud.ResourceManager: {
- Audience: "...",
- Endpoint: "https://...",
- },
- },
- }
- opts := azcore.ClientOptions{Cloud: c}
-
- cred, err := azidentity.NewDefaultAzureCredential(
- &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts},
- )
- handle(err)
-
- client, err := armsubscription.NewClient(
- cred, &arm.ClientOptions{ClientOptions: opts},
- )
- handle(err)
-*/
-package cloud
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go
deleted file mode 100644
index 9d1c2f0c..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go
+++ /dev/null
@@ -1,173 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azcore
-
-import (
- "reflect"
- "sync"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
-)
-
-// AccessToken represents an Azure service bearer access token with expiry information.
-type AccessToken = exported.AccessToken
-
-// TokenCredential represents a credential capable of providing an OAuth token.
-type TokenCredential = exported.TokenCredential
-
-// KeyCredential contains an authentication key used to authenticate to an Azure service.
-type KeyCredential = exported.KeyCredential
-
-// NewKeyCredential creates a new instance of [KeyCredential] with the specified values.
-// - key is the authentication key
-func NewKeyCredential(key string) *KeyCredential {
- return exported.NewKeyCredential(key)
-}
-
-// SASCredential contains a shared access signature used to authenticate to an Azure service.
-type SASCredential = exported.SASCredential
-
-// NewSASCredential creates a new instance of [SASCredential] with the specified values.
-// - sas is the shared access signature
-func NewSASCredential(sas string) *SASCredential {
- return exported.NewSASCredential(sas)
-}
-
-// holds sentinel values used to send nulls
-var nullables map[reflect.Type]any = map[reflect.Type]any{}
-var nullablesMu sync.RWMutex
-
-// NullValue is used to send an explicit 'null' within a request.
-// This is typically used in JSON-MERGE-PATCH operations to delete a value.
-func NullValue[T any]() T {
- t := shared.TypeOfT[T]()
-
- nullablesMu.RLock()
- v, found := nullables[t]
- nullablesMu.RUnlock()
-
- if found {
- // return the sentinel object
- return v.(T)
- }
-
- // promote to exclusive lock and check again (double-checked locking pattern)
- nullablesMu.Lock()
- defer nullablesMu.Unlock()
- v, found = nullables[t]
-
- if !found {
- var o reflect.Value
- if k := t.Kind(); k == reflect.Map {
- o = reflect.MakeMap(t)
- } else if k == reflect.Slice {
- // empty slices appear to all point to the same data block
- // which causes comparisons to become ambiguous. so we create
- // a slice with len/cap of one which ensures a unique address.
- o = reflect.MakeSlice(t, 1, 1)
- } else {
- o = reflect.New(t.Elem())
- }
- v = o.Interface()
- nullables[t] = v
- }
- // return the sentinel object
- return v.(T)
-}
-
-// IsNullValue returns true if the field contains a null sentinel value.
-// This is used by custom marshallers to properly encode a null value.
-func IsNullValue[T any](v T) bool {
- // see if our map has a sentinel object for this *T
- t := reflect.TypeOf(v)
- nullablesMu.RLock()
- defer nullablesMu.RUnlock()
-
- if o, found := nullables[t]; found {
- o1 := reflect.ValueOf(o)
- v1 := reflect.ValueOf(v)
- // we found it; return true if v points to the sentinel object.
- // NOTE: maps and slices can only be compared to nil, else you get
- // a runtime panic. so we compare addresses instead.
- return o1.Pointer() == v1.Pointer()
- }
- // no sentinel object for this *t
- return false
-}
-
-// ClientOptions contains optional settings for a client's pipeline.
-// Instances can be shared across calls to SDK client constructors when uniform configuration is desired.
-// Zero-value fields will have their specified default values applied during use.
-type ClientOptions = policy.ClientOptions
-
-// Client is a basic HTTP client. It consists of a pipeline and tracing provider.
-type Client struct {
- pl runtime.Pipeline
- tr tracing.Tracer
-
- // cached on the client to support shallow copying with new values
- tp tracing.Provider
- modVer string
- namespace string
-}
-
-// NewClient creates a new Client instance with the provided values.
-// - moduleName - the fully qualified name of the module where the client is defined; used by the telemetry policy and tracing provider.
-// - moduleVersion - the semantic version of the module; used by the telemetry policy and tracing provider.
-// - plOpts - pipeline configuration options; can be the zero-value
-// - options - optional client configurations; pass nil to accept the default values
-func NewClient(moduleName, moduleVersion string, plOpts runtime.PipelineOptions, options *ClientOptions) (*Client, error) {
- if options == nil {
- options = &ClientOptions{}
- }
-
- if !options.Telemetry.Disabled {
- if err := shared.ValidateModVer(moduleVersion); err != nil {
- return nil, err
- }
- }
-
- pl := runtime.NewPipeline(moduleName, moduleVersion, plOpts, options)
-
- tr := options.TracingProvider.NewTracer(moduleName, moduleVersion)
- if tr.Enabled() && plOpts.Tracing.Namespace != "" {
- tr.SetAttributes(tracing.Attribute{Key: shared.TracingNamespaceAttrName, Value: plOpts.Tracing.Namespace})
- }
-
- return &Client{
- pl: pl,
- tr: tr,
- tp: options.TracingProvider,
- modVer: moduleVersion,
- namespace: plOpts.Tracing.Namespace,
- }, nil
-}
-
-// Pipeline returns the pipeline for this client.
-func (c *Client) Pipeline() runtime.Pipeline {
- return c.pl
-}
-
-// Tracer returns the tracer for this client.
-func (c *Client) Tracer() tracing.Tracer {
- return c.tr
-}
-
-// WithClientName returns a shallow copy of the Client with its tracing client name changed to clientName.
-// Note that the values for module name and version will be preserved from the source Client.
-// - clientName - the fully qualified name of the client ("package.Client"); this is used by the tracing provider when creating spans
-func (c *Client) WithClientName(clientName string) *Client {
- tr := c.tp.NewTracer(clientName, c.modVer)
- if tr.Enabled() && c.namespace != "" {
- tr.SetAttributes(tracing.Attribute{Key: shared.TracingNamespaceAttrName, Value: c.namespace})
- }
- return &Client{pl: c.pl, tr: tr, tp: c.tp, modVer: c.modVer, namespace: c.namespace}
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go
deleted file mode 100644
index 654a5f40..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go
+++ /dev/null
@@ -1,264 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright 2017 Microsoft Corporation. All rights reserved.
-// Use of this source code is governed by an MIT
-// license that can be found in the LICENSE file.
-
-/*
-Package azcore implements an HTTP request/response middleware pipeline used by Azure SDK clients.
-
-The middleware consists of three components.
-
- - One or more Policy instances.
- - A Transporter instance.
- - A Pipeline instance that combines the Policy and Transporter instances.
-
-# Implementing the Policy Interface
-
-A Policy can be implemented in two ways; as a first-class function for a stateless Policy, or as
-a method on a type for a stateful Policy. Note that HTTP requests made via the same pipeline share
-the same Policy instances, so if a Policy mutates its state it MUST be properly synchronized to
-avoid race conditions.
-
-A Policy's Do method is called when an HTTP request wants to be sent over the network. The Do method can
-perform any operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers,
-and/or query parameters, inject a failure, etc. Once the Policy has successfully completed its request
-work, it must call the Next() method on the *policy.Request instance in order to pass the request to the
-next Policy in the chain.
-
-When an HTTP response comes back, the Policy then gets a chance to process the response/error. The Policy instance
-can log the response, retry the operation if it failed due to a transient error or timeout, unmarshal the response
-body, etc. Once the Policy has successfully completed its response work, it must return the *http.Response
-and error instances to its caller.
-
-Template for implementing a stateless Policy:
-
- type policyFunc func(*policy.Request) (*http.Response, error)
-
- // Do implements the Policy interface on policyFunc.
- func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) {
- return pf(req)
- }
-
- func NewMyStatelessPolicy() policy.Policy {
- return policyFunc(func(req *policy.Request) (*http.Response, error) {
- // TODO: mutate/process Request here
-
- // forward Request to next Policy & get Response/error
- resp, err := req.Next()
-
- // TODO: mutate/process Response/error here
-
- // return Response/error to previous Policy
- return resp, err
- })
- }
-
-Template for implementing a stateful Policy:
-
- type MyStatefulPolicy struct {
- // TODO: add configuration/setting fields here
- }
-
- // TODO: add initialization args to NewMyStatefulPolicy()
- func NewMyStatefulPolicy() policy.Policy {
- return &MyStatefulPolicy{
- // TODO: initialize configuration/setting fields here
- }
- }
-
- func (p *MyStatefulPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
- // TODO: mutate/process Request here
-
- // forward Request to next Policy & get Response/error
- resp, err := req.Next()
-
- // TODO: mutate/process Response/error here
-
- // return Response/error to previous Policy
- return resp, err
- }
-
-# Implementing the Transporter Interface
-
-The Transporter interface is responsible for sending the HTTP request and returning the corresponding
-HTTP response or error. The Transporter is invoked by the last Policy in the chain. The default Transporter
-implementation uses a shared http.Client from the standard library.
-
-The same stateful/stateless rules for Policy implementations apply to Transporter implementations.
-
-# Using Policy and Transporter Instances Via a Pipeline
-
-To use the Policy and Transporter instances, an application passes them to the runtime.NewPipeline function.
-
- func NewPipeline(transport Transporter, policies ...Policy) Pipeline
-
-The specified Policy instances form a chain and are invoked in the order provided to NewPipeline
-followed by the Transporter.
-
-Once the Pipeline has been created, create a runtime.Request instance and pass it to Pipeline's Do method.
-
- func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error)
-
- func (p Pipeline) Do(req *Request) (*http.Request, error)
-
-The Pipeline.Do method sends the specified Request through the chain of Policy and Transporter
-instances. The response/error is then sent through the same chain of Policy instances in reverse
-order. For example, assuming there are Policy types PolicyA, PolicyB, and PolicyC along with
-TransportA.
-
- pipeline := NewPipeline(TransportA, PolicyA, PolicyB, PolicyC)
-
-The flow of Request and Response looks like the following:
-
- policy.Request -> PolicyA -> PolicyB -> PolicyC -> TransportA -----+
- |
- HTTP(S) endpoint
- |
- caller <--------- PolicyA <- PolicyB <- PolicyC <- http.Response-+
-
-# Creating a Request Instance
-
-The Request instance passed to Pipeline's Do method is a wrapper around an *http.Request. It also
-contains some internal state and provides various convenience methods. You create a Request instance
-by calling the runtime.NewRequest function:
-
- func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error)
-
-If the Request should contain a body, call the SetBody method.
-
- func (req *Request) SetBody(body ReadSeekCloser, contentType string) error
-
-A seekable stream is required so that upon retry, the retry Policy instance can seek the stream
-back to the beginning before retrying the network request and re-uploading the body.
-
-# Sending an Explicit Null
-
-Operations like JSON-MERGE-PATCH send a JSON null to indicate a value should be deleted.
-
- {
- "delete-me": null
- }
-
-This requirement conflicts with the SDK's default marshalling that specifies "omitempty" as
-a means to resolve the ambiguity between a field to be excluded and its zero-value.
-
- type Widget struct {
- Name *string `json:",omitempty"`
- Count *int `json:",omitempty"`
- }
-
-In the above example, Name and Count are defined as pointer-to-type to disambiguate between
-a missing value (nil) and a zero-value (0) which might have semantic differences.
-
-In a PATCH operation, any fields left as nil are to have their values preserved. When updating
-a Widget's count, one simply specifies the new value for Count, leaving Name nil.
-
-To fulfill the requirement for sending a JSON null, the NullValue() function can be used.
-
- w := Widget{
- Count: azcore.NullValue[*int](),
- }
-
-This sends an explict "null" for Count, indicating that any current value for Count should be deleted.
-
-# Processing the Response
-
-When the HTTP response is received, the *http.Response is returned directly. Each Policy instance
-can inspect/mutate the *http.Response.
-
-# Built-in Logging
-
-To enable logging, set environment variable AZURE_SDK_GO_LOGGING to "all" before executing your program.
-
-By default the logger writes to stderr. This can be customized by calling log.SetListener, providing
-a callback that writes to the desired location. Any custom logging implementation MUST provide its
-own synchronization to handle concurrent invocations.
-
-See the docs for the log package for further details.
-
-# Pageable Operations
-
-Pageable operations return potentially large data sets spread over multiple GET requests. The result of
-each GET is a "page" of data consisting of a slice of items.
-
-Pageable operations can be identified by their New*Pager naming convention and return type of *runtime.Pager[T].
-
- func (c *WidgetClient) NewListWidgetsPager(o *Options) *runtime.Pager[PageResponse]
-
-The call to WidgetClient.NewListWidgetsPager() returns an instance of *runtime.Pager[T] for fetching pages
-and determining if there are more pages to fetch. No IO calls are made until the NextPage() method is invoked.
-
- pager := widgetClient.NewListWidgetsPager(nil)
- for pager.More() {
- page, err := pager.NextPage(context.TODO())
- // handle err
- for _, widget := range page.Values {
- // process widget
- }
- }
-
-# Long-Running Operations
-
-Long-running operations (LROs) are operations consisting of an initial request to start the operation followed
-by polling to determine when the operation has reached a terminal state. An LRO's terminal state is one
-of the following values.
-
- - Succeeded - the LRO completed successfully
- - Failed - the LRO failed to complete
- - Canceled - the LRO was canceled
-
-LROs can be identified by their Begin* prefix and their return type of *runtime.Poller[T].
-
- func (c *WidgetClient) BeginCreateOrUpdate(ctx context.Context, w Widget, o *Options) (*runtime.Poller[Response], error)
-
-When a call to WidgetClient.BeginCreateOrUpdate() returns a nil error, it means that the LRO has started.
-It does _not_ mean that the widget has been created or updated (or failed to be created/updated).
-
-The *runtime.Poller[T] provides APIs for determining the state of the LRO. To wait for the LRO to complete,
-call the PollUntilDone() method.
-
- poller, err := widgetClient.BeginCreateOrUpdate(context.TODO(), Widget{}, nil)
- // handle err
- result, err := poller.PollUntilDone(context.TODO(), nil)
- // handle err
- // use result
-
-The call to PollUntilDone() will block the current goroutine until the LRO has reached a terminal state or the
-context is canceled/timed out.
-
-Note that LROs can take anywhere from several seconds to several minutes. The duration is operation-dependent. Due to
-this variant behavior, pollers do _not_ have a preconfigured time-out. Use a context with the appropriate cancellation
-mechanism as required.
-
-# Resume Tokens
-
-Pollers provide the ability to serialize their state into a "resume token" which can be used by another process to
-recreate the poller. This is achieved via the runtime.Poller[T].ResumeToken() method.
-
- token, err := poller.ResumeToken()
- // handle error
-
-Note that a token can only be obtained for a poller that's in a non-terminal state. Also note that any subsequent calls
-to poller.Poll() might change the poller's state. In this case, a new token should be created.
-
-After the token has been obtained, it can be used to recreate an instance of the originating poller.
-
- poller, err := widgetClient.BeginCreateOrUpdate(nil, Widget{}, &Options{
- ResumeToken: token,
- })
-
-When resuming a poller, no IO is performed, and zero-value arguments can be used for everything but the Options.ResumeToken.
-
-Resume tokens are unique per service client and operation. Attempting to resume a poller for LRO BeginB() with a token from LRO
-BeginA() will result in an error.
-
-# Fakes
-
-The fake package contains types used for constructing in-memory fake servers used in unit tests.
-This allows writing tests to cover various success/error conditions without the need for connecting to a live service.
-
-Please see https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/samples/fakes for details and examples on how to use fakes.
-*/
-package azcore
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go
deleted file mode 100644
index 17bd50c6..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go
+++ /dev/null
@@ -1,14 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azcore
-
-import "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
-
-// ResponseError is returned when a request is made to a service and
-// the service returns a non-success HTTP status code.
-// Use errors.As() to access this type in the error chain.
-type ResponseError = exported.ResponseError
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go
deleted file mode 100644
index 2b19d01f..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go
+++ /dev/null
@@ -1,57 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azcore
-
-import (
- "strings"
-)
-
-// ETag is a property used for optimistic concurrency during updates
-// ETag is a validator based on https://tools.ietf.org/html/rfc7232#section-2.3.2
-// An ETag can be empty ("").
-type ETag string
-
-// ETagAny is an ETag that represents everything, the value is "*"
-const ETagAny ETag = "*"
-
-// Equals does a strong comparison of two ETags. Equals returns true when both
-// ETags are not weak and the values of the underlying strings are equal.
-func (e ETag) Equals(other ETag) bool {
- return !e.IsWeak() && !other.IsWeak() && e == other
-}
-
-// WeakEquals does a weak comparison of two ETags. Two ETags are equivalent if their opaque-tags match
-// character-by-character, regardless of either or both being tagged as "weak".
-func (e ETag) WeakEquals(other ETag) bool {
- getStart := func(e1 ETag) int {
- if e1.IsWeak() {
- return 2
- }
- return 0
- }
- aStart := getStart(e)
- bStart := getStart(other)
-
- aVal := e[aStart:]
- bVal := other[bStart:]
-
- return aVal == bVal
-}
-
-// IsWeak specifies whether the ETag is strong or weak.
-func (e ETag) IsWeak() bool {
- return len(e) >= 4 && strings.HasPrefix(string(e), "W/\"") && strings.HasSuffix(string(e), "\"")
-}
-
-// MatchConditions specifies HTTP options for conditional requests.
-type MatchConditions struct {
- // Optionally limit requests to resources that have a matching ETag.
- IfMatch *ETag
-
- // Optionally limit requests to resources that do not match the ETag.
- IfNoneMatch *ETag
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go
deleted file mode 100644
index f2b296b6..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go
+++ /dev/null
@@ -1,175 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package exported
-
-import (
- "context"
- "encoding/base64"
- "fmt"
- "io"
- "net/http"
- "sync/atomic"
- "time"
-)
-
-type nopCloser struct {
- io.ReadSeeker
-}
-
-func (n nopCloser) Close() error {
- return nil
-}
-
-// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker.
-// Exported as streaming.NopCloser().
-func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {
- return nopCloser{rs}
-}
-
-// HasStatusCode returns true if the Response's status code is one of the specified values.
-// Exported as runtime.HasStatusCode().
-func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
- if resp == nil {
- return false
- }
- for _, sc := range statusCodes {
- if resp.StatusCode == sc {
- return true
- }
- }
- return false
-}
-
-// AccessToken represents an Azure service bearer access token with expiry information.
-// Exported as azcore.AccessToken.
-type AccessToken struct {
- Token string
- ExpiresOn time.Time
-}
-
-// TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token.
-// Exported as policy.TokenRequestOptions.
-type TokenRequestOptions struct {
- // Claims are any additional claims required for the token to satisfy a conditional access policy, such as a
- // service may return in a claims challenge following an authorization failure. If a service returned the
- // claims value base64 encoded, it must be decoded before setting this field.
- Claims string
-
- // EnableCAE indicates whether to enable Continuous Access Evaluation (CAE) for the requested token. When true,
- // azidentity credentials request CAE tokens for resource APIs supporting CAE. Clients are responsible for
- // handling CAE challenges. If a client that doesn't handle CAE challenges receives a CAE token, it may end up
- // in a loop retrying an API call with a token that has been revoked due to CAE.
- EnableCAE bool
-
- // Scopes contains the list of permission scopes required for the token.
- Scopes []string
-
- // TenantID identifies the tenant from which to request the token. azidentity credentials authenticate in
- // their configured default tenants when this field isn't set.
- TenantID string
-}
-
-// TokenCredential represents a credential capable of providing an OAuth token.
-// Exported as azcore.TokenCredential.
-type TokenCredential interface {
- // GetToken requests an access token for the specified set of scopes.
- GetToken(ctx context.Context, options TokenRequestOptions) (AccessToken, error)
-}
-
-// DecodeByteArray will base-64 decode the provided string into v.
-// Exported as runtime.DecodeByteArray()
-func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error {
- if len(s) == 0 {
- return nil
- }
- payload := string(s)
- if payload[0] == '"' {
- // remove surrounding quotes
- payload = payload[1 : len(payload)-1]
- }
- switch format {
- case Base64StdFormat:
- decoded, err := base64.StdEncoding.DecodeString(payload)
- if err == nil {
- *v = decoded
- return nil
- }
- return err
- case Base64URLFormat:
- // use raw encoding as URL format should not contain any '=' characters
- decoded, err := base64.RawURLEncoding.DecodeString(payload)
- if err == nil {
- *v = decoded
- return nil
- }
- return err
- default:
- return fmt.Errorf("unrecognized byte array format: %d", format)
- }
-}
-
-// KeyCredential contains an authentication key used to authenticate to an Azure service.
-// Exported as azcore.KeyCredential.
-type KeyCredential struct {
- cred *keyCredential
-}
-
-// NewKeyCredential creates a new instance of [KeyCredential] with the specified values.
-// - key is the authentication key
-func NewKeyCredential(key string) *KeyCredential {
- return &KeyCredential{cred: newKeyCredential(key)}
-}
-
-// Update replaces the existing key with the specified value.
-func (k *KeyCredential) Update(key string) {
- k.cred.Update(key)
-}
-
-// SASCredential contains a shared access signature used to authenticate to an Azure service.
-// Exported as azcore.SASCredential.
-type SASCredential struct {
- cred *keyCredential
-}
-
-// NewSASCredential creates a new instance of [SASCredential] with the specified values.
-// - sas is the shared access signature
-func NewSASCredential(sas string) *SASCredential {
- return &SASCredential{cred: newKeyCredential(sas)}
-}
-
-// Update replaces the existing shared access signature with the specified value.
-func (k *SASCredential) Update(sas string) {
- k.cred.Update(sas)
-}
-
-// KeyCredentialGet returns the key for cred.
-func KeyCredentialGet(cred *KeyCredential) string {
- return cred.cred.Get()
-}
-
-// SASCredentialGet returns the shared access sig for cred.
-func SASCredentialGet(cred *SASCredential) string {
- return cred.cred.Get()
-}
-
-type keyCredential struct {
- key atomic.Value // string
-}
-
-func newKeyCredential(key string) *keyCredential {
- keyCred := keyCredential{}
- keyCred.key.Store(key)
- return &keyCred
-}
-
-func (k *keyCredential) Get() string {
- return k.key.Load().(string)
-}
-
-func (k *keyCredential) Update(key string) {
- k.key.Store(key)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go
deleted file mode 100644
index e45f831e..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go
+++ /dev/null
@@ -1,77 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package exported
-
-import (
- "errors"
- "net/http"
-)
-
-// Policy represents an extensibility point for the Pipeline that can mutate the specified
-// Request and react to the received Response.
-// Exported as policy.Policy.
-type Policy interface {
- // Do applies the policy to the specified Request. When implementing a Policy, mutate the
- // request before calling req.Next() to move on to the next policy, and respond to the result
- // before returning to the caller.
- Do(req *Request) (*http.Response, error)
-}
-
-// Pipeline represents a primitive for sending HTTP requests and receiving responses.
-// Its behavior can be extended by specifying policies during construction.
-// Exported as runtime.Pipeline.
-type Pipeline struct {
- policies []Policy
-}
-
-// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses.
-// Exported as policy.Transporter.
-type Transporter interface {
- // Do sends the HTTP request and returns the HTTP response or error.
- Do(req *http.Request) (*http.Response, error)
-}
-
-// used to adapt a TransportPolicy to a Policy
-type transportPolicy struct {
- trans Transporter
-}
-
-func (tp transportPolicy) Do(req *Request) (*http.Response, error) {
- if tp.trans == nil {
- return nil, errors.New("missing transporter")
- }
- resp, err := tp.trans.Do(req.Raw())
- if err != nil {
- return nil, err
- } else if resp == nil {
- // there was no response and no error (rare but can happen)
- // this ensures the retry policy will retry the request
- return nil, errors.New("received nil response")
- }
- return resp, nil
-}
-
-// NewPipeline creates a new Pipeline object from the specified Policies.
-// Not directly exported, but used as part of runtime.NewPipeline().
-func NewPipeline(transport Transporter, policies ...Policy) Pipeline {
- // transport policy must always be the last in the slice
- policies = append(policies, transportPolicy{trans: transport})
- return Pipeline{
- policies: policies,
- }
-}
-
-// Do is called for each and every HTTP request. It passes the request through all
-// the Policy objects (which can transform the Request's URL/query parameters/headers)
-// and ultimately sends the transformed HTTP request over the network.
-func (p Pipeline) Do(req *Request) (*http.Response, error) {
- if req == nil {
- return nil, errors.New("request cannot be nil")
- }
- req.policies = p.policies
- return req.Next()
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
deleted file mode 100644
index 3041984d..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
+++ /dev/null
@@ -1,223 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package exported
-
-import (
- "context"
- "encoding/base64"
- "errors"
- "fmt"
- "io"
- "net/http"
- "reflect"
- "strconv"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
-)
-
-// Base64Encoding is usesd to specify which base-64 encoder/decoder to use when
-// encoding/decoding a slice of bytes to/from a string.
-// Exported as runtime.Base64Encoding
-type Base64Encoding int
-
-const (
- // Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads.
- Base64StdFormat Base64Encoding = 0
-
- // Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads.
- Base64URLFormat Base64Encoding = 1
-)
-
-// EncodeByteArray will base-64 encode the byte slice v.
-// Exported as runtime.EncodeByteArray()
-func EncodeByteArray(v []byte, format Base64Encoding) string {
- if format == Base64URLFormat {
- return base64.RawURLEncoding.EncodeToString(v)
- }
- return base64.StdEncoding.EncodeToString(v)
-}
-
-// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline.
-// Don't use this type directly, use NewRequest() instead.
-// Exported as policy.Request.
-type Request struct {
- req *http.Request
- body io.ReadSeekCloser
- policies []Policy
- values opValues
-}
-
-type opValues map[reflect.Type]any
-
-// Set adds/changes a value
-func (ov opValues) set(value any) {
- ov[reflect.TypeOf(value)] = value
-}
-
-// Get looks for a value set by SetValue first
-func (ov opValues) get(value any) bool {
- v, ok := ov[reflect.ValueOf(value).Elem().Type()]
- if ok {
- reflect.ValueOf(value).Elem().Set(reflect.ValueOf(v))
- }
- return ok
-}
-
-// NewRequest creates a new Request with the specified input.
-// Exported as runtime.NewRequest().
-func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) {
- req, err := http.NewRequestWithContext(ctx, httpMethod, endpoint, nil)
- if err != nil {
- return nil, err
- }
- if req.URL.Host == "" {
- return nil, errors.New("no Host in request URL")
- }
- if !(req.URL.Scheme == "http" || req.URL.Scheme == "https") {
- return nil, fmt.Errorf("unsupported protocol scheme %s", req.URL.Scheme)
- }
- return &Request{req: req}, nil
-}
-
-// Body returns the original body specified when the Request was created.
-func (req *Request) Body() io.ReadSeekCloser {
- return req.body
-}
-
-// Raw returns the underlying HTTP request.
-func (req *Request) Raw() *http.Request {
- return req.req
-}
-
-// Next calls the next policy in the pipeline.
-// If there are no more policies, nil and an error are returned.
-// This method is intended to be called from pipeline policies.
-// To send a request through a pipeline call Pipeline.Do().
-func (req *Request) Next() (*http.Response, error) {
- if len(req.policies) == 0 {
- return nil, errors.New("no more policies")
- }
- nextPolicy := req.policies[0]
- nextReq := *req
- nextReq.policies = nextReq.policies[1:]
- return nextPolicy.Do(&nextReq)
-}
-
-// SetOperationValue adds/changes a mutable key/value associated with a single operation.
-func (req *Request) SetOperationValue(value any) {
- if req.values == nil {
- req.values = opValues{}
- }
- req.values.set(value)
-}
-
-// OperationValue looks for a value set by SetOperationValue().
-func (req *Request) OperationValue(value any) bool {
- if req.values == nil {
- return false
- }
- return req.values.get(value)
-}
-
-// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length
-// accordingly. If the ReadSeekCloser is nil or empty, Content-Length won't be set. If contentType is "",
-// Content-Type won't be set, and if it was set, will be deleted.
-// Use streaming.NopCloser to turn an io.ReadSeeker into an io.ReadSeekCloser.
-func (req *Request) SetBody(body io.ReadSeekCloser, contentType string) error {
- // clobber the existing Content-Type to preserve behavior
- return SetBody(req, body, contentType, true)
-}
-
-// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation.
-func (req *Request) RewindBody() error {
- if req.body != nil {
- // Reset the stream back to the beginning and restore the body
- _, err := req.body.Seek(0, io.SeekStart)
- req.req.Body = req.body
- return err
- }
- return nil
-}
-
-// Close closes the request body.
-func (req *Request) Close() error {
- if req.body == nil {
- return nil
- }
- return req.body.Close()
-}
-
-// Clone returns a deep copy of the request with its context changed to ctx.
-func (req *Request) Clone(ctx context.Context) *Request {
- r2 := *req
- r2.req = req.req.Clone(ctx)
- return &r2
-}
-
-// WithContext returns a shallow copy of the request with its context changed to ctx.
-func (req *Request) WithContext(ctx context.Context) *Request {
- r2 := new(Request)
- *r2 = *req
- r2.req = r2.req.WithContext(ctx)
- return r2
-}
-
-// not exported but dependent on Request
-
-// PolicyFunc is a type that implements the Policy interface.
-// Use this type when implementing a stateless policy as a first-class function.
-type PolicyFunc func(*Request) (*http.Response, error)
-
-// Do implements the Policy interface on policyFunc.
-func (pf PolicyFunc) Do(req *Request) (*http.Response, error) {
- return pf(req)
-}
-
-// SetBody sets the specified ReadSeekCloser as the HTTP request body, and sets Content-Type and Content-Length accordingly.
-// - req is the request to modify
-// - body is the request body; if nil or empty, Content-Length won't be set
-// - contentType is the value for the Content-Type header; if empty, Content-Type will be deleted
-// - clobberContentType when true, will overwrite the existing value of Content-Type with contentType
-func SetBody(req *Request, body io.ReadSeekCloser, contentType string, clobberContentType bool) error {
- var err error
- var size int64
- if body != nil {
- size, err = body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size
- if err != nil {
- return err
- }
- }
- if size == 0 {
- // treat an empty stream the same as a nil one: assign req a nil body
- body = nil
- // RFC 9110 specifies a client shouldn't set Content-Length on a request containing no content
- // (Del is a no-op when the header has no value)
- req.req.Header.Del(shared.HeaderContentLength)
- } else {
- _, err = body.Seek(0, io.SeekStart)
- if err != nil {
- return err
- }
- req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10))
- req.Raw().GetBody = func() (io.ReadCloser, error) {
- _, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream
- return body, err
- }
- }
- // keep a copy of the body argument. this is to handle cases
- // where req.Body is replaced, e.g. httputil.DumpRequest and friends.
- req.body = body
- req.req.Body = body
- req.req.ContentLength = size
- if contentType == "" {
- // Del is a no-op when the header has no value
- req.req.Header.Del(shared.HeaderContentType)
- } else if req.req.Header.Get(shared.HeaderContentType) == "" || clobberContentType {
- req.req.Header.Set(shared.HeaderContentType, contentType)
- }
- return nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go
deleted file mode 100644
index 08a95458..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go
+++ /dev/null
@@ -1,167 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package exported
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "net/http"
- "regexp"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/exported"
-)
-
-// NewResponseError creates a new *ResponseError from the provided HTTP response.
-// Exported as runtime.NewResponseError().
-func NewResponseError(resp *http.Response) error {
- // prefer the error code in the response header
- if ec := resp.Header.Get(shared.HeaderXMSErrorCode); ec != "" {
- return NewResponseErrorWithErrorCode(resp, ec)
- }
-
- // if we didn't get x-ms-error-code, check in the response body
- body, err := exported.Payload(resp, nil)
- if err != nil {
- // since we're not returning the ResponseError in this
- // case we also don't want to write it to the log.
- return err
- }
-
- var errorCode string
- if len(body) > 0 {
- if fromJSON := extractErrorCodeJSON(body); fromJSON != "" {
- errorCode = fromJSON
- } else if fromXML := extractErrorCodeXML(body); fromXML != "" {
- errorCode = fromXML
- }
- }
-
- return NewResponseErrorWithErrorCode(resp, errorCode)
-}
-
-// NewResponseErrorWithErrorCode creates an *azcore.ResponseError from the provided HTTP response and errorCode.
-// Exported as runtime.NewResponseErrorWithErrorCode().
-func NewResponseErrorWithErrorCode(resp *http.Response, errorCode string) error {
- respErr := &ResponseError{
- ErrorCode: errorCode,
- StatusCode: resp.StatusCode,
- RawResponse: resp,
- }
- log.Write(log.EventResponseError, respErr.Error())
- return respErr
-}
-
-func extractErrorCodeJSON(body []byte) string {
- var rawObj map[string]any
- if err := json.Unmarshal(body, &rawObj); err != nil {
- // not a JSON object
- return ""
- }
-
- // check if this is a wrapped error, i.e. { "error": { ... } }
- // if so then unwrap it
- if wrapped, ok := rawObj["error"]; ok {
- unwrapped, ok := wrapped.(map[string]any)
- if !ok {
- return ""
- }
- rawObj = unwrapped
- } else if wrapped, ok := rawObj["odata.error"]; ok {
- // check if this a wrapped odata error, i.e. { "odata.error": { ... } }
- unwrapped, ok := wrapped.(map[string]any)
- if !ok {
- return ""
- }
- rawObj = unwrapped
- }
-
- // now check for the error code
- code, ok := rawObj["code"]
- if !ok {
- return ""
- }
- codeStr, ok := code.(string)
- if !ok {
- return ""
- }
- return codeStr
-}
-
-func extractErrorCodeXML(body []byte) string {
- // regular expression is much easier than dealing with the XML parser
- rx := regexp.MustCompile(`<(?:\w+:)?[c|C]ode>\s*(\w+)\s*<\/(?:\w+:)?[c|C]ode>`)
- res := rx.FindStringSubmatch(string(body))
- if len(res) != 2 {
- return ""
- }
- // first submatch is the entire thing, second one is the captured error code
- return res[1]
-}
-
-// ResponseError is returned when a request is made to a service and
-// the service returns a non-success HTTP status code.
-// Use errors.As() to access this type in the error chain.
-// Exported as azcore.ResponseError.
-type ResponseError struct {
- // ErrorCode is the error code returned by the resource provider if available.
- ErrorCode string
-
- // StatusCode is the HTTP status code as defined in https://pkg.go.dev/net/http#pkg-constants.
- StatusCode int
-
- // RawResponse is the underlying HTTP response.
- RawResponse *http.Response
-}
-
-// Error implements the error interface for type ResponseError.
-// Note that the message contents are not contractual and can change over time.
-func (e *ResponseError) Error() string {
- const separator = "--------------------------------------------------------------------------------"
- // write the request method and URL with response status code
- msg := &bytes.Buffer{}
- if e.RawResponse != nil {
- if e.RawResponse.Request != nil {
- fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path)
- } else {
- fmt.Fprintln(msg, "Request information not available")
- }
- fmt.Fprintln(msg, separator)
- fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status)
- } else {
- fmt.Fprintln(msg, "Missing RawResponse")
- fmt.Fprintln(msg, separator)
- }
- if e.ErrorCode != "" {
- fmt.Fprintf(msg, "ERROR CODE: %s\n", e.ErrorCode)
- } else {
- fmt.Fprintln(msg, "ERROR CODE UNAVAILABLE")
- }
- if e.RawResponse != nil {
- fmt.Fprintln(msg, separator)
- body, err := exported.Payload(e.RawResponse, nil)
- if err != nil {
- // this really shouldn't fail at this point as the response
- // body is already cached (it was read in NewResponseError)
- fmt.Fprintf(msg, "Error reading response body: %v", err)
- } else if len(body) > 0 {
- if err := json.Indent(msg, body, "", " "); err != nil {
- // failed to pretty-print so just dump it verbatim
- fmt.Fprint(msg, string(body))
- }
- // the standard library doesn't have a pretty-printer for XML
- fmt.Fprintln(msg)
- } else {
- fmt.Fprintln(msg, "Response contained no body")
- }
- }
- fmt.Fprintln(msg, separator)
-
- return msg.String()
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go
deleted file mode 100644
index 6fc6d140..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go
+++ /dev/null
@@ -1,50 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-// This is an internal helper package to combine the complete logging APIs.
-package log
-
-import (
- azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
-)
-
-type Event = log.Event
-
-const (
- EventRequest = azlog.EventRequest
- EventResponse = azlog.EventResponse
- EventResponseError = azlog.EventResponseError
- EventRetryPolicy = azlog.EventRetryPolicy
- EventLRO = azlog.EventLRO
-)
-
-// Write invokes the underlying listener with the specified event and message.
-// If the event shouldn't be logged or there is no listener then Write does nothing.
-func Write(cls log.Event, msg string) {
- log.Write(cls, msg)
-}
-
-// Writef invokes the underlying listener with the specified event and formatted message.
-// If the event shouldn't be logged or there is no listener then Writef does nothing.
-func Writef(cls log.Event, format string, a ...any) {
- log.Writef(cls, format, a...)
-}
-
-// SetListener will set the Logger to write to the specified listener.
-func SetListener(lst func(Event, string)) {
- log.SetListener(lst)
-}
-
-// Should returns true if the specified log event should be written to the log.
-// By default all log events will be logged. Call SetEvents() to limit
-// the log events for logging.
-// If no listener has been set this will return false.
-// Calling this method is useful when the message to log is computationally expensive
-// and you want to avoid the overhead if its log event is not enabled.
-func Should(cls log.Event) bool {
- return log.Should(cls)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go
deleted file mode 100644
index a5346276..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go
+++ /dev/null
@@ -1,159 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package async
-
-import (
- "context"
- "errors"
- "fmt"
- "net/http"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/poller"
-)
-
-// see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/async-api-reference.md
-
-// Applicable returns true if the LRO is using Azure-AsyncOperation.
-func Applicable(resp *http.Response) bool {
- return resp.Header.Get(shared.HeaderAzureAsync) != ""
-}
-
-// CanResume returns true if the token can rehydrate this poller type.
-func CanResume(token map[string]any) bool {
- _, ok := token["asyncURL"]
- return ok
-}
-
-// Poller is an LRO poller that uses the Azure-AsyncOperation pattern.
-type Poller[T any] struct {
- pl exported.Pipeline
-
- resp *http.Response
-
- // The URL from Azure-AsyncOperation header.
- AsyncURL string `json:"asyncURL"`
-
- // The URL from Location header.
- LocURL string `json:"locURL"`
-
- // The URL from the initial LRO request.
- OrigURL string `json:"origURL"`
-
- // The HTTP method from the initial LRO request.
- Method string `json:"method"`
-
- // The value of final-state-via from swagger, can be the empty string.
- FinalState pollers.FinalStateVia `json:"finalState"`
-
- // The LRO's current state.
- CurState string `json:"state"`
-}
-
-// New creates a new Poller from the provided initial response and final-state type.
-// Pass nil for response to create an empty Poller for rehydration.
-func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) {
- if resp == nil {
- log.Write(log.EventLRO, "Resuming Azure-AsyncOperation poller.")
- return &Poller[T]{pl: pl}, nil
- }
- log.Write(log.EventLRO, "Using Azure-AsyncOperation poller.")
- asyncURL := resp.Header.Get(shared.HeaderAzureAsync)
- if asyncURL == "" {
- return nil, errors.New("response is missing Azure-AsyncOperation header")
- }
- if !poller.IsValidURL(asyncURL) {
- return nil, fmt.Errorf("invalid polling URL %s", asyncURL)
- }
- // check for provisioning state. if the operation is a RELO
- // and terminates synchronously this will prevent extra polling.
- // it's ok if there's no provisioning state.
- state, _ := poller.GetProvisioningState(resp)
- if state == "" {
- state = poller.StatusInProgress
- }
- p := &Poller[T]{
- pl: pl,
- resp: resp,
- AsyncURL: asyncURL,
- LocURL: resp.Header.Get(shared.HeaderLocation),
- OrigURL: resp.Request.URL.String(),
- Method: resp.Request.Method,
- FinalState: finalState,
- CurState: state,
- }
- return p, nil
-}
-
-// Done returns true if the LRO is in a terminal state.
-func (p *Poller[T]) Done() bool {
- return poller.IsTerminalState(p.CurState)
-}
-
-// Poll retrieves the current state of the LRO.
-func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
- err := pollers.PollHelper(ctx, p.AsyncURL, p.pl, func(resp *http.Response) (string, error) {
- if !poller.StatusCodeValid(resp) {
- p.resp = resp
- return "", exported.NewResponseError(resp)
- }
- state, err := poller.GetStatus(resp)
- if err != nil {
- return "", err
- } else if state == "" {
- return "", errors.New("the response did not contain a status")
- }
- p.resp = resp
- p.CurState = state
- return p.CurState, nil
- })
- if err != nil {
- return nil, err
- }
- return p.resp, nil
-}
-
-func (p *Poller[T]) Result(ctx context.Context, out *T) error {
- if p.resp.StatusCode == http.StatusNoContent {
- return nil
- } else if poller.Failed(p.CurState) {
- return exported.NewResponseError(p.resp)
- }
- var req *exported.Request
- var err error
- if p.Method == http.MethodPatch || p.Method == http.MethodPut {
- // for PATCH and PUT, the final GET is on the original resource URL
- req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL)
- } else if p.Method == http.MethodPost {
- if p.FinalState == pollers.FinalStateViaAzureAsyncOp {
- // no final GET required
- } else if p.FinalState == pollers.FinalStateViaOriginalURI {
- req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL)
- } else if p.LocURL != "" {
- // ideally FinalState would be set to "location" but it isn't always.
- // must check last due to more permissive condition.
- req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL)
- }
- }
- if err != nil {
- return err
- }
-
- // if a final GET request has been created, execute it
- if req != nil {
- resp, err := p.pl.Do(req)
- if err != nil {
- return err
- }
- p.resp = resp
- }
-
- return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), "", out)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go
deleted file mode 100644
index 8751b051..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go
+++ /dev/null
@@ -1,135 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package body
-
-import (
- "context"
- "errors"
- "net/http"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/poller"
-)
-
-// Kind is the identifier of this type in a resume token.
-const kind = "body"
-
-// Applicable returns true if the LRO is using no headers, just provisioning state.
-// This is only applicable to PATCH and PUT methods and assumes no polling headers.
-func Applicable(resp *http.Response) bool {
- // we can't check for absense of headers due to some misbehaving services
- // like redis that return a Location header but don't actually use that protocol
- return resp.Request.Method == http.MethodPatch || resp.Request.Method == http.MethodPut
-}
-
-// CanResume returns true if the token can rehydrate this poller type.
-func CanResume(token map[string]any) bool {
- t, ok := token["type"]
- if !ok {
- return false
- }
- tt, ok := t.(string)
- if !ok {
- return false
- }
- return tt == kind
-}
-
-// Poller is an LRO poller that uses the Body pattern.
-type Poller[T any] struct {
- pl exported.Pipeline
-
- resp *http.Response
-
- // The poller's type, used for resume token processing.
- Type string `json:"type"`
-
- // The URL for polling.
- PollURL string `json:"pollURL"`
-
- // The LRO's current state.
- CurState string `json:"state"`
-}
-
-// New creates a new Poller from the provided initial response.
-// Pass nil for response to create an empty Poller for rehydration.
-func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) {
- if resp == nil {
- log.Write(log.EventLRO, "Resuming Body poller.")
- return &Poller[T]{pl: pl}, nil
- }
- log.Write(log.EventLRO, "Using Body poller.")
- p := &Poller[T]{
- pl: pl,
- resp: resp,
- Type: kind,
- PollURL: resp.Request.URL.String(),
- }
- // default initial state to InProgress. depending on the HTTP
- // status code and provisioning state, we might change the value.
- curState := poller.StatusInProgress
- provState, err := poller.GetProvisioningState(resp)
- if err != nil && !errors.Is(err, poller.ErrNoBody) {
- return nil, err
- }
- if resp.StatusCode == http.StatusCreated && provState != "" {
- // absense of provisioning state is ok for a 201, means the operation is in progress
- curState = provState
- } else if resp.StatusCode == http.StatusOK {
- if provState != "" {
- curState = provState
- } else if provState == "" {
- // for a 200, absense of provisioning state indicates success
- curState = poller.StatusSucceeded
- }
- } else if resp.StatusCode == http.StatusNoContent {
- curState = poller.StatusSucceeded
- }
- p.CurState = curState
- return p, nil
-}
-
-func (p *Poller[T]) Done() bool {
- return poller.IsTerminalState(p.CurState)
-}
-
-func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
- err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) {
- if !poller.StatusCodeValid(resp) {
- p.resp = resp
- return "", exported.NewResponseError(resp)
- }
- if resp.StatusCode == http.StatusNoContent {
- p.resp = resp
- p.CurState = poller.StatusSucceeded
- return p.CurState, nil
- }
- state, err := poller.GetProvisioningState(resp)
- if errors.Is(err, poller.ErrNoBody) {
- // a missing response body in non-204 case is an error
- return "", err
- } else if state == "" {
- // a response body without provisioning state is considered terminal success
- state = poller.StatusSucceeded
- } else if err != nil {
- return "", err
- }
- p.resp = resp
- p.CurState = state
- return p.CurState, nil
- })
- if err != nil {
- return nil, err
- }
- return p.resp, nil
-}
-
-func (p *Poller[T]) Result(ctx context.Context, out *T) error {
- return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), "", out)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go
deleted file mode 100644
index 7f8d11b8..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake/fake.go
+++ /dev/null
@@ -1,133 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package fake
-
-import (
- "context"
- "errors"
- "fmt"
- "net/http"
- "strings"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/poller"
-)
-
-// Applicable returns true if the LRO is a fake.
-func Applicable(resp *http.Response) bool {
- return resp.Header.Get(shared.HeaderFakePollerStatus) != ""
-}
-
-// CanResume returns true if the token can rehydrate this poller type.
-func CanResume(token map[string]any) bool {
- _, ok := token["fakeURL"]
- return ok
-}
-
-// Poller is an LRO poller that uses the Core-Fake-Poller pattern.
-type Poller[T any] struct {
- pl exported.Pipeline
-
- resp *http.Response
-
- // The API name from CtxAPINameKey
- APIName string `json:"apiName"`
-
- // The URL from Core-Fake-Poller header.
- FakeURL string `json:"fakeURL"`
-
- // The LRO's current state.
- FakeStatus string `json:"status"`
-}
-
-// lroStatusURLSuffix is the URL path suffix for a faked LRO.
-const lroStatusURLSuffix = "/get/fake/status"
-
-// New creates a new Poller from the provided initial response.
-// Pass nil for response to create an empty Poller for rehydration.
-func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) {
- if resp == nil {
- log.Write(log.EventLRO, "Resuming Core-Fake-Poller poller.")
- return &Poller[T]{pl: pl}, nil
- }
-
- log.Write(log.EventLRO, "Using Core-Fake-Poller poller.")
- fakeStatus := resp.Header.Get(shared.HeaderFakePollerStatus)
- if fakeStatus == "" {
- return nil, errors.New("response is missing Fake-Poller-Status header")
- }
-
- ctxVal := resp.Request.Context().Value(shared.CtxAPINameKey{})
- if ctxVal == nil {
- return nil, errors.New("missing value for CtxAPINameKey")
- }
-
- apiName, ok := ctxVal.(string)
- if !ok {
- return nil, fmt.Errorf("expected string for CtxAPINameKey, the type was %T", ctxVal)
- }
-
- qp := ""
- if resp.Request.URL.RawQuery != "" {
- qp = "?" + resp.Request.URL.RawQuery
- }
-
- p := &Poller[T]{
- pl: pl,
- resp: resp,
- APIName: apiName,
- // NOTE: any changes to this path format MUST be reflected in SanitizePollerPath()
- FakeURL: fmt.Sprintf("%s://%s%s%s%s", resp.Request.URL.Scheme, resp.Request.URL.Host, resp.Request.URL.Path, lroStatusURLSuffix, qp),
- FakeStatus: fakeStatus,
- }
- return p, nil
-}
-
-// Done returns true if the LRO is in a terminal state.
-func (p *Poller[T]) Done() bool {
- return poller.IsTerminalState(p.FakeStatus)
-}
-
-// Poll retrieves the current state of the LRO.
-func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
- ctx = context.WithValue(ctx, shared.CtxAPINameKey{}, p.APIName)
- err := pollers.PollHelper(ctx, p.FakeURL, p.pl, func(resp *http.Response) (string, error) {
- if !poller.StatusCodeValid(resp) {
- p.resp = resp
- return "", exported.NewResponseError(resp)
- }
- fakeStatus := resp.Header.Get(shared.HeaderFakePollerStatus)
- if fakeStatus == "" {
- return "", errors.New("response is missing Fake-Poller-Status header")
- }
- p.resp = resp
- p.FakeStatus = fakeStatus
- return p.FakeStatus, nil
- })
- if err != nil {
- return nil, err
- }
- return p.resp, nil
-}
-
-func (p *Poller[T]) Result(ctx context.Context, out *T) error {
- if p.resp.StatusCode == http.StatusNoContent {
- return nil
- } else if poller.Failed(p.FakeStatus) {
- return exported.NewResponseError(p.resp)
- }
-
- return pollers.ResultHelper(p.resp, poller.Failed(p.FakeStatus), "", out)
-}
-
-// SanitizePollerPath removes any fake-appended suffix from a URL's path.
-func SanitizePollerPath(path string) string {
- return strings.TrimSuffix(path, lroStatusURLSuffix)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go
deleted file mode 100644
index 04828527..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go
+++ /dev/null
@@ -1,123 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package loc
-
-import (
- "context"
- "errors"
- "fmt"
- "net/http"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/poller"
-)
-
-// Kind is the identifier of this type in a resume token.
-const kind = "loc"
-
-// Applicable returns true if the LRO is using Location.
-func Applicable(resp *http.Response) bool {
- return resp.Header.Get(shared.HeaderLocation) != ""
-}
-
-// CanResume returns true if the token can rehydrate this poller type.
-func CanResume(token map[string]any) bool {
- t, ok := token["type"]
- if !ok {
- return false
- }
- tt, ok := t.(string)
- if !ok {
- return false
- }
- return tt == kind
-}
-
-// Poller is an LRO poller that uses the Location pattern.
-type Poller[T any] struct {
- pl exported.Pipeline
- resp *http.Response
-
- Type string `json:"type"`
- PollURL string `json:"pollURL"`
- CurState string `json:"state"`
-}
-
-// New creates a new Poller from the provided initial response.
-// Pass nil for response to create an empty Poller for rehydration.
-func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) {
- if resp == nil {
- log.Write(log.EventLRO, "Resuming Location poller.")
- return &Poller[T]{pl: pl}, nil
- }
- log.Write(log.EventLRO, "Using Location poller.")
- locURL := resp.Header.Get(shared.HeaderLocation)
- if locURL == "" {
- return nil, errors.New("response is missing Location header")
- }
- if !poller.IsValidURL(locURL) {
- return nil, fmt.Errorf("invalid polling URL %s", locURL)
- }
- // check for provisioning state. if the operation is a RELO
- // and terminates synchronously this will prevent extra polling.
- // it's ok if there's no provisioning state.
- state, _ := poller.GetProvisioningState(resp)
- if state == "" {
- state = poller.StatusInProgress
- }
- return &Poller[T]{
- pl: pl,
- resp: resp,
- Type: kind,
- PollURL: locURL,
- CurState: state,
- }, nil
-}
-
-func (p *Poller[T]) Done() bool {
- return poller.IsTerminalState(p.CurState)
-}
-
-func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
- err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) {
- // location polling can return an updated polling URL
- if h := resp.Header.Get(shared.HeaderLocation); h != "" {
- p.PollURL = h
- }
- // if provisioning state is available, use that. this is only
- // for some ARM LRO scenarios (e.g. DELETE with a Location header)
- // so if it's missing then use HTTP status code.
- provState, _ := poller.GetProvisioningState(resp)
- p.resp = resp
- if provState != "" {
- p.CurState = provState
- } else if resp.StatusCode == http.StatusAccepted {
- p.CurState = poller.StatusInProgress
- } else if resp.StatusCode > 199 && resp.StatusCode < 300 {
- // any 2xx other than a 202 indicates success
- p.CurState = poller.StatusSucceeded
- } else if pollers.IsNonTerminalHTTPStatusCode(resp) {
- // the request timed out or is being throttled.
- // DO NOT include this as a terminal failure. preserve
- // the existing state and return the response.
- } else {
- p.CurState = poller.StatusFailed
- }
- return p.CurState, nil
- })
- if err != nil {
- return nil, err
- }
- return p.resp, nil
-}
-
-func (p *Poller[T]) Result(ctx context.Context, out *T) error {
- return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), "", out)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go
deleted file mode 100644
index 03699fd7..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go
+++ /dev/null
@@ -1,150 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package op
-
-import (
- "context"
- "errors"
- "fmt"
- "net/http"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/poller"
-)
-
-// Applicable returns true if the LRO is using Operation-Location.
-func Applicable(resp *http.Response) bool {
- return resp.Header.Get(shared.HeaderOperationLocation) != ""
-}
-
-// CanResume returns true if the token can rehydrate this poller type.
-func CanResume(token map[string]any) bool {
- _, ok := token["oplocURL"]
- return ok
-}
-
-// Poller is an LRO poller that uses the Operation-Location pattern.
-type Poller[T any] struct {
- pl exported.Pipeline
- resp *http.Response
-
- OpLocURL string `json:"oplocURL"`
- LocURL string `json:"locURL"`
- OrigURL string `json:"origURL"`
- Method string `json:"method"`
- FinalState pollers.FinalStateVia `json:"finalState"`
- CurState string `json:"state"`
-}
-
-// New creates a new Poller from the provided initial response.
-// Pass nil for response to create an empty Poller for rehydration.
-func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) {
- if resp == nil {
- log.Write(log.EventLRO, "Resuming Operation-Location poller.")
- return &Poller[T]{pl: pl}, nil
- }
- log.Write(log.EventLRO, "Using Operation-Location poller.")
- opURL := resp.Header.Get(shared.HeaderOperationLocation)
- if opURL == "" {
- return nil, errors.New("response is missing Operation-Location header")
- }
- if !poller.IsValidURL(opURL) {
- return nil, fmt.Errorf("invalid Operation-Location URL %s", opURL)
- }
- locURL := resp.Header.Get(shared.HeaderLocation)
- // Location header is optional
- if locURL != "" && !poller.IsValidURL(locURL) {
- return nil, fmt.Errorf("invalid Location URL %s", locURL)
- }
- // default initial state to InProgress. if the
- // service sent us a status then use that instead.
- curState := poller.StatusInProgress
- status, err := poller.GetStatus(resp)
- if err != nil && !errors.Is(err, poller.ErrNoBody) {
- return nil, err
- }
- if status != "" {
- curState = status
- }
-
- return &Poller[T]{
- pl: pl,
- resp: resp,
- OpLocURL: opURL,
- LocURL: locURL,
- OrigURL: resp.Request.URL.String(),
- Method: resp.Request.Method,
- FinalState: finalState,
- CurState: curState,
- }, nil
-}
-
-func (p *Poller[T]) Done() bool {
- return poller.IsTerminalState(p.CurState)
-}
-
-func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) {
- err := pollers.PollHelper(ctx, p.OpLocURL, p.pl, func(resp *http.Response) (string, error) {
- if !poller.StatusCodeValid(resp) {
- p.resp = resp
- return "", exported.NewResponseError(resp)
- }
- state, err := poller.GetStatus(resp)
- if err != nil {
- return "", err
- } else if state == "" {
- return "", errors.New("the response did not contain a status")
- }
- p.resp = resp
- p.CurState = state
- return p.CurState, nil
- })
- if err != nil {
- return nil, err
- }
- return p.resp, nil
-}
-
-func (p *Poller[T]) Result(ctx context.Context, out *T) error {
- var req *exported.Request
- var err error
-
- // when the payload is included with the status monitor on
- // terminal success it's in the "result" JSON property
- payloadPath := "result"
-
- if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" {
- req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL)
- } else if rl, rlErr := poller.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, poller.ErrNoBody) {
- return rlErr
- } else if rl != "" {
- req, err = exported.NewRequest(ctx, http.MethodGet, rl)
- } else if p.Method == http.MethodPatch || p.Method == http.MethodPut {
- req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL)
- } else if p.Method == http.MethodPost && p.LocURL != "" {
- req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL)
- }
- if err != nil {
- return err
- }
-
- // if a final GET request has been created, execute it
- if req != nil {
- // no JSON path when making a final GET request
- payloadPath = ""
- resp, err := p.pl.Do(req)
- if err != nil {
- return err
- }
- p.resp = resp
- }
-
- return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), payloadPath, out)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go
deleted file mode 100644
index 37ed647f..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go
+++ /dev/null
@@ -1,24 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package pollers
-
-// FinalStateVia is the enumerated type for the possible final-state-via values.
-type FinalStateVia string
-
-const (
- // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL.
- FinalStateViaAzureAsyncOp FinalStateVia = "azure-async-operation"
-
- // FinalStateViaLocation indicates the final payload comes from the Location URL.
- FinalStateViaLocation FinalStateVia = "location"
-
- // FinalStateViaOriginalURI indicates the final payload comes from the original URL.
- FinalStateViaOriginalURI FinalStateVia = "original-uri"
-
- // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL.
- FinalStateViaOpLocation FinalStateVia = "operation-location"
-)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go
deleted file mode 100644
index 6a7a32e0..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go
+++ /dev/null
@@ -1,212 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package pollers
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "reflect"
-
- azexported "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/exported"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/poller"
-)
-
-// getTokenTypeName creates a type name from the type parameter T.
-func getTokenTypeName[T any]() (string, error) {
- tt := shared.TypeOfT[T]()
- var n string
- if tt.Kind() == reflect.Pointer {
- n = "*"
- tt = tt.Elem()
- }
- n += tt.Name()
- if n == "" {
- return "", errors.New("nameless types are not allowed")
- }
- return n, nil
-}
-
-type resumeTokenWrapper[T any] struct {
- Type string `json:"type"`
- Token T `json:"token"`
-}
-
-// NewResumeToken creates a resume token from the specified type.
-// An error is returned if the generic type has no name (e.g. struct{}).
-func NewResumeToken[TResult, TSource any](from TSource) (string, error) {
- n, err := getTokenTypeName[TResult]()
- if err != nil {
- return "", err
- }
- b, err := json.Marshal(resumeTokenWrapper[TSource]{
- Type: n,
- Token: from,
- })
- if err != nil {
- return "", err
- }
- return string(b), nil
-}
-
-// ExtractToken returns the poller-specific token information from the provided token value.
-func ExtractToken(token string) ([]byte, error) {
- raw := map[string]json.RawMessage{}
- if err := json.Unmarshal([]byte(token), &raw); err != nil {
- return nil, err
- }
- // this is dependent on the type resumeTokenWrapper[T]
- tk, ok := raw["token"]
- if !ok {
- return nil, errors.New("missing token value")
- }
- return tk, nil
-}
-
-// IsTokenValid returns an error if the specified token isn't applicable for generic type T.
-func IsTokenValid[T any](token string) error {
- raw := map[string]any{}
- if err := json.Unmarshal([]byte(token), &raw); err != nil {
- return err
- }
- t, ok := raw["type"]
- if !ok {
- return errors.New("missing type value")
- }
- tt, ok := t.(string)
- if !ok {
- return fmt.Errorf("invalid type format %T", t)
- }
- n, err := getTokenTypeName[T]()
- if err != nil {
- return err
- }
- if tt != n {
- return fmt.Errorf("cannot resume from this poller token. token is for type %s, not %s", tt, n)
- }
- return nil
-}
-
-// used if the operation synchronously completed
-type NopPoller[T any] struct {
- resp *http.Response
- result T
-}
-
-// NewNopPoller creates a NopPoller from the provided response.
-// It unmarshals the response body into an instance of T.
-func NewNopPoller[T any](resp *http.Response) (*NopPoller[T], error) {
- np := &NopPoller[T]{resp: resp}
- if resp.StatusCode == http.StatusNoContent {
- return np, nil
- }
- payload, err := exported.Payload(resp, nil)
- if err != nil {
- return nil, err
- }
- if len(payload) == 0 {
- return np, nil
- }
- if err = json.Unmarshal(payload, &np.result); err != nil {
- return nil, err
- }
- return np, nil
-}
-
-func (*NopPoller[T]) Done() bool {
- return true
-}
-
-func (p *NopPoller[T]) Poll(context.Context) (*http.Response, error) {
- return p.resp, nil
-}
-
-func (p *NopPoller[T]) Result(ctx context.Context, out *T) error {
- *out = p.result
- return nil
-}
-
-// PollHelper creates and executes the request, calling update() with the response.
-// If the request fails, the update func is not called.
-// The update func returns the state of the operation for logging purposes or an error
-// if it fails to extract the required state from the response.
-func PollHelper(ctx context.Context, endpoint string, pl azexported.Pipeline, update func(resp *http.Response) (string, error)) error {
- req, err := azexported.NewRequest(ctx, http.MethodGet, endpoint)
- if err != nil {
- return err
- }
- resp, err := pl.Do(req)
- if err != nil {
- return err
- }
- state, err := update(resp)
- if err != nil {
- return err
- }
- log.Writef(log.EventLRO, "State %s", state)
- return nil
-}
-
-// ResultHelper processes the response as success or failure.
-// In the success case, it unmarshals the payload into either a new instance of T or out.
-// In the failure case, it creates an *azcore.Response error from the response.
-func ResultHelper[T any](resp *http.Response, failed bool, jsonPath string, out *T) error {
- // short-circuit the simple success case with no response body to unmarshal
- if resp.StatusCode == http.StatusNoContent {
- return nil
- }
-
- defer resp.Body.Close()
- if !poller.StatusCodeValid(resp) || failed {
- // the LRO failed. unmarshall the error and update state
- return azexported.NewResponseError(resp)
- }
-
- // success case
- payload, err := exported.Payload(resp, nil)
- if err != nil {
- return err
- }
-
- if jsonPath != "" && len(payload) > 0 {
- // extract the payload from the specified JSON path.
- // do this before the zero-length check in case there
- // is no payload.
- jsonBody := map[string]json.RawMessage{}
- if err = json.Unmarshal(payload, &jsonBody); err != nil {
- return err
- }
- payload = jsonBody[jsonPath]
- }
-
- if len(payload) == 0 {
- return nil
- }
-
- if err = json.Unmarshal(payload, out); err != nil {
- return err
- }
- return nil
-}
-
-// IsNonTerminalHTTPStatusCode returns true if the HTTP status code should be
-// considered non-terminal thus eligible for retry.
-func IsNonTerminalHTTPStatusCode(resp *http.Response) bool {
- return exported.HasStatusCode(resp,
- http.StatusRequestTimeout, // 408
- http.StatusTooManyRequests, // 429
- http.StatusInternalServerError, // 500
- http.StatusBadGateway, // 502
- http.StatusServiceUnavailable, // 503
- http.StatusGatewayTimeout, // 504
- )
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
deleted file mode 100644
index 79651fd9..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
+++ /dev/null
@@ -1,44 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package shared
-
-const (
- ContentTypeAppJSON = "application/json"
- ContentTypeAppXML = "application/xml"
- ContentTypeTextPlain = "text/plain"
-)
-
-const (
- HeaderAuthorization = "Authorization"
- HeaderAuxiliaryAuthorization = "x-ms-authorization-auxiliary"
- HeaderAzureAsync = "Azure-AsyncOperation"
- HeaderContentLength = "Content-Length"
- HeaderContentType = "Content-Type"
- HeaderFakePollerStatus = "Fake-Poller-Status"
- HeaderLocation = "Location"
- HeaderOperationLocation = "Operation-Location"
- HeaderRetryAfter = "Retry-After"
- HeaderRetryAfterMS = "Retry-After-Ms"
- HeaderUserAgent = "User-Agent"
- HeaderWWWAuthenticate = "WWW-Authenticate"
- HeaderXMSClientRequestID = "x-ms-client-request-id"
- HeaderXMSRequestID = "x-ms-request-id"
- HeaderXMSErrorCode = "x-ms-error-code"
- HeaderXMSRetryAfterMS = "x-ms-retry-after-ms"
-)
-
-const BearerTokenPrefix = "Bearer "
-
-const TracingNamespaceAttrName = "az.namespace"
-
-const (
- // Module is the name of the calling module used in telemetry data.
- Module = "azcore"
-
- // Version is the semantic version (see http://semver.org) of this module.
- Version = "v1.12.0"
-)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go
deleted file mode 100644
index d3da2c5f..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go
+++ /dev/null
@@ -1,149 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package shared
-
-import (
- "context"
- "fmt"
- "net/http"
- "reflect"
- "regexp"
- "strconv"
- "time"
-)
-
-// NOTE: when adding a new context key type, it likely needs to be
-// added to the deny-list of key types in ContextWithDeniedValues
-
-// CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header.
-type CtxWithHTTPHeaderKey struct{}
-
-// CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions.
-type CtxWithRetryOptionsKey struct{}
-
-// CtxWithCaptureResponse is used as a context key for retrieving the raw response.
-type CtxWithCaptureResponse struct{}
-
-// CtxWithTracingTracer is used as a context key for adding/retrieving tracing.Tracer.
-type CtxWithTracingTracer struct{}
-
-// CtxAPINameKey is used as a context key for adding/retrieving the API name.
-type CtxAPINameKey struct{}
-
-// Delay waits for the duration to elapse or the context to be cancelled.
-func Delay(ctx context.Context, delay time.Duration) error {
- select {
- case <-time.After(delay):
- return nil
- case <-ctx.Done():
- return ctx.Err()
- }
-}
-
-// RetryAfter returns non-zero if the response contains one of the headers with a "retry after" value.
-// Headers are checked in the following order: retry-after-ms, x-ms-retry-after-ms, retry-after
-func RetryAfter(resp *http.Response) time.Duration {
- if resp == nil {
- return 0
- }
-
- type retryData struct {
- header string
- units time.Duration
-
- // custom is used when the regular algorithm failed and is optional.
- // the returned duration is used verbatim (units is not applied).
- custom func(string) time.Duration
- }
-
- nop := func(string) time.Duration { return 0 }
-
- // the headers are listed in order of preference
- retries := []retryData{
- {
- header: HeaderRetryAfterMS,
- units: time.Millisecond,
- custom: nop,
- },
- {
- header: HeaderXMSRetryAfterMS,
- units: time.Millisecond,
- custom: nop,
- },
- {
- header: HeaderRetryAfter,
- units: time.Second,
-
- // retry-after values are expressed in either number of
- // seconds or an HTTP-date indicating when to try again
- custom: func(ra string) time.Duration {
- t, err := time.Parse(time.RFC1123, ra)
- if err != nil {
- return 0
- }
- return time.Until(t)
- },
- },
- }
-
- for _, retry := range retries {
- v := resp.Header.Get(retry.header)
- if v == "" {
- continue
- }
- if retryAfter, _ := strconv.Atoi(v); retryAfter > 0 {
- return time.Duration(retryAfter) * retry.units
- } else if d := retry.custom(v); d > 0 {
- return d
- }
- }
-
- return 0
-}
-
-// TypeOfT returns the type of the generic type param.
-func TypeOfT[T any]() reflect.Type {
- // you can't, at present, obtain the type of
- // a type parameter, so this is the trick
- return reflect.TypeOf((*T)(nil)).Elem()
-}
-
-// TransportFunc is a helper to use a first-class func to satisfy the Transporter interface.
-type TransportFunc func(*http.Request) (*http.Response, error)
-
-// Do implements the Transporter interface for the TransportFunc type.
-func (pf TransportFunc) Do(req *http.Request) (*http.Response, error) {
- return pf(req)
-}
-
-// ValidateModVer verifies that moduleVersion is a valid semver 2.0 string.
-func ValidateModVer(moduleVersion string) error {
- modVerRegx := regexp.MustCompile(`^v\d+\.\d+\.\d+(?:-[a-zA-Z0-9_.-]+)?$`)
- if !modVerRegx.MatchString(moduleVersion) {
- return fmt.Errorf("malformed moduleVersion param value %s", moduleVersion)
- }
- return nil
-}
-
-// ContextWithDeniedValues wraps an existing [context.Context], denying access to certain context values.
-// Pipeline policies that create new requests to be sent down their own pipeline MUST wrap the caller's
-// context with an instance of this type. This is to prevent context values from flowing across disjoint
-// requests which can have unintended side-effects.
-type ContextWithDeniedValues struct {
- context.Context
-}
-
-// Value implements part of the [context.Context] interface.
-// It acts as a deny-list for certain context keys.
-func (c *ContextWithDeniedValues) Value(key any) any {
- switch key.(type) {
- case CtxAPINameKey, CtxWithCaptureResponse, CtxWithHTTPHeaderKey, CtxWithRetryOptionsKey, CtxWithTracingTracer:
- return nil
- default:
- return c.Context.Value(key)
- }
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go
deleted file mode 100644
index 2f3901bf..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go
+++ /dev/null
@@ -1,10 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright 2017 Microsoft Corporation. All rights reserved.
-// Use of this source code is governed by an MIT
-// license that can be found in the LICENSE file.
-
-// Package log contains functionality for configuring logging behavior.
-// Default logging to stderr can be enabled by setting environment variable AZURE_SDK_GO_LOGGING to "all".
-package log
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go
deleted file mode 100644
index f260dac3..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go
+++ /dev/null
@@ -1,55 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-// Package log provides functionality for configuring logging facilities.
-package log
-
-import (
- "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
-)
-
-// Event is used to group entries. Each group can be toggled on or off.
-type Event = log.Event
-
-const (
- // EventRequest entries contain information about HTTP requests.
- // This includes information like the URL, query parameters, and headers.
- EventRequest Event = "Request"
-
- // EventResponse entries contain information about HTTP responses.
- // This includes information like the HTTP status code, headers, and request URL.
- EventResponse Event = "Response"
-
- // EventResponseError entries contain information about HTTP responses that returned
- // an *azcore.ResponseError (i.e. responses with a non 2xx HTTP status code).
- // This includes the contents of ResponseError.Error().
- EventResponseError Event = "ResponseError"
-
- // EventRetryPolicy entries contain information specific to the retry policy in use.
- EventRetryPolicy Event = "Retry"
-
- // EventLRO entries contain information specific to long-running operations.
- // This includes information like polling location, operation state, and sleep intervals.
- EventLRO Event = "LongRunningOperation"
-)
-
-// SetEvents is used to control which events are written to
-// the log. By default all log events are writen.
-// NOTE: this is not goroutine safe and should be called before using SDK clients.
-func SetEvents(cls ...Event) {
- log.SetEvents(cls...)
-}
-
-// SetListener will set the Logger to write to the specified Listener.
-// NOTE: this is not goroutine safe and should be called before using SDK clients.
-func SetListener(lst func(Event, string)) {
- log.SetListener(lst)
-}
-
-// for testing purposes
-func resetEvents() {
- log.TestResetEvents()
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go
deleted file mode 100644
index fad2579e..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go
+++ /dev/null
@@ -1,10 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright 2017 Microsoft Corporation. All rights reserved.
-// Use of this source code is governed by an MIT
-// license that can be found in the LICENSE file.
-
-// Package policy contains the definitions needed for configuring in-box pipeline policies
-// and creating custom policies.
-package policy
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go
deleted file mode 100644
index 8d984535..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go
+++ /dev/null
@@ -1,197 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package policy
-
-import (
- "context"
- "net/http"
- "time"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
-)
-
-// Policy represents an extensibility point for the Pipeline that can mutate the specified
-// Request and react to the received Response.
-type Policy = exported.Policy
-
-// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses.
-type Transporter = exported.Transporter
-
-// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline.
-// Don't use this type directly, use runtime.NewRequest() instead.
-type Request = exported.Request
-
-// ClientOptions contains optional settings for a client's pipeline.
-// Instances can be shared across calls to SDK client constructors when uniform configuration is desired.
-// Zero-value fields will have their specified default values applied during use.
-type ClientOptions struct {
- // APIVersion overrides the default version requested of the service.
- // Set with caution as this package version has not been tested with arbitrary service versions.
- APIVersion string
-
- // Cloud specifies a cloud for the client. The default is Azure Public Cloud.
- Cloud cloud.Configuration
-
- // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP.
- // By default, authenticated requests to an HTTP endpoint are rejected by the client.
- // WARNING: setting this to true will allow sending the credential in clear text. Use with caution.
- InsecureAllowCredentialWithHTTP bool
-
- // Logging configures the built-in logging policy.
- Logging LogOptions
-
- // Retry configures the built-in retry policy.
- Retry RetryOptions
-
- // Telemetry configures the built-in telemetry policy.
- Telemetry TelemetryOptions
-
- // TracingProvider configures the tracing provider.
- // It defaults to a no-op tracer.
- TracingProvider tracing.Provider
-
- // Transport sets the transport for HTTP requests.
- Transport Transporter
-
- // PerCallPolicies contains custom policies to inject into the pipeline.
- // Each policy is executed once per request.
- PerCallPolicies []Policy
-
- // PerRetryPolicies contains custom policies to inject into the pipeline.
- // Each policy is executed once per request, and for each retry of that request.
- PerRetryPolicies []Policy
-}
-
-// LogOptions configures the logging policy's behavior.
-type LogOptions struct {
- // IncludeBody indicates if request and response bodies should be included in logging.
- // The default value is false.
- // NOTE: enabling this can lead to disclosure of sensitive information, use with care.
- IncludeBody bool
-
- // AllowedHeaders is the slice of headers to log with their values intact.
- // All headers not in the slice will have their values REDACTED.
- // Applies to request and response headers.
- AllowedHeaders []string
-
- // AllowedQueryParams is the slice of query parameters to log with their values intact.
- // All query parameters not in the slice will have their values REDACTED.
- AllowedQueryParams []string
-}
-
-// RetryOptions configures the retry policy's behavior.
-// Zero-value fields will have their specified default values applied during use.
-// This allows for modification of a subset of fields.
-type RetryOptions struct {
- // MaxRetries specifies the maximum number of attempts a failed operation will be retried
- // before producing an error.
- // The default value is three. A value less than zero means one try and no retries.
- MaxRetries int32
-
- // TryTimeout indicates the maximum time allowed for any single try of an HTTP request.
- // This is disabled by default. Specify a value greater than zero to enable.
- // NOTE: Setting this to a small value might cause premature HTTP request time-outs.
- TryTimeout time.Duration
-
- // RetryDelay specifies the initial amount of delay to use before retrying an operation.
- // The value is used only if the HTTP response does not contain a Retry-After header.
- // The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay.
- // The default value is four seconds. A value less than zero means no delay between retries.
- RetryDelay time.Duration
-
- // MaxRetryDelay specifies the maximum delay allowed before retrying an operation.
- // Typically the value is greater than or equal to the value specified in RetryDelay.
- // The default Value is 60 seconds. A value less than zero means there is no cap.
- MaxRetryDelay time.Duration
-
- // StatusCodes specifies the HTTP status codes that indicate the operation should be retried.
- // A nil slice will use the following values.
- // http.StatusRequestTimeout 408
- // http.StatusTooManyRequests 429
- // http.StatusInternalServerError 500
- // http.StatusBadGateway 502
- // http.StatusServiceUnavailable 503
- // http.StatusGatewayTimeout 504
- // Specifying values will replace the default values.
- // Specifying an empty slice will disable retries for HTTP status codes.
- StatusCodes []int
-
- // ShouldRetry evaluates if the retry policy should retry the request.
- // When specified, the function overrides comparison against the list of
- // HTTP status codes and error checking within the retry policy. Context
- // and NonRetriable errors remain evaluated before calling ShouldRetry.
- // The *http.Response and error parameters are mutually exclusive, i.e.
- // if one is nil, the other is not nil.
- // A return value of true means the retry policy should retry.
- ShouldRetry func(*http.Response, error) bool
-}
-
-// TelemetryOptions configures the telemetry policy's behavior.
-type TelemetryOptions struct {
- // ApplicationID is an application-specific identification string to add to the User-Agent.
- // It has a maximum length of 24 characters and must not contain any spaces.
- ApplicationID string
-
- // Disabled will prevent the addition of any telemetry data to the User-Agent.
- Disabled bool
-}
-
-// TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token.
-type TokenRequestOptions = exported.TokenRequestOptions
-
-// BearerTokenOptions configures the bearer token policy's behavior.
-type BearerTokenOptions struct {
- // AuthorizationHandler allows SDK developers to run client-specific logic when BearerTokenPolicy must authorize a request.
- // When this field isn't set, the policy follows its default behavior of authorizing every request with a bearer token from
- // its given credential.
- AuthorizationHandler AuthorizationHandler
-
- // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP.
- // By default, authenticated requests to an HTTP endpoint are rejected by the client.
- // WARNING: setting this to true will allow sending the bearer token in clear text. Use with caution.
- InsecureAllowCredentialWithHTTP bool
-}
-
-// AuthorizationHandler allows SDK developers to insert custom logic that runs when BearerTokenPolicy must authorize a request.
-type AuthorizationHandler struct {
- // OnRequest is called each time the policy receives a request. Its func parameter authorizes the request with a token
- // from the policy's given credential. Implementations that need to perform I/O should use the Request's context,
- // available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't
- // send the request. When OnRequest is nil, the policy follows its default behavior, authorizing the request with a
- // token from its credential according to its configuration.
- OnRequest func(*Request, func(TokenRequestOptions) error) error
-
- // OnChallenge is called when the policy receives a 401 response, allowing the AuthorizationHandler to re-authorize the
- // request according to an authentication challenge (the Response's WWW-Authenticate header). OnChallenge is responsible
- // for parsing parameters from the challenge. Its func parameter will authorize the request with a token from the policy's
- // given credential. Implementations that need to perform I/O should use the Request's context, available from
- // Request.Raw().Context(). When OnChallenge returns nil, the policy will send the request again. When OnChallenge is nil,
- // the policy will return any 401 response to the client.
- OnChallenge func(*Request, *http.Response, func(TokenRequestOptions) error) error
-}
-
-// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context.
-// The resp parameter will contain the HTTP response after the request has completed.
-func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context {
- return context.WithValue(parent, shared.CtxWithCaptureResponse{}, resp)
-}
-
-// WithHTTPHeader adds the specified http.Header to the parent context.
-// Use this to specify custom HTTP headers at the API-call level.
-// Any overlapping headers will have their values replaced with the values specified here.
-func WithHTTPHeader(parent context.Context, header http.Header) context.Context {
- return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header)
-}
-
-// WithRetryOptions adds the specified RetryOptions to the parent context.
-// Use this to specify custom RetryOptions at the API-call level.
-func WithRetryOptions(parent context.Context, options RetryOptions) context.Context {
- return context.WithValue(parent, shared.CtxWithRetryOptionsKey{}, options)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go
deleted file mode 100644
index c9cfa438..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go
+++ /dev/null
@@ -1,10 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright 2017 Microsoft Corporation. All rights reserved.
-// Use of this source code is governed by an MIT
-// license that can be found in the LICENSE file.
-
-// Package runtime contains various facilities for creating requests and handling responses.
-// The content is intended for SDK authors.
-package runtime
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go
deleted file mode 100644
index c0d56158..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go
+++ /dev/null
@@ -1,27 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "net/http"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
-)
-
-// NewResponseError creates an *azcore.ResponseError from the provided HTTP response.
-// Call this when a service request returns a non-successful status code.
-// The error code will be extracted from the *http.Response, either from the x-ms-error-code
-// header (preferred) or attempted to be parsed from the response body.
-func NewResponseError(resp *http.Response) error {
- return exported.NewResponseError(resp)
-}
-
-// NewResponseErrorWithErrorCode creates an *azcore.ResponseError from the provided HTTP response and errorCode.
-// Use this variant when the error code is in a non-standard location.
-func NewResponseErrorWithErrorCode(resp *http.Response, errorCode string) error {
- return exported.NewResponseErrorWithErrorCode(resp, errorCode)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go
deleted file mode 100644
index b960cff0..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go
+++ /dev/null
@@ -1,137 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "reflect"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
-)
-
-// PagingHandler contains the required data for constructing a Pager.
-type PagingHandler[T any] struct {
- // More returns a boolean indicating if there are more pages to fetch.
- // It uses the provided page to make the determination.
- More func(T) bool
-
- // Fetcher fetches the first and subsequent pages.
- Fetcher func(context.Context, *T) (T, error)
-
- // Tracer contains the Tracer from the client that's creating the Pager.
- Tracer tracing.Tracer
-}
-
-// Pager provides operations for iterating over paged responses.
-type Pager[T any] struct {
- current *T
- handler PagingHandler[T]
- tracer tracing.Tracer
- firstPage bool
-}
-
-// NewPager creates an instance of Pager using the specified PagingHandler.
-// Pass a non-nil T for firstPage if the first page has already been retrieved.
-func NewPager[T any](handler PagingHandler[T]) *Pager[T] {
- return &Pager[T]{
- handler: handler,
- tracer: handler.Tracer,
- firstPage: true,
- }
-}
-
-// More returns true if there are more pages to retrieve.
-func (p *Pager[T]) More() bool {
- if p.current != nil {
- return p.handler.More(*p.current)
- }
- return true
-}
-
-// NextPage advances the pager to the next page.
-func (p *Pager[T]) NextPage(ctx context.Context) (T, error) {
- if p.current != nil {
- if p.firstPage {
- // we get here if it's an LRO-pager, we already have the first page
- p.firstPage = false
- return *p.current, nil
- } else if !p.handler.More(*p.current) {
- return *new(T), errors.New("no more pages")
- }
- } else {
- // non-LRO case, first page
- p.firstPage = false
- }
-
- var err error
- ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.NextPage", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil)
- defer func() { endSpan(err) }()
-
- resp, err := p.handler.Fetcher(ctx, p.current)
- if err != nil {
- return *new(T), err
- }
- p.current = &resp
- return *p.current, nil
-}
-
-// UnmarshalJSON implements the json.Unmarshaler interface for Pager[T].
-func (p *Pager[T]) UnmarshalJSON(data []byte) error {
- return json.Unmarshal(data, &p.current)
-}
-
-// FetcherForNextLinkOptions contains the optional values for [FetcherForNextLink].
-type FetcherForNextLinkOptions struct {
- // NextReq is the func to be called when requesting subsequent pages.
- // Used for paged operations that have a custom next link operation.
- NextReq func(context.Context, string) (*policy.Request, error)
-
- // StatusCodes contains additional HTTP status codes indicating success.
- // The default value is http.StatusOK.
- StatusCodes []int
-}
-
-// FetcherForNextLink is a helper containing boilerplate code to simplify creating a PagingHandler[T].Fetcher from a next link URL.
-// - ctx is the [context.Context] controlling the lifetime of the HTTP operation
-// - pl is the [Pipeline] used to dispatch the HTTP request
-// - nextLink is the URL used to fetch the next page. the empty string indicates the first page is to be requested
-// - firstReq is the func to be called when creating the request for the first page
-// - options contains any optional parameters, pass nil to accept the default values
-func FetcherForNextLink(ctx context.Context, pl Pipeline, nextLink string, firstReq func(context.Context) (*policy.Request, error), options *FetcherForNextLinkOptions) (*http.Response, error) {
- var req *policy.Request
- var err error
- if options == nil {
- options = &FetcherForNextLinkOptions{}
- }
- if nextLink == "" {
- req, err = firstReq(ctx)
- } else if nextLink, err = EncodeQueryParams(nextLink); err == nil {
- if options.NextReq != nil {
- req, err = options.NextReq(ctx, nextLink)
- } else {
- req, err = NewRequest(ctx, http.MethodGet, nextLink)
- }
- }
- if err != nil {
- return nil, err
- }
- resp, err := pl.Do(req)
- if err != nil {
- return nil, err
- }
- successCodes := []int{http.StatusOK}
- successCodes = append(successCodes, options.StatusCodes...)
- if !HasStatusCode(resp, successCodes...) {
- return nil, NewResponseError(resp)
- }
- return resp, nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go
deleted file mode 100644
index 6b1f5c08..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go
+++ /dev/null
@@ -1,94 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
-)
-
-// PipelineOptions contains Pipeline options for SDK developers
-type PipelineOptions struct {
- // AllowedHeaders is the slice of headers to log with their values intact.
- // All headers not in the slice will have their values REDACTED.
- // Applies to request and response headers.
- AllowedHeaders []string
-
- // AllowedQueryParameters is the slice of query parameters to log with their values intact.
- // All query parameters not in the slice will have their values REDACTED.
- AllowedQueryParameters []string
-
- // APIVersion overrides the default version requested of the service.
- // Set with caution as this package version has not been tested with arbitrary service versions.
- APIVersion APIVersionOptions
-
- // PerCall contains custom policies to inject into the pipeline.
- // Each policy is executed once per request.
- PerCall []policy.Policy
-
- // PerRetry contains custom policies to inject into the pipeline.
- // Each policy is executed once per request, and for each retry of that request.
- PerRetry []policy.Policy
-
- // Tracing contains options used to configure distributed tracing.
- Tracing TracingOptions
-}
-
-// TracingOptions contains tracing options for SDK developers.
-type TracingOptions struct {
- // Namespace contains the value to use for the az.namespace span attribute.
- Namespace string
-}
-
-// Pipeline represents a primitive for sending HTTP requests and receiving responses.
-// Its behavior can be extended by specifying policies during construction.
-type Pipeline = exported.Pipeline
-
-// NewPipeline creates a pipeline from connection options, with any additional policies as specified.
-// Policies from ClientOptions are placed after policies from PipelineOptions.
-// The module and version parameters are used by the telemetry policy, when enabled.
-func NewPipeline(module, version string, plOpts PipelineOptions, options *policy.ClientOptions) Pipeline {
- cp := policy.ClientOptions{}
- if options != nil {
- cp = *options
- }
- if len(plOpts.AllowedHeaders) > 0 {
- headers := make([]string, len(plOpts.AllowedHeaders)+len(cp.Logging.AllowedHeaders))
- copy(headers, plOpts.AllowedHeaders)
- headers = append(headers, cp.Logging.AllowedHeaders...)
- cp.Logging.AllowedHeaders = headers
- }
- if len(plOpts.AllowedQueryParameters) > 0 {
- qp := make([]string, len(plOpts.AllowedQueryParameters)+len(cp.Logging.AllowedQueryParams))
- copy(qp, plOpts.AllowedQueryParameters)
- qp = append(qp, cp.Logging.AllowedQueryParams...)
- cp.Logging.AllowedQueryParams = qp
- }
- // we put the includeResponsePolicy at the very beginning so that the raw response
- // is populated with the final response (some policies might mutate the response)
- policies := []policy.Policy{exported.PolicyFunc(includeResponsePolicy)}
- if cp.APIVersion != "" {
- policies = append(policies, newAPIVersionPolicy(cp.APIVersion, &plOpts.APIVersion))
- }
- if !cp.Telemetry.Disabled {
- policies = append(policies, NewTelemetryPolicy(module, version, &cp.Telemetry))
- }
- policies = append(policies, plOpts.PerCall...)
- policies = append(policies, cp.PerCallPolicies...)
- policies = append(policies, NewRetryPolicy(&cp.Retry))
- policies = append(policies, plOpts.PerRetry...)
- policies = append(policies, cp.PerRetryPolicies...)
- policies = append(policies, exported.PolicyFunc(httpHeaderPolicy))
- policies = append(policies, newHTTPTracePolicy(cp.Logging.AllowedQueryParams))
- policies = append(policies, NewLogPolicy(&cp.Logging))
- policies = append(policies, exported.PolicyFunc(bodyDownloadPolicy))
- transport := cp.Transport
- if transport == nil {
- transport = defaultHTTPClient
- }
- return exported.NewPipeline(transport, policies...)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go
deleted file mode 100644
index e5309aa6..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_api_version.go
+++ /dev/null
@@ -1,75 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "errors"
- "fmt"
- "net/http"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
-)
-
-// APIVersionOptions contains options for API versions
-type APIVersionOptions struct {
- // Location indicates where to set the version on a request, for example in a header or query param
- Location APIVersionLocation
- // Name is the name of the header or query parameter, for example "api-version"
- Name string
-}
-
-// APIVersionLocation indicates which part of a request identifies the service version
-type APIVersionLocation int
-
-const (
- // APIVersionLocationQueryParam indicates a query parameter
- APIVersionLocationQueryParam = 0
- // APIVersionLocationHeader indicates a header
- APIVersionLocationHeader = 1
-)
-
-// newAPIVersionPolicy constructs an APIVersionPolicy. If version is "", Do will be a no-op. If version
-// isn't empty and opts.Name is empty, Do will return an error.
-func newAPIVersionPolicy(version string, opts *APIVersionOptions) *apiVersionPolicy {
- if opts == nil {
- opts = &APIVersionOptions{}
- }
- return &apiVersionPolicy{location: opts.Location, name: opts.Name, version: version}
-}
-
-// apiVersionPolicy enables users to set the API version of every request a client sends.
-type apiVersionPolicy struct {
- // location indicates whether "name" refers to a query parameter or header.
- location APIVersionLocation
-
- // name of the query param or header whose value should be overridden; provided by the client.
- name string
-
- // version is the value (provided by the user) that replaces the default version value.
- version string
-}
-
-// Do sets the request's API version, if the policy is configured to do so, replacing any prior value.
-func (a *apiVersionPolicy) Do(req *policy.Request) (*http.Response, error) {
- if a.version != "" {
- if a.name == "" {
- // user set ClientOptions.APIVersion but the client ctor didn't set PipelineOptions.APIVersionOptions
- return nil, errors.New("this client doesn't support overriding its API version")
- }
- switch a.location {
- case APIVersionLocationHeader:
- req.Raw().Header.Set(a.name, a.version)
- case APIVersionLocationQueryParam:
- q := req.Raw().URL.Query()
- q.Set(a.name, a.version)
- req.Raw().URL.RawQuery = q.Encode()
- default:
- return nil, fmt.Errorf("unknown APIVersionLocation %d", a.location)
- }
- }
- return req.Next()
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
deleted file mode 100644
index cb2a6952..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "errors"
- "net/http"
- "strings"
- "time"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal"
-)
-
-// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential.
-type BearerTokenPolicy struct {
- // mainResource is the resource to be retreived using the tenant specified in the credential
- mainResource *temporal.Resource[exported.AccessToken, acquiringResourceState]
- // the following fields are read-only
- authzHandler policy.AuthorizationHandler
- cred exported.TokenCredential
- scopes []string
- allowHTTP bool
-}
-
-type acquiringResourceState struct {
- req *policy.Request
- p *BearerTokenPolicy
- tro policy.TokenRequestOptions
-}
-
-// acquire acquires or updates the resource; only one
-// thread/goroutine at a time ever calls this function
-func acquire(state acquiringResourceState) (newResource exported.AccessToken, newExpiration time.Time, err error) {
- tk, err := state.p.cred.GetToken(&shared.ContextWithDeniedValues{Context: state.req.Raw().Context()}, state.tro)
- if err != nil {
- return exported.AccessToken{}, time.Time{}, err
- }
- return tk, tk.ExpiresOn, nil
-}
-
-// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens.
-// cred: an azcore.TokenCredential implementation such as a credential object from azidentity
-// scopes: the list of permission scopes required for the token.
-// opts: optional settings. Pass nil to accept default values; this is the same as passing a zero-value options.
-func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts *policy.BearerTokenOptions) *BearerTokenPolicy {
- if opts == nil {
- opts = &policy.BearerTokenOptions{}
- }
- return &BearerTokenPolicy{
- authzHandler: opts.AuthorizationHandler,
- cred: cred,
- scopes: scopes,
- mainResource: temporal.NewResource(acquire),
- allowHTTP: opts.InsecureAllowCredentialWithHTTP,
- }
-}
-
-// authenticateAndAuthorize returns a function which authorizes req with a token from the policy's credential
-func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(policy.TokenRequestOptions) error {
- return func(tro policy.TokenRequestOptions) error {
- as := acquiringResourceState{p: b, req: req, tro: tro}
- tk, err := b.mainResource.Get(as)
- if err != nil {
- return err
- }
- req.Raw().Header.Set(shared.HeaderAuthorization, shared.BearerTokenPrefix+tk.Token)
- return nil
- }
-}
-
-// Do authorizes a request with a bearer token
-func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) {
- // skip adding the authorization header if no TokenCredential was provided.
- // this prevents a panic that might be hard to diagnose and allows testing
- // against http endpoints that don't require authentication.
- if b.cred == nil {
- return req.Next()
- }
-
- if err := checkHTTPSForAuth(req, b.allowHTTP); err != nil {
- return nil, err
- }
-
- var err error
- if b.authzHandler.OnRequest != nil {
- err = b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req))
- } else {
- err = b.authenticateAndAuthorize(req)(policy.TokenRequestOptions{Scopes: b.scopes})
- }
- if err != nil {
- return nil, errorinfo.NonRetriableError(err)
- }
-
- res, err := req.Next()
- if err != nil {
- return nil, err
- }
-
- if res.StatusCode == http.StatusUnauthorized {
- b.mainResource.Expire()
- if res.Header.Get("WWW-Authenticate") != "" && b.authzHandler.OnChallenge != nil {
- if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil {
- res, err = req.Next()
- }
- }
- }
- if err != nil {
- err = errorinfo.NonRetriableError(err)
- }
- return res, err
-}
-
-func checkHTTPSForAuth(req *policy.Request, allowHTTP bool) error {
- if strings.ToLower(req.Raw().URL.Scheme) != "https" && !allowHTTP {
- return errorinfo.NonRetriableError(errors.New("authenticated requests are not permitted for non TLS protected (https) endpoints"))
- }
- return nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go
deleted file mode 100644
index 99dc029f..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go
+++ /dev/null
@@ -1,72 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "fmt"
- "net/http"
- "strings"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
-)
-
-// bodyDownloadPolicy creates a policy object that downloads the response's body to a []byte.
-func bodyDownloadPolicy(req *policy.Request) (*http.Response, error) {
- resp, err := req.Next()
- if err != nil {
- return resp, err
- }
- var opValues bodyDownloadPolicyOpValues
- // don't skip downloading error response bodies
- if req.OperationValue(&opValues); opValues.Skip && resp.StatusCode < 400 {
- return resp, err
- }
- // Either bodyDownloadPolicyOpValues was not specified (so skip is false)
- // or it was specified and skip is false: don't skip downloading the body
- _, err = Payload(resp)
- if err != nil {
- return resp, newBodyDownloadError(err, req)
- }
- return resp, err
-}
-
-// bodyDownloadPolicyOpValues is the struct containing the per-operation values
-type bodyDownloadPolicyOpValues struct {
- Skip bool
-}
-
-type bodyDownloadError struct {
- err error
-}
-
-func newBodyDownloadError(err error, req *policy.Request) error {
- // on failure, only retry the request for idempotent operations.
- // we currently identify them as DELETE, GET, and PUT requests.
- if m := strings.ToUpper(req.Raw().Method); m == http.MethodDelete || m == http.MethodGet || m == http.MethodPut {
- // error is safe for retry
- return err
- }
- // wrap error to avoid retries
- return &bodyDownloadError{
- err: err,
- }
-}
-
-func (b *bodyDownloadError) Error() string {
- return fmt.Sprintf("body download policy: %s", b.err.Error())
-}
-
-func (b *bodyDownloadError) NonRetriable() {
- // marker method
-}
-
-func (b *bodyDownloadError) Unwrap() error {
- return b.err
-}
-
-var _ errorinfo.NonRetriable = (*bodyDownloadError)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go
deleted file mode 100644
index c230af0a..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go
+++ /dev/null
@@ -1,40 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "context"
- "net/http"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
-)
-
-// newHTTPHeaderPolicy creates a policy object that adds custom HTTP headers to a request
-func httpHeaderPolicy(req *policy.Request) (*http.Response, error) {
- // check if any custom HTTP headers have been specified
- if header := req.Raw().Context().Value(shared.CtxWithHTTPHeaderKey{}); header != nil {
- for k, v := range header.(http.Header) {
- // use Set to replace any existing value
- // it also canonicalizes the header key
- req.Raw().Header.Set(k, v[0])
- // add any remaining values
- for i := 1; i < len(v); i++ {
- req.Raw().Header.Add(k, v[i])
- }
- }
- }
- return req.Next()
-}
-
-// WithHTTPHeader adds the specified http.Header to the parent context.
-// Use this to specify custom HTTP headers at the API-call level.
-// Any overlapping headers will have their values replaced with the values specified here.
-// Deprecated: use [policy.WithHTTPHeader] instead.
-func WithHTTPHeader(parent context.Context, header http.Header) context.Context {
- return policy.WithHTTPHeader(parent, header)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go
deleted file mode 100644
index 3df1c121..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go
+++ /dev/null
@@ -1,143 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "context"
- "errors"
- "fmt"
- "net/http"
- "net/url"
- "strings"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
-)
-
-const (
- attrHTTPMethod = "http.method"
- attrHTTPURL = "http.url"
- attrHTTPUserAgent = "http.user_agent"
- attrHTTPStatusCode = "http.status_code"
-
- attrAZClientReqID = "az.client_request_id"
- attrAZServiceReqID = "az.service_request_id"
-
- attrNetPeerName = "net.peer.name"
-)
-
-// newHTTPTracePolicy creates a new instance of the httpTracePolicy.
-// - allowedQueryParams contains the user-specified query parameters that don't need to be redacted from the trace
-func newHTTPTracePolicy(allowedQueryParams []string) exported.Policy {
- return &httpTracePolicy{allowedQP: getAllowedQueryParams(allowedQueryParams)}
-}
-
-// httpTracePolicy is a policy that creates a trace for the HTTP request and its response
-type httpTracePolicy struct {
- allowedQP map[string]struct{}
-}
-
-// Do implements the pipeline.Policy interfaces for the httpTracePolicy type.
-func (h *httpTracePolicy) Do(req *policy.Request) (resp *http.Response, err error) {
- rawTracer := req.Raw().Context().Value(shared.CtxWithTracingTracer{})
- if tracer, ok := rawTracer.(tracing.Tracer); ok && tracer.Enabled() {
- attributes := []tracing.Attribute{
- {Key: attrHTTPMethod, Value: req.Raw().Method},
- {Key: attrHTTPURL, Value: getSanitizedURL(*req.Raw().URL, h.allowedQP)},
- {Key: attrNetPeerName, Value: req.Raw().URL.Host},
- }
-
- if ua := req.Raw().Header.Get(shared.HeaderUserAgent); ua != "" {
- attributes = append(attributes, tracing.Attribute{Key: attrHTTPUserAgent, Value: ua})
- }
- if reqID := req.Raw().Header.Get(shared.HeaderXMSClientRequestID); reqID != "" {
- attributes = append(attributes, tracing.Attribute{Key: attrAZClientReqID, Value: reqID})
- }
-
- ctx := req.Raw().Context()
- ctx, span := tracer.Start(ctx, "HTTP "+req.Raw().Method, &tracing.SpanOptions{
- Kind: tracing.SpanKindClient,
- Attributes: attributes,
- })
-
- defer func() {
- if resp != nil {
- span.SetAttributes(tracing.Attribute{Key: attrHTTPStatusCode, Value: resp.StatusCode})
- if resp.StatusCode > 399 {
- span.SetStatus(tracing.SpanStatusError, resp.Status)
- }
- if reqID := resp.Header.Get(shared.HeaderXMSRequestID); reqID != "" {
- span.SetAttributes(tracing.Attribute{Key: attrAZServiceReqID, Value: reqID})
- }
- } else if err != nil {
- var urlErr *url.Error
- if errors.As(err, &urlErr) {
- // calling *url.Error.Error() will include the unsanitized URL
- // which we don't want. in addition, we already have the HTTP verb
- // and sanitized URL in the trace so we aren't losing any info
- err = urlErr.Err
- }
- span.SetStatus(tracing.SpanStatusError, err.Error())
- }
- span.End()
- }()
-
- req = req.WithContext(ctx)
- }
- resp, err = req.Next()
- return
-}
-
-// StartSpanOptions contains the optional values for StartSpan.
-type StartSpanOptions struct {
- // for future expansion
-}
-
-// StartSpan starts a new tracing span.
-// You must call the returned func to terminate the span. Pass the applicable error
-// if the span will exit with an error condition.
-// - ctx is the parent context of the newly created context
-// - name is the name of the span. this is typically the fully qualified name of an API ("Client.Method")
-// - tracer is the client's Tracer for creating spans
-// - options contains optional values. pass nil to accept any default values
-func StartSpan(ctx context.Context, name string, tracer tracing.Tracer, options *StartSpanOptions) (context.Context, func(error)) {
- if !tracer.Enabled() {
- return ctx, func(err error) {}
- }
-
- // we MUST propagate the active tracer before returning so that the trace policy can access it
- ctx = context.WithValue(ctx, shared.CtxWithTracingTracer{}, tracer)
-
- const newSpanKind = tracing.SpanKindInternal
- if activeSpan := ctx.Value(ctxActiveSpan{}); activeSpan != nil {
- // per the design guidelines, if a SDK method Foo() calls SDK method Bar(),
- // then the span for Bar() must be suppressed. however, if Bar() makes a REST
- // call, then Bar's HTTP span must be a child of Foo's span.
- // however, there is an exception to this rule. if the SDK method Foo() is a
- // messaging producer/consumer, and it takes a callback that's a SDK method
- // Bar(), then the span for Bar() must _not_ be suppressed.
- if kind := activeSpan.(tracing.SpanKind); kind == tracing.SpanKindClient || kind == tracing.SpanKindInternal {
- return ctx, func(err error) {}
- }
- }
- ctx, span := tracer.Start(ctx, name, &tracing.SpanOptions{
- Kind: newSpanKind,
- })
- ctx = context.WithValue(ctx, ctxActiveSpan{}, newSpanKind)
- return ctx, func(err error) {
- if err != nil {
- errType := strings.Replace(fmt.Sprintf("%T", err), "*exported.", "*azcore.", 1)
- span.SetStatus(tracing.SpanStatusError, fmt.Sprintf("%s:\n%s", errType, err.Error()))
- }
- span.End()
- }
-}
-
-// ctxActiveSpan is used as a context key for indicating a SDK client span is in progress.
-type ctxActiveSpan struct{}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go
deleted file mode 100644
index bb00f6c2..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go
+++ /dev/null
@@ -1,35 +0,0 @@
-//go:build go1.16
-// +build go1.16
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "context"
- "net/http"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
-)
-
-// includeResponsePolicy creates a policy that retrieves the raw HTTP response upon request
-func includeResponsePolicy(req *policy.Request) (*http.Response, error) {
- resp, err := req.Next()
- if resp == nil {
- return resp, err
- }
- if httpOutRaw := req.Raw().Context().Value(shared.CtxWithCaptureResponse{}); httpOutRaw != nil {
- httpOut := httpOutRaw.(**http.Response)
- *httpOut = resp
- }
- return resp, err
-}
-
-// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context.
-// The resp parameter will contain the HTTP response after the request has completed.
-// Deprecated: use [policy.WithCaptureResponse] instead.
-func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context {
- return policy.WithCaptureResponse(parent, resp)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go
deleted file mode 100644
index eeb1c09c..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "net/http"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
-)
-
-// KeyCredentialPolicy authorizes requests with a [azcore.KeyCredential].
-type KeyCredentialPolicy struct {
- cred *exported.KeyCredential
- header string
- prefix string
- allowHTTP bool
-}
-
-// KeyCredentialPolicyOptions contains the optional values configuring [KeyCredentialPolicy].
-type KeyCredentialPolicyOptions struct {
- // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP.
- // By default, authenticated requests to an HTTP endpoint are rejected by the client.
- // WARNING: setting this to true will allow sending the authentication key in clear text. Use with caution.
- InsecureAllowCredentialWithHTTP bool
-
- // Prefix is used if the key requires a prefix before it's inserted into the HTTP request.
- Prefix string
-}
-
-// NewKeyCredentialPolicy creates a new instance of [KeyCredentialPolicy].
-// - cred is the [azcore.KeyCredential] used to authenticate with the service
-// - header is the name of the HTTP request header in which the key is placed
-// - options contains optional configuration, pass nil to accept the default values
-func NewKeyCredentialPolicy(cred *exported.KeyCredential, header string, options *KeyCredentialPolicyOptions) *KeyCredentialPolicy {
- if options == nil {
- options = &KeyCredentialPolicyOptions{}
- }
- return &KeyCredentialPolicy{
- cred: cred,
- header: header,
- prefix: options.Prefix,
- allowHTTP: options.InsecureAllowCredentialWithHTTP,
- }
-}
-
-// Do implementes the Do method on the [policy.Polilcy] interface.
-func (k *KeyCredentialPolicy) Do(req *policy.Request) (*http.Response, error) {
- // skip adding the authorization header if no KeyCredential was provided.
- // this prevents a panic that might be hard to diagnose and allows testing
- // against http endpoints that don't require authentication.
- if k.cred != nil {
- if err := checkHTTPSForAuth(req, k.allowHTTP); err != nil {
- return nil, err
- }
- val := exported.KeyCredentialGet(k.cred)
- if k.prefix != "" {
- val = k.prefix + val
- }
- req.Raw().Header.Add(k.header, val)
- }
- return req.Next()
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go
deleted file mode 100644
index f048d7fb..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go
+++ /dev/null
@@ -1,264 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "bytes"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "sort"
- "strings"
- "time"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/diag"
-)
-
-type logPolicy struct {
- includeBody bool
- allowedHeaders map[string]struct{}
- allowedQP map[string]struct{}
-}
-
-// NewLogPolicy creates a request/response logging policy object configured using the specified options.
-// Pass nil to accept the default values; this is the same as passing a zero-value options.
-func NewLogPolicy(o *policy.LogOptions) policy.Policy {
- if o == nil {
- o = &policy.LogOptions{}
- }
- // construct default hash set of allowed headers
- allowedHeaders := map[string]struct{}{
- "accept": {},
- "cache-control": {},
- "connection": {},
- "content-length": {},
- "content-type": {},
- "date": {},
- "etag": {},
- "expires": {},
- "if-match": {},
- "if-modified-since": {},
- "if-none-match": {},
- "if-unmodified-since": {},
- "last-modified": {},
- "ms-cv": {},
- "pragma": {},
- "request-id": {},
- "retry-after": {},
- "server": {},
- "traceparent": {},
- "transfer-encoding": {},
- "user-agent": {},
- "www-authenticate": {},
- "x-ms-request-id": {},
- "x-ms-client-request-id": {},
- "x-ms-return-client-request-id": {},
- }
- // add any caller-specified allowed headers to the set
- for _, ah := range o.AllowedHeaders {
- allowedHeaders[strings.ToLower(ah)] = struct{}{}
- }
- // now do the same thing for query params
- allowedQP := getAllowedQueryParams(o.AllowedQueryParams)
- return &logPolicy{
- includeBody: o.IncludeBody,
- allowedHeaders: allowedHeaders,
- allowedQP: allowedQP,
- }
-}
-
-// getAllowedQueryParams merges the default set of allowed query parameters
-// with a custom set (usually comes from client options).
-func getAllowedQueryParams(customAllowedQP []string) map[string]struct{} {
- allowedQP := map[string]struct{}{
- "api-version": {},
- }
- for _, qp := range customAllowedQP {
- allowedQP[strings.ToLower(qp)] = struct{}{}
- }
- return allowedQP
-}
-
-// logPolicyOpValues is the struct containing the per-operation values
-type logPolicyOpValues struct {
- try int32
- start time.Time
-}
-
-func (p *logPolicy) Do(req *policy.Request) (*http.Response, error) {
- // Get the per-operation values. These are saved in the Message's map so that they persist across each retry calling into this policy object.
- var opValues logPolicyOpValues
- if req.OperationValue(&opValues); opValues.start.IsZero() {
- opValues.start = time.Now() // If this is the 1st try, record this operation's start time
- }
- opValues.try++ // The first try is #1 (not #0)
- req.SetOperationValue(opValues)
-
- // Log the outgoing request as informational
- if log.Should(log.EventRequest) {
- b := &bytes.Buffer{}
- fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", opValues.try)
- p.writeRequestWithResponse(b, req, nil, nil)
- var err error
- if p.includeBody {
- err = writeReqBody(req, b)
- }
- log.Write(log.EventRequest, b.String())
- if err != nil {
- return nil, err
- }
- }
-
- // Set the time for this particular retry operation and then Do the operation.
- tryStart := time.Now()
- response, err := req.Next() // Make the request
- tryEnd := time.Now()
- tryDuration := tryEnd.Sub(tryStart)
- opDuration := tryEnd.Sub(opValues.start)
-
- if log.Should(log.EventResponse) {
- // We're going to log this; build the string to log
- b := &bytes.Buffer{}
- fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v, OpTime=%v) -- ", opValues.try, tryDuration, opDuration)
- if err != nil { // This HTTP request did not get a response from the service
- fmt.Fprint(b, "REQUEST ERROR\n")
- } else {
- fmt.Fprint(b, "RESPONSE RECEIVED\n")
- }
-
- p.writeRequestWithResponse(b, req, response, err)
- if err != nil {
- // skip frames runtime.Callers() and runtime.StackTrace()
- b.WriteString(diag.StackTrace(2, 32))
- } else if p.includeBody {
- err = writeRespBody(response, b)
- }
- log.Write(log.EventResponse, b.String())
- }
- return response, err
-}
-
-const redactedValue = "REDACTED"
-
-// getSanitizedURL returns a sanitized string for the provided url.URL
-func getSanitizedURL(u url.URL, allowedQueryParams map[string]struct{}) string {
- // redact applicable query params
- qp := u.Query()
- for k := range qp {
- if _, ok := allowedQueryParams[strings.ToLower(k)]; !ok {
- qp.Set(k, redactedValue)
- }
- }
- u.RawQuery = qp.Encode()
- return u.String()
-}
-
-// writeRequestWithResponse appends a formatted HTTP request into a Buffer. If request and/or err are
-// not nil, then these are also written into the Buffer.
-func (p *logPolicy) writeRequestWithResponse(b *bytes.Buffer, req *policy.Request, resp *http.Response, err error) {
- // Write the request into the buffer.
- fmt.Fprint(b, " "+req.Raw().Method+" "+getSanitizedURL(*req.Raw().URL, p.allowedQP)+"\n")
- p.writeHeader(b, req.Raw().Header)
- if resp != nil {
- fmt.Fprintln(b, " --------------------------------------------------------------------------------")
- fmt.Fprint(b, " RESPONSE Status: "+resp.Status+"\n")
- p.writeHeader(b, resp.Header)
- }
- if err != nil {
- fmt.Fprintln(b, " --------------------------------------------------------------------------------")
- fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n")
- }
-}
-
-// formatHeaders appends an HTTP request's or response's header into a Buffer.
-func (p *logPolicy) writeHeader(b *bytes.Buffer, header http.Header) {
- if len(header) == 0 {
- b.WriteString(" (no headers)\n")
- return
- }
- keys := make([]string, 0, len(header))
- // Alphabetize the headers
- for k := range header {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- for _, k := range keys {
- // don't use Get() as it will canonicalize k which might cause a mismatch
- value := header[k][0]
- // redact all header values not in the allow-list
- if _, ok := p.allowedHeaders[strings.ToLower(k)]; !ok {
- value = redactedValue
- }
- fmt.Fprintf(b, " %s: %+v\n", k, value)
- }
-}
-
-// returns true if the request/response body should be logged.
-// this is determined by looking at the content-type header value.
-func shouldLogBody(b *bytes.Buffer, contentType string) bool {
- contentType = strings.ToLower(contentType)
- if strings.HasPrefix(contentType, "text") ||
- strings.Contains(contentType, "json") ||
- strings.Contains(contentType, "xml") {
- return true
- }
- fmt.Fprintf(b, " Skip logging body for %s\n", contentType)
- return false
-}
-
-// writes to a buffer, used for logging purposes
-func writeReqBody(req *policy.Request, b *bytes.Buffer) error {
- if req.Raw().Body == nil {
- fmt.Fprint(b, " Request contained no body\n")
- return nil
- }
- if ct := req.Raw().Header.Get(shared.HeaderContentType); !shouldLogBody(b, ct) {
- return nil
- }
- body, err := io.ReadAll(req.Raw().Body)
- if err != nil {
- fmt.Fprintf(b, " Failed to read request body: %s\n", err.Error())
- return err
- }
- if err := req.RewindBody(); err != nil {
- return err
- }
- logBody(b, body)
- return nil
-}
-
-// writes to a buffer, used for logging purposes
-func writeRespBody(resp *http.Response, b *bytes.Buffer) error {
- ct := resp.Header.Get(shared.HeaderContentType)
- if ct == "" {
- fmt.Fprint(b, " Response contained no body\n")
- return nil
- } else if !shouldLogBody(b, ct) {
- return nil
- }
- body, err := Payload(resp)
- if err != nil {
- fmt.Fprintf(b, " Failed to read response body: %s\n", err.Error())
- return err
- }
- if len(body) > 0 {
- logBody(b, body)
- } else {
- fmt.Fprint(b, " Response contained no body\n")
- }
- return nil
-}
-
-func logBody(b *bytes.Buffer, body []byte) {
- fmt.Fprintln(b, " --------------------------------------------------------------------------------")
- fmt.Fprintln(b, string(body))
- fmt.Fprintln(b, " --------------------------------------------------------------------------------")
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go
deleted file mode 100644
index 360a7f21..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go
+++ /dev/null
@@ -1,34 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "net/http"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
-)
-
-type requestIDPolicy struct{}
-
-// NewRequestIDPolicy returns a policy that add the x-ms-client-request-id header
-func NewRequestIDPolicy() policy.Policy {
- return &requestIDPolicy{}
-}
-
-func (r *requestIDPolicy) Do(req *policy.Request) (*http.Response, error) {
- if req.Raw().Header.Get(shared.HeaderXMSClientRequestID) == "" {
- id, err := uuid.New()
- if err != nil {
- return nil, err
- }
- req.Raw().Header.Set(shared.HeaderXMSClientRequestID, id.String())
- }
-
- return req.Next()
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go
deleted file mode 100644
index 04d7bb4e..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go
+++ /dev/null
@@ -1,255 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "context"
- "errors"
- "io"
- "math"
- "math/rand"
- "net/http"
- "time"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/exported"
-)
-
-const (
- defaultMaxRetries = 3
-)
-
-func setDefaults(o *policy.RetryOptions) {
- if o.MaxRetries == 0 {
- o.MaxRetries = defaultMaxRetries
- } else if o.MaxRetries < 0 {
- o.MaxRetries = 0
- }
-
- // SDK guidelines specify the default MaxRetryDelay is 60 seconds
- if o.MaxRetryDelay == 0 {
- o.MaxRetryDelay = 60 * time.Second
- } else if o.MaxRetryDelay < 0 {
- // not really an unlimited cap, but sufficiently large enough to be considered as such
- o.MaxRetryDelay = math.MaxInt64
- }
- if o.RetryDelay == 0 {
- o.RetryDelay = 800 * time.Millisecond
- } else if o.RetryDelay < 0 {
- o.RetryDelay = 0
- }
- if o.StatusCodes == nil {
- // NOTE: if you change this list, you MUST update the docs in policy/policy.go
- o.StatusCodes = []int{
- http.StatusRequestTimeout, // 408
- http.StatusTooManyRequests, // 429
- http.StatusInternalServerError, // 500
- http.StatusBadGateway, // 502
- http.StatusServiceUnavailable, // 503
- http.StatusGatewayTimeout, // 504
- }
- }
-}
-
-func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0
- delay := time.Duration((1< o.MaxRetryDelay {
- delay = o.MaxRetryDelay
- }
- return delay
-}
-
-// NewRetryPolicy creates a policy object configured using the specified options.
-// Pass nil to accept the default values; this is the same as passing a zero-value options.
-func NewRetryPolicy(o *policy.RetryOptions) policy.Policy {
- if o == nil {
- o = &policy.RetryOptions{}
- }
- p := &retryPolicy{options: *o}
- return p
-}
-
-type retryPolicy struct {
- options policy.RetryOptions
-}
-
-func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
- options := p.options
- // check if the retry options have been overridden for this call
- if override := req.Raw().Context().Value(shared.CtxWithRetryOptionsKey{}); override != nil {
- options = override.(policy.RetryOptions)
- }
- setDefaults(&options)
- // Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2)
- // When to retry: connection failure or temporary/timeout.
- var rwbody *retryableRequestBody
- if req.Body() != nil {
- // wrap the body so we control when it's actually closed.
- // do this outside the for loop so defers don't accumulate.
- rwbody = &retryableRequestBody{body: req.Body()}
- defer rwbody.realClose()
- }
- try := int32(1)
- for {
- resp = nil // reset
- log.Writef(log.EventRetryPolicy, "=====> Try=%d", try)
-
- // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because
- // the stream may not be at offset 0 when we first get it and we want the same behavior for the
- // 1st try as for additional tries.
- err = req.RewindBody()
- if err != nil {
- return
- }
- // RewindBody() restores Raw().Body to its original state, so set our rewindable after
- if rwbody != nil {
- req.Raw().Body = rwbody
- }
-
- if options.TryTimeout == 0 {
- clone := req.Clone(req.Raw().Context())
- resp, err = clone.Next()
- } else {
- // Set the per-try time for this particular retry operation and then Do the operation.
- tryCtx, tryCancel := context.WithTimeout(req.Raw().Context(), options.TryTimeout)
- clone := req.Clone(tryCtx)
- resp, err = clone.Next() // Make the request
- // if the body was already downloaded or there was an error it's safe to cancel the context now
- if err != nil {
- tryCancel()
- } else if exported.PayloadDownloaded(resp) {
- tryCancel()
- } else {
- // must cancel the context after the body has been read and closed
- resp.Body = &contextCancelReadCloser{cf: tryCancel, body: resp.Body}
- }
- }
- if err == nil {
- log.Writef(log.EventRetryPolicy, "response %d", resp.StatusCode)
- } else {
- log.Writef(log.EventRetryPolicy, "error %v", err)
- }
-
- if ctxErr := req.Raw().Context().Err(); ctxErr != nil {
- // don't retry if the parent context has been cancelled or its deadline exceeded
- err = ctxErr
- log.Writef(log.EventRetryPolicy, "abort due to %v", err)
- return
- }
-
- // check if the error is not retriable
- var nre errorinfo.NonRetriable
- if errors.As(err, &nre) {
- // the error says it's not retriable so don't retry
- log.Writef(log.EventRetryPolicy, "non-retriable error %T", nre)
- return
- }
-
- if options.ShouldRetry != nil {
- // a non-nil ShouldRetry overrides our HTTP status code check
- if !options.ShouldRetry(resp, err) {
- // predicate says we shouldn't retry
- log.Write(log.EventRetryPolicy, "exit due to ShouldRetry")
- return
- }
- } else if err == nil && !HasStatusCode(resp, options.StatusCodes...) {
- // if there is no error and the response code isn't in the list of retry codes then we're done.
- log.Write(log.EventRetryPolicy, "exit due to non-retriable status code")
- return
- }
-
- if try == options.MaxRetries+1 {
- // max number of tries has been reached, don't sleep again
- log.Writef(log.EventRetryPolicy, "MaxRetries %d exceeded", options.MaxRetries)
- return
- }
-
- // use the delay from retry-after if available
- delay := shared.RetryAfter(resp)
- if delay <= 0 {
- delay = calcDelay(options, try)
- } else if delay > options.MaxRetryDelay {
- // the retry-after delay exceeds the the cap so don't retry
- log.Writef(log.EventRetryPolicy, "Retry-After delay %s exceeds MaxRetryDelay of %s", delay, options.MaxRetryDelay)
- return
- }
-
- // drain before retrying so nothing is leaked
- Drain(resp)
-
- log.Writef(log.EventRetryPolicy, "End Try #%d, Delay=%v", try, delay)
- select {
- case <-time.After(delay):
- try++
- case <-req.Raw().Context().Done():
- err = req.Raw().Context().Err()
- log.Writef(log.EventRetryPolicy, "abort due to %v", err)
- return
- }
- }
-}
-
-// WithRetryOptions adds the specified RetryOptions to the parent context.
-// Use this to specify custom RetryOptions at the API-call level.
-// Deprecated: use [policy.WithRetryOptions] instead.
-func WithRetryOptions(parent context.Context, options policy.RetryOptions) context.Context {
- return policy.WithRetryOptions(parent, options)
-}
-
-// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser)
-
-// This struct is used when sending a body to the network
-type retryableRequestBody struct {
- body io.ReadSeeker // Seeking is required to support retries
-}
-
-// Read reads a block of data from an inner stream and reports progress
-func (b *retryableRequestBody) Read(p []byte) (n int, err error) {
- return b.body.Read(p)
-}
-
-func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) {
- return b.body.Seek(offset, whence)
-}
-
-func (b *retryableRequestBody) Close() error {
- // We don't want the underlying transport to close the request body on transient failures so this is a nop.
- // The retry policy closes the request body upon success.
- return nil
-}
-
-func (b *retryableRequestBody) realClose() error {
- if c, ok := b.body.(io.Closer); ok {
- return c.Close()
- }
- return nil
-}
-
-// ********** The following type/methods implement the contextCancelReadCloser
-
-// contextCancelReadCloser combines an io.ReadCloser with a cancel func.
-// it ensures the cancel func is invoked once the body has been read and closed.
-type contextCancelReadCloser struct {
- cf context.CancelFunc
- body io.ReadCloser
-}
-
-func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) {
- return rc.body.Read(p)
-}
-
-func (rc *contextCancelReadCloser) Close() error {
- err := rc.body.Close()
- rc.cf()
- return err
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go
deleted file mode 100644
index 3964beea..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "net/http"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
-)
-
-// SASCredentialPolicy authorizes requests with a [azcore.SASCredential].
-type SASCredentialPolicy struct {
- cred *exported.SASCredential
- header string
- allowHTTP bool
-}
-
-// SASCredentialPolicyOptions contains the optional values configuring [SASCredentialPolicy].
-type SASCredentialPolicyOptions struct {
- // InsecureAllowCredentialWithHTTP enables authenticated requests over HTTP.
- // By default, authenticated requests to an HTTP endpoint are rejected by the client.
- // WARNING: setting this to true will allow sending the authentication key in clear text. Use with caution.
- InsecureAllowCredentialWithHTTP bool
-}
-
-// NewSASCredentialPolicy creates a new instance of [SASCredentialPolicy].
-// - cred is the [azcore.SASCredential] used to authenticate with the service
-// - header is the name of the HTTP request header in which the shared access signature is placed
-// - options contains optional configuration, pass nil to accept the default values
-func NewSASCredentialPolicy(cred *exported.SASCredential, header string, options *SASCredentialPolicyOptions) *SASCredentialPolicy {
- if options == nil {
- options = &SASCredentialPolicyOptions{}
- }
- return &SASCredentialPolicy{
- cred: cred,
- header: header,
- allowHTTP: options.InsecureAllowCredentialWithHTTP,
- }
-}
-
-// Do implementes the Do method on the [policy.Polilcy] interface.
-func (k *SASCredentialPolicy) Do(req *policy.Request) (*http.Response, error) {
- // skip adding the authorization header if no SASCredential was provided.
- // this prevents a panic that might be hard to diagnose and allows testing
- // against http endpoints that don't require authentication.
- if k.cred != nil {
- if err := checkHTTPSForAuth(req, k.allowHTTP); err != nil {
- return nil, err
- }
- req.Raw().Header.Add(k.header, exported.SASCredentialGet(k.cred))
- }
- return req.Next()
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go
deleted file mode 100644
index 80a90354..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go
+++ /dev/null
@@ -1,83 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "bytes"
- "fmt"
- "net/http"
- "os"
- "runtime"
- "strings"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
-)
-
-type telemetryPolicy struct {
- telemetryValue string
-}
-
-// NewTelemetryPolicy creates a telemetry policy object that adds telemetry information to outgoing HTTP requests.
-// The format is [ ]azsdk-go-/ .
-// Pass nil to accept the default values; this is the same as passing a zero-value options.
-func NewTelemetryPolicy(mod, ver string, o *policy.TelemetryOptions) policy.Policy {
- if o == nil {
- o = &policy.TelemetryOptions{}
- }
- tp := telemetryPolicy{}
- if o.Disabled {
- return &tp
- }
- b := &bytes.Buffer{}
- // normalize ApplicationID
- if o.ApplicationID != "" {
- o.ApplicationID = strings.ReplaceAll(o.ApplicationID, " ", "/")
- if len(o.ApplicationID) > 24 {
- o.ApplicationID = o.ApplicationID[:24]
- }
- b.WriteString(o.ApplicationID)
- b.WriteRune(' ')
- }
- // mod might be the fully qualified name. in that case, we just want the package name
- if i := strings.LastIndex(mod, "/"); i > -1 {
- mod = mod[i+1:]
- }
- b.WriteString(formatTelemetry(mod, ver))
- b.WriteRune(' ')
- b.WriteString(platformInfo)
- tp.telemetryValue = b.String()
- return &tp
-}
-
-func formatTelemetry(comp, ver string) string {
- return fmt.Sprintf("azsdk-go-%s/%s", comp, ver)
-}
-
-func (p telemetryPolicy) Do(req *policy.Request) (*http.Response, error) {
- if p.telemetryValue == "" {
- return req.Next()
- }
- // preserve the existing User-Agent string
- if ua := req.Raw().Header.Get(shared.HeaderUserAgent); ua != "" {
- p.telemetryValue = fmt.Sprintf("%s %s", p.telemetryValue, ua)
- }
- req.Raw().Header.Set(shared.HeaderUserAgent, p.telemetryValue)
- return req.Next()
-}
-
-// NOTE: the ONLY function that should write to this variable is this func
-var platformInfo = func() string {
- operatingSystem := runtime.GOOS // Default OS string
- switch operatingSystem {
- case "windows":
- operatingSystem = os.Getenv("OS") // Get more specific OS information
- case "linux": // accept default OS info
- case "freebsd": // accept default OS info
- }
- return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem)
-}()
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go
deleted file mode 100644
index 03f76c9a..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go
+++ /dev/null
@@ -1,389 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "context"
- "encoding/json"
- "errors"
- "flag"
- "fmt"
- "net/http"
- "reflect"
- "strings"
- "time"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/fake"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/poller"
-)
-
-// FinalStateVia is the enumerated type for the possible final-state-via values.
-type FinalStateVia = pollers.FinalStateVia
-
-const (
- // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL.
- FinalStateViaAzureAsyncOp = pollers.FinalStateViaAzureAsyncOp
-
- // FinalStateViaLocation indicates the final payload comes from the Location URL.
- FinalStateViaLocation = pollers.FinalStateViaLocation
-
- // FinalStateViaOriginalURI indicates the final payload comes from the original URL.
- FinalStateViaOriginalURI = pollers.FinalStateViaOriginalURI
-
- // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL.
- FinalStateViaOpLocation = pollers.FinalStateViaOpLocation
-)
-
-// NewPollerOptions contains the optional parameters for NewPoller.
-type NewPollerOptions[T any] struct {
- // FinalStateVia contains the final-state-via value for the LRO.
- FinalStateVia FinalStateVia
-
- // Response contains a preconstructed response type.
- // The final payload will be unmarshaled into it and returned.
- Response *T
-
- // Handler[T] contains a custom polling implementation.
- Handler PollingHandler[T]
-
- // Tracer contains the Tracer from the client that's creating the Poller.
- Tracer tracing.Tracer
-}
-
-// NewPoller creates a Poller based on the provided initial response.
-func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPollerOptions[T]) (*Poller[T], error) {
- if options == nil {
- options = &NewPollerOptions[T]{}
- }
- result := options.Response
- if result == nil {
- result = new(T)
- }
- if options.Handler != nil {
- return &Poller[T]{
- op: options.Handler,
- resp: resp,
- result: result,
- tracer: options.Tracer,
- }, nil
- }
-
- defer resp.Body.Close()
- // this is a back-stop in case the swagger is incorrect (i.e. missing one or more status codes for success).
- // ideally the codegen should return an error if the initial response failed and not even create a poller.
- if !poller.StatusCodeValid(resp) {
- return nil, errors.New("the operation failed or was cancelled")
- }
-
- // determine the polling method
- var opr PollingHandler[T]
- var err error
- if fake.Applicable(resp) {
- opr, err = fake.New[T](pl, resp)
- } else if async.Applicable(resp) {
- // async poller must be checked first as it can also have a location header
- opr, err = async.New[T](pl, resp, options.FinalStateVia)
- } else if op.Applicable(resp) {
- // op poller must be checked before loc as it can also have a location header
- opr, err = op.New[T](pl, resp, options.FinalStateVia)
- } else if loc.Applicable(resp) {
- opr, err = loc.New[T](pl, resp)
- } else if body.Applicable(resp) {
- // must test body poller last as it's a subset of the other pollers.
- // TODO: this is ambiguous for PATCH/PUT if it returns a 200 with no polling headers (sync completion)
- opr, err = body.New[T](pl, resp)
- } else if m := resp.Request.Method; resp.StatusCode == http.StatusAccepted && (m == http.MethodDelete || m == http.MethodPost) {
- // if we get here it means we have a 202 with no polling headers.
- // for DELETE and POST this is a hard error per ARM RPC spec.
- return nil, errors.New("response is missing polling URL")
- } else {
- opr, err = pollers.NewNopPoller[T](resp)
- }
-
- if err != nil {
- return nil, err
- }
- return &Poller[T]{
- op: opr,
- resp: resp,
- result: result,
- tracer: options.Tracer,
- }, nil
-}
-
-// NewPollerFromResumeTokenOptions contains the optional parameters for NewPollerFromResumeToken.
-type NewPollerFromResumeTokenOptions[T any] struct {
- // Response contains a preconstructed response type.
- // The final payload will be unmarshaled into it and returned.
- Response *T
-
- // Handler[T] contains a custom polling implementation.
- Handler PollingHandler[T]
-
- // Tracer contains the Tracer from the client that's creating the Poller.
- Tracer tracing.Tracer
-}
-
-// NewPollerFromResumeToken creates a Poller from a resume token string.
-func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options *NewPollerFromResumeTokenOptions[T]) (*Poller[T], error) {
- if options == nil {
- options = &NewPollerFromResumeTokenOptions[T]{}
- }
- result := options.Response
- if result == nil {
- result = new(T)
- }
-
- if err := pollers.IsTokenValid[T](token); err != nil {
- return nil, err
- }
- raw, err := pollers.ExtractToken(token)
- if err != nil {
- return nil, err
- }
- var asJSON map[string]any
- if err := json.Unmarshal(raw, &asJSON); err != nil {
- return nil, err
- }
-
- opr := options.Handler
- // now rehydrate the poller based on the encoded poller type
- if fake.CanResume(asJSON) {
- opr, _ = fake.New[T](pl, nil)
- } else if opr != nil {
- log.Writef(log.EventLRO, "Resuming custom poller %T.", opr)
- } else if async.CanResume(asJSON) {
- opr, _ = async.New[T](pl, nil, "")
- } else if body.CanResume(asJSON) {
- opr, _ = body.New[T](pl, nil)
- } else if loc.CanResume(asJSON) {
- opr, _ = loc.New[T](pl, nil)
- } else if op.CanResume(asJSON) {
- opr, _ = op.New[T](pl, nil, "")
- } else {
- return nil, fmt.Errorf("unhandled poller token %s", string(raw))
- }
- if err := json.Unmarshal(raw, &opr); err != nil {
- return nil, err
- }
- return &Poller[T]{
- op: opr,
- result: result,
- tracer: options.Tracer,
- }, nil
-}
-
-// PollingHandler[T] abstracts the differences among poller implementations.
-type PollingHandler[T any] interface {
- // Done returns true if the LRO has reached a terminal state.
- Done() bool
-
- // Poll fetches the latest state of the LRO.
- Poll(context.Context) (*http.Response, error)
-
- // Result is called once the LRO has reached a terminal state. It populates the out parameter
- // with the result of the operation.
- Result(ctx context.Context, out *T) error
-}
-
-// Poller encapsulates a long-running operation, providing polling facilities until the operation reaches a terminal state.
-type Poller[T any] struct {
- op PollingHandler[T]
- resp *http.Response
- err error
- result *T
- tracer tracing.Tracer
- done bool
-}
-
-// PollUntilDoneOptions contains the optional values for the Poller[T].PollUntilDone() method.
-type PollUntilDoneOptions struct {
- // Frequency is the time to wait between polling intervals in absence of a Retry-After header. Allowed minimum is one second.
- // Pass zero to accept the default value (30s).
- Frequency time.Duration
-}
-
-// PollUntilDone will poll the service endpoint until a terminal state is reached, an error is received, or the context expires.
-// It internally uses Poll(), Done(), and Result() in its polling loop, sleeping for the specified duration between intervals.
-// options: pass nil to accept the default values.
-// NOTE: the default polling frequency is 30 seconds which works well for most operations. However, some operations might
-// benefit from a shorter or longer duration.
-func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOptions) (res T, err error) {
- if options == nil {
- options = &PollUntilDoneOptions{}
- }
- cp := *options
- if cp.Frequency == 0 {
- cp.Frequency = 30 * time.Second
- }
-
- ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.PollUntilDone", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil)
- defer func() { endSpan(err) }()
-
- // skip the floor check when executing tests so they don't take so long
- if isTest := flag.Lookup("test.v"); isTest == nil && cp.Frequency < time.Second {
- err = errors.New("polling frequency minimum is one second")
- return
- }
-
- start := time.Now()
- logPollUntilDoneExit := func(v any) {
- log.Writef(log.EventLRO, "END PollUntilDone() for %T: %v, total time: %s", p.op, v, time.Since(start))
- }
- log.Writef(log.EventLRO, "BEGIN PollUntilDone() for %T", p.op)
- if p.resp != nil {
- // initial check for a retry-after header existing on the initial response
- if retryAfter := shared.RetryAfter(p.resp); retryAfter > 0 {
- log.Writef(log.EventLRO, "initial Retry-After delay for %s", retryAfter.String())
- if err = shared.Delay(ctx, retryAfter); err != nil {
- logPollUntilDoneExit(err)
- return
- }
- }
- }
- // begin polling the endpoint until a terminal state is reached
- for {
- var resp *http.Response
- resp, err = p.Poll(ctx)
- if err != nil {
- logPollUntilDoneExit(err)
- return
- }
- if p.Done() {
- logPollUntilDoneExit("succeeded")
- res, err = p.Result(ctx)
- return
- }
- d := cp.Frequency
- if retryAfter := shared.RetryAfter(resp); retryAfter > 0 {
- log.Writef(log.EventLRO, "Retry-After delay for %s", retryAfter.String())
- d = retryAfter
- } else {
- log.Writef(log.EventLRO, "delay for %s", d.String())
- }
- if err = shared.Delay(ctx, d); err != nil {
- logPollUntilDoneExit(err)
- return
- }
- }
-}
-
-// Poll fetches the latest state of the LRO. It returns an HTTP response or error.
-// If Poll succeeds, the poller's state is updated and the HTTP response is returned.
-// If Poll fails, the poller's state is unmodified and the error is returned.
-// Calling Poll on an LRO that has reached a terminal state will return the last HTTP response.
-func (p *Poller[T]) Poll(ctx context.Context) (resp *http.Response, err error) {
- if p.Done() {
- // the LRO has reached a terminal state, don't poll again
- resp = p.resp
- return
- }
-
- ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.Poll", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil)
- defer func() { endSpan(err) }()
-
- resp, err = p.op.Poll(ctx)
- if err != nil {
- return
- }
- p.resp = resp
- return
-}
-
-// Done returns true if the LRO has reached a terminal state.
-// Once a terminal state is reached, call Result().
-func (p *Poller[T]) Done() bool {
- return p.op.Done()
-}
-
-// Result returns the result of the LRO and is meant to be used in conjunction with Poll and Done.
-// If the LRO completed successfully, a populated instance of T is returned.
-// If the LRO failed or was canceled, an *azcore.ResponseError error is returned.
-// Calling this on an LRO in a non-terminal state will return an error.
-func (p *Poller[T]) Result(ctx context.Context) (res T, err error) {
- if !p.Done() {
- err = errors.New("poller is in a non-terminal state")
- return
- }
- if p.done {
- // the result has already been retrieved, return the cached value
- if p.err != nil {
- err = p.err
- return
- }
- res = *p.result
- return
- }
-
- ctx, endSpan := StartSpan(ctx, fmt.Sprintf("%s.Result", shortenTypeName(reflect.TypeOf(*p).Name())), p.tracer, nil)
- defer func() { endSpan(err) }()
-
- err = p.op.Result(ctx, p.result)
- var respErr *exported.ResponseError
- if errors.As(err, &respErr) {
- if pollers.IsNonTerminalHTTPStatusCode(respErr.RawResponse) {
- // the request failed in a non-terminal way.
- // don't cache the error or mark the Poller as done
- return
- }
- // the LRO failed. record the error
- p.err = err
- } else if err != nil {
- // the call to Result failed, don't cache anything in this case
- return
- }
- p.done = true
- if p.err != nil {
- err = p.err
- return
- }
- res = *p.result
- return
-}
-
-// ResumeToken returns a value representing the poller that can be used to resume
-// the LRO at a later time. ResumeTokens are unique per service operation.
-// The token's format should be considered opaque and is subject to change.
-// Calling this on an LRO in a terminal state will return an error.
-func (p *Poller[T]) ResumeToken() (string, error) {
- if p.Done() {
- return "", errors.New("poller is in a terminal state")
- }
- tk, err := pollers.NewResumeToken[T](p.op)
- if err != nil {
- return "", err
- }
- return tk, err
-}
-
-// extracts the type name from the string returned from reflect.Value.Name()
-func shortenTypeName(s string) string {
- // the value is formatted as follows
- // Poller[module/Package.Type].Method
- // we want to shorten the generic type parameter string to Type
- // anything we don't recognize will be left as-is
- begin := strings.Index(s, "[")
- end := strings.Index(s, "]")
- if begin == -1 || end == -1 {
- return s
- }
-
- typeName := s[begin+1 : end]
- if i := strings.LastIndex(typeName, "."); i > -1 {
- typeName = typeName[i+1:]
- }
- return s[:begin+1] + typeName + s[end:]
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
deleted file mode 100644
index 40ddc8d9..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go
+++ /dev/null
@@ -1,275 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "encoding/xml"
- "errors"
- "fmt"
- "io"
- "mime/multipart"
- "net/textproto"
- "net/url"
- "path"
- "strings"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
-)
-
-// Base64Encoding is usesd to specify which base-64 encoder/decoder to use when
-// encoding/decoding a slice of bytes to/from a string.
-type Base64Encoding = exported.Base64Encoding
-
-const (
- // Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads.
- Base64StdFormat Base64Encoding = exported.Base64StdFormat
-
- // Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads.
- Base64URLFormat Base64Encoding = exported.Base64URLFormat
-)
-
-// NewRequest creates a new policy.Request with the specified input.
-// The endpoint MUST be properly encoded before calling this function.
-func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*policy.Request, error) {
- return exported.NewRequest(ctx, httpMethod, endpoint)
-}
-
-// EncodeQueryParams will parse and encode any query parameters in the specified URL.
-// Any semicolons will automatically be escaped.
-func EncodeQueryParams(u string) (string, error) {
- before, after, found := strings.Cut(u, "?")
- if !found {
- return u, nil
- }
- // starting in Go 1.17, url.ParseQuery will reject semicolons in query params.
- // so, we must escape them first. note that this assumes that semicolons aren't
- // being used as query param separators which is per the current RFC.
- // for more info:
- // https://github.com/golang/go/issues/25192
- // https://github.com/golang/go/issues/50034
- qp, err := url.ParseQuery(strings.ReplaceAll(after, ";", "%3B"))
- if err != nil {
- return "", err
- }
- return before + "?" + qp.Encode(), nil
-}
-
-// JoinPaths concatenates multiple URL path segments into one path,
-// inserting path separation characters as required. JoinPaths will preserve
-// query parameters in the root path
-func JoinPaths(root string, paths ...string) string {
- if len(paths) == 0 {
- return root
- }
-
- qps := ""
- if strings.Contains(root, "?") {
- splitPath := strings.Split(root, "?")
- root, qps = splitPath[0], splitPath[1]
- }
-
- p := path.Join(paths...)
- // path.Join will remove any trailing slashes.
- // if one was provided, preserve it.
- if strings.HasSuffix(paths[len(paths)-1], "/") && !strings.HasSuffix(p, "/") {
- p += "/"
- }
-
- if qps != "" {
- p = p + "?" + qps
- }
-
- if strings.HasSuffix(root, "/") && strings.HasPrefix(p, "/") {
- root = root[:len(root)-1]
- } else if !strings.HasSuffix(root, "/") && !strings.HasPrefix(p, "/") {
- p = "/" + p
- }
- return root + p
-}
-
-// EncodeByteArray will base-64 encode the byte slice v.
-func EncodeByteArray(v []byte, format Base64Encoding) string {
- return exported.EncodeByteArray(v, format)
-}
-
-// MarshalAsByteArray will base-64 encode the byte slice v, then calls SetBody.
-// The encoded value is treated as a JSON string.
-func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) error {
- // send as a JSON string
- encode := fmt.Sprintf("\"%s\"", EncodeByteArray(v, format))
- // tsp generated code can set Content-Type so we must prefer that
- return exported.SetBody(req, exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON, false)
-}
-
-// MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody.
-func MarshalAsJSON(req *policy.Request, v any) error {
- b, err := json.Marshal(v)
- if err != nil {
- return fmt.Errorf("error marshalling type %T: %s", v, err)
- }
- // tsp generated code can set Content-Type so we must prefer that
- return exported.SetBody(req, exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON, false)
-}
-
-// MarshalAsXML calls xml.Marshal() to get the XML encoding of v then calls SetBody.
-func MarshalAsXML(req *policy.Request, v any) error {
- b, err := xml.Marshal(v)
- if err != nil {
- return fmt.Errorf("error marshalling type %T: %s", v, err)
- }
- // inclue the XML header as some services require it
- b = []byte(xml.Header + string(b))
- return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppXML)
-}
-
-// SetMultipartFormData writes the specified keys/values as multi-part form fields with the specified value.
-// File content must be specified as an [io.ReadSeekCloser] or [streaming.MultipartContent].
-// Byte slices will be treated as JSON. All other values are treated as string values.
-func SetMultipartFormData(req *policy.Request, formData map[string]any) error {
- body := bytes.Buffer{}
- writer := multipart.NewWriter(&body)
-
- writeContent := func(fieldname, filename string, src io.Reader) error {
- fd, err := writer.CreateFormFile(fieldname, filename)
- if err != nil {
- return err
- }
- // copy the data to the form file
- if _, err = io.Copy(fd, src); err != nil {
- return err
- }
- return nil
- }
-
- quoteEscaper := strings.NewReplacer("\\", "\\\\", `"`, "\\\"")
-
- writeMultipartContent := func(fieldname string, mpc streaming.MultipartContent) error {
- if mpc.Body == nil {
- return errors.New("streaming.MultipartContent.Body cannot be nil")
- }
-
- // use fieldname for the file name when unspecified
- filename := fieldname
-
- if mpc.ContentType == "" && mpc.Filename == "" {
- return writeContent(fieldname, filename, mpc.Body)
- }
- if mpc.Filename != "" {
- filename = mpc.Filename
- }
- // this is pretty much copied from multipart.Writer.CreateFormFile
- // but lets us set the caller provided Content-Type and filename
- h := make(textproto.MIMEHeader)
- h.Set("Content-Disposition",
- fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
- quoteEscaper.Replace(fieldname), quoteEscaper.Replace(filename)))
- contentType := "application/octet-stream"
- if mpc.ContentType != "" {
- contentType = mpc.ContentType
- }
- h.Set("Content-Type", contentType)
- fd, err := writer.CreatePart(h)
- if err != nil {
- return err
- }
- // copy the data to the form file
- if _, err = io.Copy(fd, mpc.Body); err != nil {
- return err
- }
- return nil
- }
-
- // the same as multipart.Writer.WriteField but lets us specify the Content-Type
- writeField := func(fieldname, contentType string, value string) error {
- h := make(textproto.MIMEHeader)
- h.Set("Content-Disposition",
- fmt.Sprintf(`form-data; name="%s"`, quoteEscaper.Replace(fieldname)))
- h.Set("Content-Type", contentType)
- fd, err := writer.CreatePart(h)
- if err != nil {
- return err
- }
- if _, err = fd.Write([]byte(value)); err != nil {
- return err
- }
- return nil
- }
-
- for k, v := range formData {
- if rsc, ok := v.(io.ReadSeekCloser); ok {
- if err := writeContent(k, k, rsc); err != nil {
- return err
- }
- continue
- } else if rscs, ok := v.([]io.ReadSeekCloser); ok {
- for _, rsc := range rscs {
- if err := writeContent(k, k, rsc); err != nil {
- return err
- }
- }
- continue
- } else if mpc, ok := v.(streaming.MultipartContent); ok {
- if err := writeMultipartContent(k, mpc); err != nil {
- return err
- }
- continue
- } else if mpcs, ok := v.([]streaming.MultipartContent); ok {
- for _, mpc := range mpcs {
- if err := writeMultipartContent(k, mpc); err != nil {
- return err
- }
- }
- continue
- }
-
- var content string
- contentType := shared.ContentTypeTextPlain
- switch tt := v.(type) {
- case []byte:
- // JSON, don't quote it
- content = string(tt)
- contentType = shared.ContentTypeAppJSON
- case string:
- content = tt
- default:
- // ensure the value is in string format
- content = fmt.Sprintf("%v", v)
- }
-
- if err := writeField(k, contentType, content); err != nil {
- return err
- }
- }
- if err := writer.Close(); err != nil {
- return err
- }
- return req.SetBody(exported.NopCloser(bytes.NewReader(body.Bytes())), writer.FormDataContentType())
-}
-
-// SkipBodyDownload will disable automatic downloading of the response body.
-func SkipBodyDownload(req *policy.Request) {
- req.SetOperationValue(bodyDownloadPolicyOpValues{Skip: true})
-}
-
-// CtxAPINameKey is used as a context key for adding/retrieving the API name.
-type CtxAPINameKey = shared.CtxAPINameKey
-
-// NewUUID returns a new UUID using the RFC4122 algorithm.
-func NewUUID() (string, error) {
- u, err := uuid.New()
- if err != nil {
- return "", err
- }
- return u.String(), nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go
deleted file mode 100644
index 048566e0..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go
+++ /dev/null
@@ -1,109 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "bytes"
- "encoding/json"
- "encoding/xml"
- "fmt"
- "io"
- "net/http"
-
- azexported "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/exported"
-)
-
-// Payload reads and returns the response body or an error.
-// On a successful read, the response body is cached.
-// Subsequent reads will access the cached value.
-func Payload(resp *http.Response) ([]byte, error) {
- return exported.Payload(resp, nil)
-}
-
-// HasStatusCode returns true if the Response's status code is one of the specified values.
-func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
- return exported.HasStatusCode(resp, statusCodes...)
-}
-
-// UnmarshalAsByteArray will base-64 decode the received payload and place the result into the value pointed to by v.
-func UnmarshalAsByteArray(resp *http.Response, v *[]byte, format Base64Encoding) error {
- p, err := Payload(resp)
- if err != nil {
- return err
- }
- return DecodeByteArray(string(p), v, format)
-}
-
-// UnmarshalAsJSON calls json.Unmarshal() to unmarshal the received payload into the value pointed to by v.
-func UnmarshalAsJSON(resp *http.Response, v any) error {
- payload, err := Payload(resp)
- if err != nil {
- return err
- }
- // TODO: verify early exit is correct
- if len(payload) == 0 {
- return nil
- }
- err = removeBOM(resp)
- if err != nil {
- return err
- }
- err = json.Unmarshal(payload, v)
- if err != nil {
- err = fmt.Errorf("unmarshalling type %T: %s", v, err)
- }
- return err
-}
-
-// UnmarshalAsXML calls xml.Unmarshal() to unmarshal the received payload into the value pointed to by v.
-func UnmarshalAsXML(resp *http.Response, v any) error {
- payload, err := Payload(resp)
- if err != nil {
- return err
- }
- // TODO: verify early exit is correct
- if len(payload) == 0 {
- return nil
- }
- err = removeBOM(resp)
- if err != nil {
- return err
- }
- err = xml.Unmarshal(payload, v)
- if err != nil {
- err = fmt.Errorf("unmarshalling type %T: %s", v, err)
- }
- return err
-}
-
-// Drain reads the response body to completion then closes it. The bytes read are discarded.
-func Drain(resp *http.Response) {
- if resp != nil && resp.Body != nil {
- _, _ = io.Copy(io.Discard, resp.Body)
- resp.Body.Close()
- }
-}
-
-// removeBOM removes any byte-order mark prefix from the payload if present.
-func removeBOM(resp *http.Response) error {
- _, err := exported.Payload(resp, &exported.PayloadOptions{
- BytesModifier: func(b []byte) []byte {
- // UTF8
- return bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
- },
- })
- if err != nil {
- return err
- }
- return nil
-}
-
-// DecodeByteArray will base-64 decode the provided string into v.
-func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error {
- return azexported.DecodeByteArray(s, v, format)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go
deleted file mode 100644
index 1c75d771..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go
+++ /dev/null
@@ -1,15 +0,0 @@
-//go:build !wasm
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "context"
- "net"
-)
-
-func defaultTransportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) {
- return dialer.DialContext
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go
deleted file mode 100644
index 3dc9eeec..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go
+++ /dev/null
@@ -1,15 +0,0 @@
-//go:build (js && wasm) || wasip1
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "context"
- "net"
-)
-
-func defaultTransportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) {
- return nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go
deleted file mode 100644
index 2124c1d4..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go
+++ /dev/null
@@ -1,48 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package runtime
-
-import (
- "crypto/tls"
- "net"
- "net/http"
- "time"
-
- "golang.org/x/net/http2"
-)
-
-var defaultHTTPClient *http.Client
-
-func init() {
- defaultTransport := &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- DialContext: defaultTransportDialContext(&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- }),
- ForceAttemptHTTP2: true,
- MaxIdleConns: 100,
- MaxIdleConnsPerHost: 10,
- IdleConnTimeout: 90 * time.Second,
- TLSHandshakeTimeout: 10 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
- TLSClientConfig: &tls.Config{
- MinVersion: tls.VersionTLS12,
- Renegotiation: tls.RenegotiateFreelyAsClient,
- },
- }
- // TODO: evaluate removing this once https://github.com/golang/go/issues/59690 has been fixed
- if http2Transport, err := http2.ConfigureTransports(defaultTransport); err == nil {
- // if the connection has been idle for 10 seconds, send a ping frame for a health check
- http2Transport.ReadIdleTimeout = 10 * time.Second
- // if there's no response to the ping within the timeout, the connection will be closed
- http2Transport.PingTimeout = 5 * time.Second
- }
- defaultHTTPClient = &http.Client{
- Transport: defaultTransport,
- }
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go
deleted file mode 100644
index cadaef3d..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright 2017 Microsoft Corporation. All rights reserved.
-// Use of this source code is governed by an MIT
-// license that can be found in the LICENSE file.
-
-// Package streaming contains helpers for streaming IO operations and progress reporting.
-package streaming
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go
deleted file mode 100644
index 2468540b..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go
+++ /dev/null
@@ -1,89 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package streaming
-
-import (
- "io"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
-)
-
-type progress struct {
- rc io.ReadCloser
- rsc io.ReadSeekCloser
- pr func(bytesTransferred int64)
- offset int64
-}
-
-// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker.
-// In addition to adding a Close method to an io.ReadSeeker, this can also be used to wrap an
-// io.ReadSeekCloser with a no-op Close method to allow explicit control of when the io.ReedSeekCloser
-// has its underlying stream closed.
-func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {
- return exported.NopCloser(rs)
-}
-
-// NewRequestProgress adds progress reporting to an HTTP request's body stream.
-func NewRequestProgress(body io.ReadSeekCloser, pr func(bytesTransferred int64)) io.ReadSeekCloser {
- return &progress{
- rc: body,
- rsc: body,
- pr: pr,
- offset: 0,
- }
-}
-
-// NewResponseProgress adds progress reporting to an HTTP response's body stream.
-func NewResponseProgress(body io.ReadCloser, pr func(bytesTransferred int64)) io.ReadCloser {
- return &progress{
- rc: body,
- rsc: nil,
- pr: pr,
- offset: 0,
- }
-}
-
-// Read reads a block of data from an inner stream and reports progress
-func (p *progress) Read(b []byte) (n int, err error) {
- n, err = p.rc.Read(b)
- if err != nil && err != io.EOF {
- return
- }
- p.offset += int64(n)
- // Invokes the user's callback method to report progress
- p.pr(p.offset)
- return
-}
-
-// Seek only expects a zero or from beginning.
-func (p *progress) Seek(offset int64, whence int) (int64, error) {
- // This should only ever be called with offset = 0 and whence = io.SeekStart
- n, err := p.rsc.Seek(offset, whence)
- if err == nil {
- p.offset = int64(n)
- }
- return n, err
-}
-
-// requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it.
-func (p *progress) Close() error {
- return p.rc.Close()
-}
-
-// MultipartContent contains streaming content used in multipart/form payloads.
-type MultipartContent struct {
- // Body contains the required content body.
- Body io.ReadSeekCloser
-
- // ContentType optionally specifies the HTTP Content-Type for this Body.
- // The default value is application/octet-stream.
- ContentType string
-
- // Filename optionally specifies the filename for this Body.
- // The default value is the field name for the multipart/form section.
- Filename string
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go
deleted file mode 100644
index 80282d4a..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/constants.go
+++ /dev/null
@@ -1,41 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package tracing
-
-// SpanKind represents the role of a Span inside a Trace. Often, this defines how a Span will be processed and visualized by various backends.
-type SpanKind int
-
-const (
- // SpanKindInternal indicates the span represents an internal operation within an application.
- SpanKindInternal SpanKind = 1
-
- // SpanKindServer indicates the span covers server-side handling of a request.
- SpanKindServer SpanKind = 2
-
- // SpanKindClient indicates the span describes a request to a remote service.
- SpanKindClient SpanKind = 3
-
- // SpanKindProducer indicates the span was created by a messaging producer.
- SpanKindProducer SpanKind = 4
-
- // SpanKindConsumer indicates the span was created by a messaging consumer.
- SpanKindConsumer SpanKind = 5
-)
-
-// SpanStatus represents the status of a span.
-type SpanStatus int
-
-const (
- // SpanStatusUnset is the default status code.
- SpanStatusUnset SpanStatus = 0
-
- // SpanStatusError indicates the operation contains an error.
- SpanStatusError SpanStatus = 1
-
- // SpanStatusOK indicates the operation completed successfully.
- SpanStatusOK SpanStatus = 2
-)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go
deleted file mode 100644
index 1ade7c56..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing/tracing.go
+++ /dev/null
@@ -1,191 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-// Package tracing contains the definitions needed to support distributed tracing.
-package tracing
-
-import (
- "context"
-)
-
-// ProviderOptions contains the optional values when creating a Provider.
-type ProviderOptions struct {
- // for future expansion
-}
-
-// NewProvider creates a new Provider with the specified values.
-// - newTracerFn is the underlying implementation for creating Tracer instances
-// - options contains optional values; pass nil to accept the default value
-func NewProvider(newTracerFn func(name, version string) Tracer, options *ProviderOptions) Provider {
- return Provider{
- newTracerFn: newTracerFn,
- }
-}
-
-// Provider is the factory that creates Tracer instances.
-// It defaults to a no-op provider.
-type Provider struct {
- newTracerFn func(name, version string) Tracer
-}
-
-// NewTracer creates a new Tracer for the specified module name and version.
-// - module - the fully qualified name of the module
-// - version - the version of the module
-func (p Provider) NewTracer(module, version string) (tracer Tracer) {
- if p.newTracerFn != nil {
- tracer = p.newTracerFn(module, version)
- }
- return
-}
-
-/////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-// TracerOptions contains the optional values when creating a Tracer.
-type TracerOptions struct {
- // SpanFromContext contains the implementation for the Tracer.SpanFromContext method.
- SpanFromContext func(context.Context) Span
-}
-
-// NewTracer creates a Tracer with the specified values.
-// - newSpanFn is the underlying implementation for creating Span instances
-// - options contains optional values; pass nil to accept the default value
-func NewTracer(newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span), options *TracerOptions) Tracer {
- if options == nil {
- options = &TracerOptions{}
- }
- return Tracer{
- newSpanFn: newSpanFn,
- spanFromContextFn: options.SpanFromContext,
- }
-}
-
-// Tracer is the factory that creates Span instances.
-type Tracer struct {
- attrs []Attribute
- newSpanFn func(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span)
- spanFromContextFn func(ctx context.Context) Span
-}
-
-// Start creates a new span and a context.Context that contains it.
-// - ctx is the parent context for this span. If it contains a Span, the newly created span will be a child of that span, else it will be a root span
-// - spanName identifies the span within a trace, it's typically the fully qualified API name
-// - options contains optional values for the span, pass nil to accept any defaults
-func (t Tracer) Start(ctx context.Context, spanName string, options *SpanOptions) (context.Context, Span) {
- if t.newSpanFn != nil {
- opts := SpanOptions{}
- if options != nil {
- opts = *options
- }
- opts.Attributes = append(opts.Attributes, t.attrs...)
- return t.newSpanFn(ctx, spanName, &opts)
- }
- return ctx, Span{}
-}
-
-// SetAttributes sets attrs to be applied to each Span. If a key from attrs
-// already exists for an attribute of the Span it will be overwritten with
-// the value contained in attrs.
-func (t *Tracer) SetAttributes(attrs ...Attribute) {
- t.attrs = append(t.attrs, attrs...)
-}
-
-// Enabled returns true if this Tracer is capable of creating Spans.
-func (t Tracer) Enabled() bool {
- return t.newSpanFn != nil
-}
-
-// SpanFromContext returns the Span associated with the current context.
-// If the provided context has no Span, false is returned.
-func (t Tracer) SpanFromContext(ctx context.Context) Span {
- if t.spanFromContextFn != nil {
- return t.spanFromContextFn(ctx)
- }
- return Span{}
-}
-
-// SpanOptions contains optional settings for creating a span.
-type SpanOptions struct {
- // Kind indicates the kind of Span.
- Kind SpanKind
-
- // Attributes contains key-value pairs of attributes for the span.
- Attributes []Attribute
-}
-
-/////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-// SpanImpl abstracts the underlying implementation for Span,
-// allowing it to work with various tracing implementations.
-// Any zero-values will have their default, no-op behavior.
-type SpanImpl struct {
- // End contains the implementation for the Span.End method.
- End func()
-
- // SetAttributes contains the implementation for the Span.SetAttributes method.
- SetAttributes func(...Attribute)
-
- // AddEvent contains the implementation for the Span.AddEvent method.
- AddEvent func(string, ...Attribute)
-
- // SetStatus contains the implementation for the Span.SetStatus method.
- SetStatus func(SpanStatus, string)
-}
-
-// NewSpan creates a Span with the specified implementation.
-func NewSpan(impl SpanImpl) Span {
- return Span{
- impl: impl,
- }
-}
-
-// Span is a single unit of a trace. A trace can contain multiple spans.
-// A zero-value Span provides a no-op implementation.
-type Span struct {
- impl SpanImpl
-}
-
-// End terminates the span and MUST be called before the span leaves scope.
-// Any further updates to the span will be ignored after End is called.
-func (s Span) End() {
- if s.impl.End != nil {
- s.impl.End()
- }
-}
-
-// SetAttributes sets the specified attributes on the Span.
-// Any existing attributes with the same keys will have their values overwritten.
-func (s Span) SetAttributes(attrs ...Attribute) {
- if s.impl.SetAttributes != nil {
- s.impl.SetAttributes(attrs...)
- }
-}
-
-// AddEvent adds a named event with an optional set of attributes to the span.
-func (s Span) AddEvent(name string, attrs ...Attribute) {
- if s.impl.AddEvent != nil {
- s.impl.AddEvent(name, attrs...)
- }
-}
-
-// SetStatus sets the status on the span along with a description.
-func (s Span) SetStatus(code SpanStatus, desc string) {
- if s.impl.SetStatus != nil {
- s.impl.SetStatus(code, desc)
- }
-}
-
-/////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
-// Attribute is a key-value pair.
-type Attribute struct {
- // Key is the name of the attribute.
- Key string
-
- // Value is the attribute's value.
- // Types that are natively supported include int64, float64, int, bool, string.
- // Any other type will be formatted per rules of fmt.Sprintf("%v").
- Value any
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore
deleted file mode 100644
index 8cdb9103..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-# live test artifacts
-Dockerfile
-k8s.yaml
-sshkey*
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
deleted file mode 100644
index 6d4b6feb..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
+++ /dev/null
@@ -1,551 +0,0 @@
-# Release History
-
-## 1.6.0 (2024-06-10)
-
-### Features Added
-* `NewOnBehalfOfCredentialWithClientAssertions` creates an on-behalf-of credential
- that authenticates with client assertions such as federated credentials
-
-### Breaking Changes
-> These changes affect only code written against a beta version such as v1.6.0-beta.4
-* Removed `AzurePipelinesCredential` and the persistent token caching API.
- They will return in v1.7.0-beta.1
-
-### Bugs Fixed
-* Managed identity bug fixes
-
-## 1.6.0-beta.4 (2024-05-14)
-
-### Features Added
-* `AzurePipelinesCredential` authenticates an Azure Pipeline service connection with
- workload identity federation
-
-## 1.6.0-beta.3 (2024-04-09)
-
-### Breaking Changes
-* `DefaultAzureCredential` now sends a probe request with no retries for IMDS managed identity
- environments to avoid excessive retry delays when the IMDS endpoint is not available. This
- should improve credential chain resolution for local development scenarios.
-
-### Bugs Fixed
-* `ManagedIdentityCredential` now specifies resource IDs correctly for Azure Container Instances
-
-## 1.5.2 (2024-04-09)
-
-### Bugs Fixed
-* `ManagedIdentityCredential` now specifies resource IDs correctly for Azure Container Instances
-
-### Other Changes
-* Restored v1.4.0 error behavior for empty tenant IDs
-* Upgraded dependencies
-
-## 1.6.0-beta.2 (2024-02-06)
-
-### Breaking Changes
-> These changes affect only code written against a beta version such as v1.6.0-beta.1
-* Replaced `ErrAuthenticationRequired` with `AuthenticationRequiredError`, a struct
- type that carries the `TokenRequestOptions` passed to the `GetToken` call which
- returned the error.
-
-### Bugs Fixed
-* Fixed more cases in which credential chains like `DefaultAzureCredential`
- should try their next credential after attempting managed identity
- authentication in a Docker Desktop container
-
-### Other Changes
-* `AzureCLICredential` uses the CLI's `expires_on` value for token expiration
-
-## 1.6.0-beta.1 (2024-01-17)
-
-### Features Added
-* Restored persistent token caching API first added in v1.5.0-beta.1
-* Added `AzureCLICredentialOptions.Subscription`
-
-## 1.5.1 (2024-01-17)
-
-### Bugs Fixed
-* `InteractiveBrowserCredential` handles `AdditionallyAllowedTenants` correctly
-
-## 1.5.0 (2024-01-16)
-
-### Breaking Changes
-> These changes affect only code written against a beta version such as v1.5.0-beta.1
-* Removed persistent token caching. It will return in v1.6.0-beta.1
-
-### Bugs Fixed
-* Credentials now preserve MSAL headers e.g. X-Client-Sku
-
-### Other Changes
-* Upgraded dependencies
-
-## 1.5.0-beta.2 (2023-11-07)
-
-### Features Added
-* `DefaultAzureCredential` and `ManagedIdentityCredential` support Azure ML managed identity
-* Added spans for distributed tracing.
-
-## 1.5.0-beta.1 (2023-10-10)
-
-### Features Added
-* Optional persistent token caching for most credentials. Set `TokenCachePersistenceOptions`
- on a credential's options to enable and configure this. See the package documentation for
- this version and [TOKEN_CACHING.md](https://aka.ms/azsdk/go/identity/caching) for more
- details.
-* `AzureDeveloperCLICredential` authenticates with the Azure Developer CLI (`azd`). This
- credential is also part of the `DefaultAzureCredential` authentication flow.
-
-## 1.4.0 (2023-10-10)
-
-### Bugs Fixed
-* `ManagedIdentityCredential` will now retry when IMDS responds 410 or 503
-
-## 1.4.0-beta.5 (2023-09-12)
-
-### Features Added
-* Service principal credentials can request CAE tokens
-
-### Breaking Changes
-> These changes affect only code written against a beta version such as v1.4.0-beta.4
-* Whether `GetToken` requests a CAE token is now determined by `TokenRequestOptions.EnableCAE`. Azure
- SDK clients which support CAE will set this option automatically. Credentials no longer request CAE
- tokens by default or observe the environment variable "AZURE_IDENTITY_DISABLE_CP1".
-
-### Bugs Fixed
-* Credential chains such as `DefaultAzureCredential` now try their next credential, if any, when
- managed identity authentication fails in a Docker Desktop container
- ([#21417](https://github.com/Azure/azure-sdk-for-go/issues/21417))
-
-## 1.4.0-beta.4 (2023-08-16)
-
-### Other Changes
-* Upgraded dependencies
-
-## 1.3.1 (2023-08-16)
-
-### Other Changes
-* Upgraded dependencies
-
-## 1.4.0-beta.3 (2023-08-08)
-
-### Bugs Fixed
-* One invocation of `AzureCLICredential.GetToken()` and `OnBehalfOfCredential.GetToken()`
- can no longer make two authentication attempts
-
-## 1.4.0-beta.2 (2023-07-14)
-
-### Other Changes
-* `DefaultAzureCredentialOptions.TenantID` applies to workload identity authentication
-* Upgraded dependencies
-
-## 1.4.0-beta.1 (2023-06-06)
-
-### Other Changes
-* Re-enabled CAE support as in v1.3.0-beta.3
-
-## 1.3.0 (2023-05-09)
-
-### Breaking Changes
-> These changes affect only code written against a beta version such as v1.3.0-beta.5
-* Renamed `NewOnBehalfOfCredentialFromCertificate` to `NewOnBehalfOfCredentialWithCertificate`
-* Renamed `NewOnBehalfOfCredentialFromSecret` to `NewOnBehalfOfCredentialWithSecret`
-
-### Other Changes
-* Upgraded to MSAL v1.0.0
-
-## 1.3.0-beta.5 (2023-04-11)
-
-### Breaking Changes
-> These changes affect only code written against a beta version such as v1.3.0-beta.4
-* Moved `NewWorkloadIdentityCredential()` parameters into `WorkloadIdentityCredentialOptions`.
- The constructor now reads default configuration from environment variables set by the Azure
- workload identity webhook by default.
- ([#20478](https://github.com/Azure/azure-sdk-for-go/pull/20478))
-* Removed CAE support. It will return in v1.4.0-beta.1
- ([#20479](https://github.com/Azure/azure-sdk-for-go/pull/20479))
-
-### Bugs Fixed
-* Fixed an issue in `DefaultAzureCredential` that could cause the managed identity endpoint check to fail in rare circumstances.
-
-## 1.3.0-beta.4 (2023-03-08)
-
-### Features Added
-* Added `WorkloadIdentityCredentialOptions.AdditionallyAllowedTenants` and `.DisableInstanceDiscovery`
-
-### Bugs Fixed
-* Credentials now synchronize within `GetToken()` so a single instance can be shared among goroutines
- ([#20044](https://github.com/Azure/azure-sdk-for-go/issues/20044))
-
-### Other Changes
-* Upgraded dependencies
-
-## 1.2.2 (2023-03-07)
-
-### Other Changes
-* Upgraded dependencies
-
-## 1.3.0-beta.3 (2023-02-07)
-
-### Features Added
-* By default, credentials set client capability "CP1" to enable support for
- [Continuous Access Evaluation (CAE)](https://learn.microsoft.com/entra/identity-platform/app-resilience-continuous-access-evaluation).
- This indicates to Microsoft Entra ID that your application can handle CAE claims challenges.
- You can disable this behavior by setting the environment variable "AZURE_IDENTITY_DISABLE_CP1" to "true".
-* `InteractiveBrowserCredentialOptions.LoginHint` enables pre-populating the login
- prompt with a username ([#15599](https://github.com/Azure/azure-sdk-for-go/pull/15599))
-* Service principal and user credentials support ADFS authentication on Azure Stack.
- Specify "adfs" as the credential's tenant.
-* Applications running in private or disconnected clouds can prevent credentials from
- requesting Microsoft Entra instance metadata by setting the `DisableInstanceDiscovery`
- field on credential options.
-* Many credentials can now be configured to authenticate in multiple tenants. The
- options types for these credentials have an `AdditionallyAllowedTenants` field
- that specifies additional tenants in which the credential may authenticate.
-
-## 1.3.0-beta.2 (2023-01-10)
-
-### Features Added
-* Added `OnBehalfOfCredential` to support the on-behalf-of flow
- ([#16642](https://github.com/Azure/azure-sdk-for-go/issues/16642))
-
-### Bugs Fixed
-* `AzureCLICredential` reports token expiration in local time (should be UTC)
-
-### Other Changes
-* `AzureCLICredential` imposes its default timeout only when the `Context`
- passed to `GetToken()` has no deadline
-* Added `NewCredentialUnavailableError()`. This function constructs an error indicating
- a credential can't authenticate and an encompassing `ChainedTokenCredential` should
- try its next credential, if any.
-
-## 1.3.0-beta.1 (2022-12-13)
-
-### Features Added
-* `WorkloadIdentityCredential` and `DefaultAzureCredential` support
- Workload Identity Federation on Kubernetes. `DefaultAzureCredential`
- support requires environment variable configuration as set by the
- Workload Identity webhook.
- ([#15615](https://github.com/Azure/azure-sdk-for-go/issues/15615))
-
-## 1.2.0 (2022-11-08)
-
-### Other Changes
-* This version includes all fixes and features from 1.2.0-beta.*
-
-## 1.2.0-beta.3 (2022-10-11)
-
-### Features Added
-* `ManagedIdentityCredential` caches tokens in memory
-
-### Bugs Fixed
-* `ClientCertificateCredential` sends only the leaf cert for SNI authentication
-
-## 1.2.0-beta.2 (2022-08-10)
-
-### Features Added
-* Added `ClientAssertionCredential` to enable applications to authenticate
- with custom client assertions
-
-### Other Changes
-* Updated AuthenticationFailedError with links to TROUBLESHOOTING.md for relevant errors
-* Upgraded `microsoft-authentication-library-for-go` requirement to v0.6.0
-
-## 1.2.0-beta.1 (2022-06-07)
-
-### Features Added
-* `EnvironmentCredential` reads certificate passwords from `AZURE_CLIENT_CERTIFICATE_PASSWORD`
- ([#17099](https://github.com/Azure/azure-sdk-for-go/pull/17099))
-
-## 1.1.0 (2022-06-07)
-
-### Features Added
-* `ClientCertificateCredential` and `ClientSecretCredential` support ESTS-R. First-party
- applications can set environment variable `AZURE_REGIONAL_AUTHORITY_NAME` with a
- region name.
- ([#15605](https://github.com/Azure/azure-sdk-for-go/issues/15605))
-
-## 1.0.1 (2022-06-07)
-
-### Other Changes
-* Upgrade `microsoft-authentication-library-for-go` requirement to v0.5.1
- ([#18176](https://github.com/Azure/azure-sdk-for-go/issues/18176))
-
-## 1.0.0 (2022-05-12)
-
-### Features Added
-* `DefaultAzureCredential` reads environment variable `AZURE_CLIENT_ID` for the
- client ID of a user-assigned managed identity
- ([#17293](https://github.com/Azure/azure-sdk-for-go/pull/17293))
-
-### Breaking Changes
-* Removed `AuthorizationCodeCredential`. Use `InteractiveBrowserCredential` instead
- to authenticate a user with the authorization code flow.
-* Instances of `AuthenticationFailedError` are now returned by pointer.
-* `GetToken()` returns `azcore.AccessToken` by value
-
-### Bugs Fixed
-* `AzureCLICredential` panics after receiving an unexpected error type
- ([#17490](https://github.com/Azure/azure-sdk-for-go/issues/17490))
-
-### Other Changes
-* `GetToken()` returns an error when the caller specifies no scope
-* Updated to the latest versions of `golang.org/x/crypto`, `azcore` and `internal`
-
-## 0.14.0 (2022-04-05)
-
-### Breaking Changes
-* This module now requires Go 1.18
-* Removed `AuthorityHost`. Credentials are now configured for sovereign or private
- clouds with the API in `azcore/cloud`, for example:
- ```go
- // before
- opts := azidentity.ClientSecretCredentialOptions{AuthorityHost: azidentity.AzureGovernment}
- cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, &opts)
-
- // after
- import "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
-
- opts := azidentity.ClientSecretCredentialOptions{}
- opts.Cloud = cloud.AzureGovernment
- cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, &opts)
- ```
-
-## 0.13.2 (2022-03-08)
-
-### Bugs Fixed
-* Prevented a data race in `DefaultAzureCredential` and `ChainedTokenCredential`
- ([#17144](https://github.com/Azure/azure-sdk-for-go/issues/17144))
-
-### Other Changes
-* Upgraded App Service managed identity version from 2017-09-01 to 2019-08-01
- ([#17086](https://github.com/Azure/azure-sdk-for-go/pull/17086))
-
-## 0.13.1 (2022-02-08)
-
-### Features Added
-* `EnvironmentCredential` supports certificate SNI authentication when
- `AZURE_CLIENT_SEND_CERTIFICATE_CHAIN` is "true".
- ([#16851](https://github.com/Azure/azure-sdk-for-go/pull/16851))
-
-### Bugs Fixed
-* `ManagedIdentityCredential.GetToken()` now returns an error when configured for
- a user assigned identity in Azure Cloud Shell (which doesn't support such identities)
- ([#16946](https://github.com/Azure/azure-sdk-for-go/pull/16946))
-
-### Other Changes
-* `NewDefaultAzureCredential()` logs non-fatal errors. These errors are also included in the
- error returned by `DefaultAzureCredential.GetToken()` when it's unable to acquire a token
- from any source. ([#15923](https://github.com/Azure/azure-sdk-for-go/issues/15923))
-
-## 0.13.0 (2022-01-11)
-
-### Breaking Changes
-* Replaced `AuthenticationFailedError.RawResponse()` with a field having the same name
-* Unexported `CredentialUnavailableError`
-* Instances of `ChainedTokenCredential` will now skip looping through the list of source credentials and re-use the first successful credential on subsequent calls to `GetToken`.
- * If `ChainedTokenCredentialOptions.RetrySources` is true, `ChainedTokenCredential` will continue to try all of the originally provided credentials each time the `GetToken` method is called.
- * `ChainedTokenCredential.successfulCredential` will contain a reference to the last successful credential.
- * `DefaultAzureCredenial` will also re-use the first successful credential on subsequent calls to `GetToken`.
- * `DefaultAzureCredential.chain.successfulCredential` will also contain a reference to the last successful credential.
-
-### Other Changes
-* `ManagedIdentityCredential` no longer probes IMDS before requesting a token
- from it. Also, an error response from IMDS no longer disables a credential
- instance. Following an error, a credential instance will continue to send
- requests to IMDS as necessary.
-* Adopted MSAL for user and service principal authentication
-* Updated `azcore` requirement to 0.21.0
-
-## 0.12.0 (2021-11-02)
-### Breaking Changes
-* Raised minimum go version to 1.16
-* Removed `NewAuthenticationPolicy()` from credentials. Clients should instead use azcore's
- `runtime.NewBearerTokenPolicy()` to construct a bearer token authorization policy.
-* The `AuthorityHost` field in credential options structs is now a custom type,
- `AuthorityHost`, with underlying type `string`
-* `NewChainedTokenCredential` has a new signature to accommodate a placeholder
- options struct:
- ```go
- // before
- cred, err := NewChainedTokenCredential(credA, credB)
-
- // after
- cred, err := NewChainedTokenCredential([]azcore.TokenCredential{credA, credB}, nil)
- ```
-* Removed `ExcludeAzureCLICredential`, `ExcludeEnvironmentCredential`, and `ExcludeMSICredential`
- from `DefaultAzureCredentialOptions`
-* `NewClientCertificateCredential` requires a `[]*x509.Certificate` and `crypto.PrivateKey` instead of
- a path to a certificate file. Added `ParseCertificates` to simplify getting these in common cases:
- ```go
- // before
- cred, err := NewClientCertificateCredential("tenant", "client-id", "/cert.pem", nil)
-
- // after
- certData, err := os.ReadFile("/cert.pem")
- certs, key, err := ParseCertificates(certData, password)
- cred, err := NewClientCertificateCredential(tenantID, clientID, certs, key, nil)
- ```
-* Removed `InteractiveBrowserCredentialOptions.ClientSecret` and `.Port`
-* Removed `AADAuthenticationFailedError`
-* Removed `id` parameter of `NewManagedIdentityCredential()`. User assigned identities are now
- specified by `ManagedIdentityCredentialOptions.ID`:
- ```go
- // before
- cred, err := NewManagedIdentityCredential("client-id", nil)
- // or, for a resource ID
- opts := &ManagedIdentityCredentialOptions{ID: ResourceID}
- cred, err := NewManagedIdentityCredential("/subscriptions/...", opts)
-
- // after
- clientID := ClientID("7cf7db0d-...")
- opts := &ManagedIdentityCredentialOptions{ID: clientID}
- // or, for a resource ID
- resID: ResourceID("/subscriptions/...")
- opts := &ManagedIdentityCredentialOptions{ID: resID}
- cred, err := NewManagedIdentityCredential(opts)
- ```
-* `DeviceCodeCredentialOptions.UserPrompt` has a new type: `func(context.Context, DeviceCodeMessage) error`
-* Credential options structs now embed `azcore.ClientOptions`. In addition to changing literal initialization
- syntax, this change renames `HTTPClient` fields to `Transport`.
-* Renamed `LogCredential` to `EventCredential`
-* `AzureCLICredential` no longer reads the environment variable `AZURE_CLI_PATH`
-* `NewManagedIdentityCredential` no longer reads environment variables `AZURE_CLIENT_ID` and
- `AZURE_RESOURCE_ID`. Use `ManagedIdentityCredentialOptions.ID` instead.
-* Unexported `AuthenticationFailedError` and `CredentialUnavailableError` structs. In their place are two
- interfaces having the same names.
-
-### Bugs Fixed
-* `AzureCLICredential.GetToken` no longer mutates its `opts.Scopes`
-
-### Features Added
-* Added connection configuration options to `DefaultAzureCredentialOptions`
-* `AuthenticationFailedError.RawResponse()` returns the HTTP response motivating the error,
- if available
-
-### Other Changes
-* `NewDefaultAzureCredential()` returns `*DefaultAzureCredential` instead of `*ChainedTokenCredential`
-* Added `TenantID` field to `DefaultAzureCredentialOptions` and `AzureCLICredentialOptions`
-
-## 0.11.0 (2021-09-08)
-### Breaking Changes
-* Unexported `AzureCLICredentialOptions.TokenProvider` and its type,
- `AzureCLITokenProvider`
-
-### Bug Fixes
-* `ManagedIdentityCredential.GetToken` returns `CredentialUnavailableError`
- when IMDS has no assigned identity, signaling `DefaultAzureCredential` to
- try other credentials
-
-
-## 0.10.0 (2021-08-30)
-### Breaking Changes
-* Update based on `azcore` refactor [#15383](https://github.com/Azure/azure-sdk-for-go/pull/15383)
-
-## 0.9.3 (2021-08-20)
-
-### Bugs Fixed
-* `ManagedIdentityCredential.GetToken` no longer mutates its `opts.Scopes`
-
-### Other Changes
-* Bumps version of `azcore` to `v0.18.1`
-
-
-## 0.9.2 (2021-07-23)
-### Features Added
-* Adding support for Service Fabric environment in `ManagedIdentityCredential`
-* Adding an option for using a resource ID instead of client ID in `ManagedIdentityCredential`
-
-
-## 0.9.1 (2021-05-24)
-### Features Added
-* Add LICENSE.txt and bump version information
-
-
-## 0.9.0 (2021-05-21)
-### Features Added
-* Add support for authenticating in Azure Stack environments
-* Enable user assigned identities for the IMDS scenario in `ManagedIdentityCredential`
-* Add scope to resource conversion in `GetToken()` on `ManagedIdentityCredential`
-
-
-## 0.8.0 (2021-01-20)
-### Features Added
-* Updating documentation
-
-
-## 0.7.1 (2021-01-04)
-### Features Added
-* Adding port option to `InteractiveBrowserCredential`
-
-
-## 0.7.0 (2020-12-11)
-### Features Added
-* Add `redirectURI` parameter back to authentication code flow
-
-
-## 0.6.1 (2020-12-09)
-### Features Added
-* Updating query parameter in `ManagedIdentityCredential` and updating datetime string for parsing managed identity access tokens.
-
-
-## 0.6.0 (2020-11-16)
-### Features Added
-* Remove `RedirectURL` parameter from auth code flow to align with the MSAL implementation which relies on the native client redirect URL.
-
-
-## 0.5.0 (2020-10-30)
-### Features Added
-* Flattening credential options
-
-
-## 0.4.3 (2020-10-21)
-### Features Added
-* Adding Azure Arc support in `ManagedIdentityCredential`
-
-
-## 0.4.2 (2020-10-16)
-### Features Added
-* Typo fixes
-
-
-## 0.4.1 (2020-10-16)
-### Features Added
-* Ensure authority hosts are only HTTPs
-
-
-## 0.4.0 (2020-10-16)
-### Features Added
-* Adding options structs for credentials
-
-
-## 0.3.0 (2020-10-09)
-### Features Added
-* Update `DeviceCodeCredential` callback
-
-
-## 0.2.2 (2020-10-09)
-### Features Added
-* Add `AuthorizationCodeCredential`
-
-
-## 0.2.1 (2020-10-06)
-### Features Added
-* Add `InteractiveBrowserCredential`
-
-
-## 0.2.0 (2020-09-11)
-### Features Added
-* Refactor `azidentity` on top of `azcore` refactor
-* Updated policies to conform to `policy.Policy` interface changes.
-* Updated non-retriable errors to conform to `azcore.NonRetriableError`.
-* Fixed calls to `Request.SetBody()` to include content type.
-* Switched endpoints to string types and removed extra parsing code.
-
-
-## 0.1.1 (2020-09-02)
-### Features Added
-* Add `AzureCLICredential` to `DefaultAzureCredential` chain
-
-
-## 0.1.0 (2020-07-23)
-### Features Added
-* Initial Release. Azure Identity library that provides Microsoft Entra token authentication support for the SDK.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt
deleted file mode 100644
index 48ea6616..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/LICENSE.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) Microsoft Corporation.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md
deleted file mode 100644
index 4404be82..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md
+++ /dev/null
@@ -1,307 +0,0 @@
-# Migrating from autorest/adal to azidentity
-
-`azidentity` provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/entra/fundamentals/new-name)) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead.
-
-This guide shows common authentication code using `autorest/adal` and its equivalent using `azidentity`.
-
-## Table of contents
-
-- [Acquire a token](#acquire-a-token)
-- [Client certificate authentication](#client-certificate-authentication)
-- [Client secret authentication](#client-secret-authentication)
-- [Configuration](#configuration)
-- [Device code authentication](#device-code-authentication)
-- [Managed identity](#managed-identity)
-- [Use azidentity credentials with older packages](#use-azidentity-credentials-with-older-packages)
-
-## Configuration
-
-### `autorest/adal`
-
-Token providers require a token audience (resource identifier) and an instance of `adal.OAuthConfig`, which requires a Microsoft Entra endpoint and tenant:
-
-```go
-import "github.com/Azure/go-autorest/autorest/adal"
-
-oauthCfg, err := adal.NewOAuthConfig("https://login.chinacloudapi.cn", tenantID)
-handle(err)
-
-spt, err := adal.NewServicePrincipalTokenWithSecret(
- *oauthCfg, clientID, "https://management.chinacloudapi.cn/", &adal.ServicePrincipalTokenSecret{ClientSecret: secret},
-)
-```
-
-### `azidentity`
-
-A credential instance can acquire tokens for any audience. The audience for each token is determined by the client requesting it. Credentials require endpoint configuration only for sovereign or private clouds. The `azcore/cloud` package has predefined configuration for sovereign clouds such as Azure China:
-
-```go
-import (
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
- "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
-)
-
-clientOpts := azcore.ClientOptions{Cloud: cloud.AzureChina}
-
-cred, err := azidentity.NewClientSecretCredential(
- tenantID, clientID, secret, &azidentity.ClientSecretCredentialOptions{ClientOptions: clientOpts},
-)
-handle(err)
-```
-
-## Client secret authentication
-
-### `autorest/adal`
-
-```go
-import (
- "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/adal"
-)
-
-oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID)
-handle(err)
-spt, err := adal.NewServicePrincipalTokenWithSecret(
- *oauthCfg, clientID, "https://management.azure.com/", &adal.ServicePrincipalTokenSecret{ClientSecret: secret},
-)
-handle(err)
-
-client := subscriptions.NewClient()
-client.Authorizer = autorest.NewBearerAuthorizer(spt)
-```
-
-### `azidentity`
-
-```go
-import (
- "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
- "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions"
-)
-
-cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil)
-handle(err)
-
-client, err := armsubscriptions.NewClient(cred, nil)
-handle(err)
-```
-
-## Client certificate authentication
-
-### `autorest/adal`
-
-```go
-import (
- "os"
-
- "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/adal"
-)
-certData, err := os.ReadFile("./example.pfx")
-handle(err)
-
-certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
-handle(err)
-
-oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID)
-handle(err)
-
-spt, err := adal.NewServicePrincipalTokenFromCertificate(
- *oauthConfig, clientID, certificate, rsaPrivateKey, "https://management.azure.com/",
-)
-
-client := subscriptions.NewClient()
-client.Authorizer = autorest.NewBearerAuthorizer(spt)
-```
-
-### `azidentity`
-
-```go
-import (
- "os"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
- "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions"
-)
-
-certData, err := os.ReadFile("./example.pfx")
-handle(err)
-
-certs, key, err := azidentity.ParseCertificates(certData, nil)
-handle(err)
-
-cred, err = azidentity.NewClientCertificateCredential(tenantID, clientID, certs, key, nil)
-handle(err)
-
-client, err := armsubscriptions.NewClient(cred, nil)
-handle(err)
-```
-
-## Managed identity
-
-### `autorest/adal`
-
-```go
-import (
- "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/adal"
-)
-
-spt, err := adal.NewServicePrincipalTokenFromManagedIdentity("https://management.azure.com/", nil)
-handle(err)
-
-client := subscriptions.NewClient()
-client.Authorizer = autorest.NewBearerAuthorizer(spt)
-```
-
-### `azidentity`
-
-```go
-import (
- "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
- "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions"
-)
-
-cred, err := azidentity.NewManagedIdentityCredential(nil)
-handle(err)
-
-client, err := armsubscriptions.NewClient(cred, nil)
-handle(err)
-```
-
-### User-assigned identities
-
-`autorest/adal`:
-
-```go
-import "github.com/Azure/go-autorest/autorest/adal"
-
-opts := &adal.ManagedIdentityOptions{ClientID: "..."}
-spt, err := adal.NewServicePrincipalTokenFromManagedIdentity("https://management.azure.com/")
-handle(err)
-```
-
-`azidentity`:
-
-```go
-import "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
-
-opts := azidentity.ManagedIdentityCredentialOptions{ID: azidentity.ClientID("...")}
-cred, err := azidentity.NewManagedIdentityCredential(&opts)
-handle(err)
-```
-
-## Device code authentication
-
-### `autorest/adal`
-
-```go
-import (
- "fmt"
- "net/http"
-
- "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions"
- "github.com/Azure/go-autorest/autorest"
- "github.com/Azure/go-autorest/autorest/adal"
-)
-
-oauthClient := &http.Client{}
-oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID)
-handle(err)
-resource := "https://management.azure.com/"
-deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthCfg, clientID, resource)
-handle(err)
-
-// display instructions, wait for the user to authenticate
-fmt.Println(*deviceCode.Message)
-token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
-handle(err)
-
-spt, err := adal.NewServicePrincipalTokenFromManualToken(*oauthCfg, clientID, resource, *token)
-handle(err)
-
-client := subscriptions.NewClient()
-client.Authorizer = autorest.NewBearerAuthorizer(spt)
-```
-
-### `azidentity`
-
-```go
-import (
- "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
- "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armsubscriptions"
-)
-
-cred, err := azidentity.NewDeviceCodeCredential(nil)
-handle(err)
-
-client, err := armsubscriptions.NewSubscriptionsClient(cred, nil)
-handle(err)
-```
-
-`azidentity.DeviceCodeCredential` will guide a user through authentication, printing instructions to the console by default. The user prompt is customizable. For more information, see the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential).
-
-## Acquire a token
-
-### `autorest/adal`
-
-```go
-import "github.com/Azure/go-autorest/autorest/adal"
-
-oauthCfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", tenantID)
-handle(err)
-
-spt, err := adal.NewServicePrincipalTokenWithSecret(
- *oauthCfg, clientID, "https://vault.azure.net", &adal.ServicePrincipalTokenSecret{ClientSecret: secret},
-)
-
-err = spt.Refresh()
-if err == nil {
- token := spt.Token
-}
-```
-
-### `azidentity`
-
-In ordinary usage, application code doesn't need to request tokens from credentials directly. Azure SDK clients handle token acquisition and refreshing internally. However, applications may call `GetToken()` to do so. All credential types have this method.
-
-```go
-import (
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
-)
-
-cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil)
-handle(err)
-
-tk, err := cred.GetToken(
- context.TODO(), policy.TokenRequestOptions{Scopes: []string{"https://vault.azure.net/.default"}},
-)
-if err == nil {
- token := tk.Token
-}
-```
-
-Note that `azidentity` credentials use the Microsoft Entra endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/permissions-consent-overview).
-
-## Use azidentity credentials with older packages
-
-The [azidext module](https://pkg.go.dev/github.com/jongio/azidext/go/azidext) provides an adapter for `azidentity` credential types. The adapter enables using the credential types with older Azure SDK clients. For example:
-
-```go
-import (
- "github.com/Azure/azure-sdk-for-go/sdk/azidentity"
- "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2018-06-01/subscriptions"
- "github.com/jongio/azidext/go/azidext"
-)
-
-cred, err := azidentity.NewClientSecretCredential(tenantID, clientID, secret, nil)
-handle(err)
-
-client := subscriptions.NewClient()
-client.Authorizer = azidext.NewTokenCredentialAdapter(cred, []string{"https://management.azure.com//.default"})
-```
-
-
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
deleted file mode 100644
index b5acff0e..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
+++ /dev/null
@@ -1,257 +0,0 @@
-# Azure Identity Client Module for Go
-
-The Azure Identity module provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/entra/fundamentals/new-name)) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication.
-
-[](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity)
-| [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity/)
-| [Source code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity)
-
-# Getting started
-
-## Install the module
-
-This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management.
-
-Install the Azure Identity module:
-
-```sh
-go get -u github.com/Azure/azure-sdk-for-go/sdk/azidentity
-```
-
-## Prerequisites
-
-- an [Azure subscription](https://azure.microsoft.com/free/)
-- Go 1.18
-
-### Authenticating during local development
-
-When debugging and executing code locally, developers typically use their own accounts to authenticate calls to Azure services. The `azidentity` module supports authenticating through developer tools to simplify local development.
-
-#### Authenticating via the Azure CLI
-
-`DefaultAzureCredential` and `AzureCLICredential` can authenticate as the user
-signed in to the [Azure CLI](https://learn.microsoft.com/cli/azure). To sign in to the Azure CLI, run `az login`. On a system with a default web browser, the Azure CLI will launch the browser to authenticate a user.
-
-When no default browser is available, `az login` will use the device code
-authentication flow. This can also be selected manually by running `az login --use-device-code`.
-
-#### Authenticate via the Azure Developer CLI
-
-Developers coding outside of an IDE can also use the [Azure Developer CLI](https://aka.ms/azure-dev) to authenticate. Applications using the `DefaultAzureCredential` or the `AzureDeveloperCLICredential` can use the account logged in to the Azure Developer CLI to authenticate calls in their application when running locally.
-
-To authenticate with the Azure Developer CLI, run `azd auth login`. On a system with a default web browser, `azd` will launch the browser to authenticate. On systems without a default web browser, run `azd auth login --use-device-code` to use the device code authentication flow.
-
-## Key concepts
-
-### Credentials
-
-A credential is a type which contains or can obtain the data needed for a
-service client to authenticate requests. Service clients across the Azure SDK
-accept a credential instance when they are constructed, and use that credential
-to authenticate requests.
-
-The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID. It offers a variety of credential types capable of acquiring a Microsoft Entra access token. See [Credential Types](#credential-types "Credential Types") for a list of this module's credential types.
-
-### DefaultAzureCredential
-
-`DefaultAzureCredential` is appropriate for most apps that will be deployed to Azure. It combines common production credentials with development credentials. It attempts to authenticate via the following mechanisms in this order, stopping when one succeeds:
-
-
-
-1. **Environment** - `DefaultAzureCredential` will read account information specified via [environment variables](#environment-variables) and use it to authenticate.
-1. **Workload Identity** - If the app is deployed on Kubernetes with environment variables set by the workload identity webhook, `DefaultAzureCredential` will authenticate the configured identity.
-1. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it.
-1. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity.
-1. **Azure Developer CLI** - If the developer has authenticated via the Azure Developer CLI `azd auth login` command, the `DefaultAzureCredential` will authenticate with that account.
-
-> Note: `DefaultAzureCredential` is intended to simplify getting started with the SDK by handling common scenarios with reasonable default behaviors. Developers who want more control or whose scenario isn't served by the default settings should use other credential types.
-
-## Managed Identity
-
-`DefaultAzureCredential` and `ManagedIdentityCredential` support
-[managed identity authentication](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview)
-in any hosting environment which supports managed identities, such as (this list is not exhaustive):
-* [Azure App Service](https://learn.microsoft.com/azure/app-service/overview-managed-identity)
-* [Azure Arc](https://learn.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)
-* [Azure Cloud Shell](https://learn.microsoft.com/azure/cloud-shell/msi-authorization)
-* [Azure Kubernetes Service](https://learn.microsoft.com/azure/aks/use-managed-identity)
-* [Azure Service Fabric](https://learn.microsoft.com/azure/service-fabric/concepts-managed-identity)
-* [Azure Virtual Machines](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/how-to-use-vm-token)
-
-## Examples
-
-- [Authenticate with DefaultAzureCredential](#authenticate-with-defaultazurecredential "Authenticate with DefaultAzureCredential")
-- [Define a custom authentication flow with ChainedTokenCredential](#define-a-custom-authentication-flow-with-chainedtokencredential "Define a custom authentication flow with ChainedTokenCredential")
-- [Specify a user-assigned managed identity for DefaultAzureCredential](#specify-a-user-assigned-managed-identity-for-defaultazurecredential)
-
-### Authenticate with DefaultAzureCredential
-
-This example demonstrates authenticating a client from the `armresources` module with `DefaultAzureCredential`.
-
-```go
-cred, err := azidentity.NewDefaultAzureCredential(nil)
-if err != nil {
- // handle error
-}
-
-client := armresources.NewResourceGroupsClient("subscription ID", cred, nil)
-```
-
-### Specify a user-assigned managed identity for DefaultAzureCredential
-
-To configure `DefaultAzureCredential` to authenticate a user-assigned managed identity, set the environment variable `AZURE_CLIENT_ID` to the identity's client ID.
-
-### Define a custom authentication flow with `ChainedTokenCredential`
-
-`DefaultAzureCredential` is generally the quickest way to get started developing apps for Azure. For more advanced scenarios, `ChainedTokenCredential` links multiple credential instances to be tried sequentially when authenticating. It will try each chained credential in turn until one provides a token or fails to authenticate due to an error.
-
-The following example demonstrates creating a credential, which will attempt to authenticate using managed identity. It will fall back to authenticating via the Azure CLI when a managed identity is unavailable.
-
-```go
-managed, err := azidentity.NewManagedIdentityCredential(nil)
-if err != nil {
- // handle error
-}
-azCLI, err := azidentity.NewAzureCLICredential(nil)
-if err != nil {
- // handle error
-}
-chain, err := azidentity.NewChainedTokenCredential([]azcore.TokenCredential{managed, azCLI}, nil)
-if err != nil {
- // handle error
-}
-
-client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
-```
-
-## Credential Types
-
-### Authenticating Azure Hosted Applications
-
-|Credential|Usage
-|-|-
-|[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps
-|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials
-|[EnvironmentCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)|Authenticate a service principal or user configured by environment variables
-|[ManagedIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential)|Authenticate the managed identity of an Azure resource
-|[WorkloadIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#WorkloadIdentityCredential)|Authenticate a workload identity on Kubernetes
-
-### Authenticating Service Principals
-
-|Credential|Usage
-|-|-
-|[ClientAssertionCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientAssertionCredential)|Authenticate a service principal with a signed client assertion
-|[ClientCertificateCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientCertificateCredential)|Authenticate a service principal with a certificate
-|[ClientSecretCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ClientSecretCredential)|Authenticate a service principal with a secret
-
-### Authenticating Users
-
-|Credential|Usage
-|-|-
-|[InteractiveBrowserCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#InteractiveBrowserCredential)|Interactively authenticate a user with the default web browser
-|[DeviceCodeCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential)|Interactively authenticate a user on a device with limited UI
-|[UsernamePasswordCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#UsernamePasswordCredential)|Authenticate a user with a username and password
-
-### Authenticating via Development Tools
-
-|Credential|Usage
-|-|-
-|[AzureCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureCLICredential)|Authenticate as the user signed in to the Azure CLI
-|[`AzureDeveloperCLICredential`](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureDeveloperCLICredential)|Authenticates as the user signed in to the Azure Developer CLI
-
-## Environment Variables
-
-`DefaultAzureCredential` and `EnvironmentCredential` can be configured with environment variables. Each type of authentication requires values for specific variables:
-
-#### Service principal with secret
-
-|variable name|value
-|-|-
-|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application
-|`AZURE_TENANT_ID`|ID of the application's Microsoft Entra tenant
-|`AZURE_CLIENT_SECRET`|one of the application's client secrets
-
-#### Service principal with certificate
-
-|variable name|value
-|-|-
-|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application
-|`AZURE_TENANT_ID`|ID of the application's Microsoft Entra tenant
-|`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key
-|`AZURE_CLIENT_CERTIFICATE_PASSWORD`|password of the certificate file, if any
-
-#### Username and password
-
-|variable name|value
-|-|-
-|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application
-|`AZURE_USERNAME`|a username (usually an email address)
-|`AZURE_PASSWORD`|that user's password
-
-Configuration is attempted in the above order. For example, if values for a
-client secret and certificate are both present, the client secret will be used.
-
-## Token caching
-
-Token caching is an `azidentity` feature that allows apps to:
-
-* Cache tokens in memory (default) or on disk (opt-in).
-* Improve resilience and performance.
-* Reduce the number of requests made to Microsoft Entra ID to obtain access tokens.
-
-For more details, see the [token caching documentation](https://aka.ms/azsdk/go/identity/caching).
-
-## Troubleshooting
-
-### Error Handling
-
-Credentials return an `error` when they fail to authenticate or lack data they require to authenticate. For guidance on resolving errors from specific credential types, see the [troubleshooting guide](https://aka.ms/azsdk/go/identity/troubleshoot).
-
-For more details on handling specific Microsoft Entra errors, see the Microsoft Entra [error code documentation](https://learn.microsoft.com/entra/identity-platform/reference-error-codes).
-
-### Logging
-
-This module uses the classification-based logging implementation in `azcore`. To enable console logging for all SDK modules, set `AZURE_SDK_GO_LOGGING` to `all`. Use the `azcore/log` package to control log event output or to enable logs for `azidentity` only. For example:
-```go
-import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
-
-// print log output to stdout
-azlog.SetListener(func(event azlog.Event, s string) {
- fmt.Println(s)
-})
-
-// include only azidentity credential logs
-azlog.SetEvents(azidentity.EventAuthentication)
-```
-
-Credentials log basic information only, such as `GetToken` success or failure and errors. These log entries don't contain authentication secrets but may contain sensitive information.
-
-## Next steps
-
-Client and management modules listed on the [Azure SDK releases page](https://azure.github.io/azure-sdk/releases/latest/go.html) support authenticating with `azidentity` credential types. You can learn more about using these libraries in their documentation, which is linked from the release page.
-
-## Provide Feedback
-
-If you encounter bugs or have suggestions, please
-[open an issue](https://github.com/Azure/azure-sdk-for-go/issues).
-
-## Contributing
-
-This project welcomes contributions and suggestions. Most contributions require
-you to agree to a Contributor License Agreement (CLA) declaring that you have
-the right to, and actually do, grant us the rights to use your contribution.
-For details, visit [https://cla.microsoft.com](https://cla.microsoft.com).
-
-When you submit a pull request, a CLA-bot will automatically determine whether
-you need to provide a CLA and decorate the PR appropriately (e.g., label,
-comment). Simply follow the instructions provided by the bot. You will only
-need to do this once across all repos using our CLA.
-
-This project has adopted the
-[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
-For more information, see the
-[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
-or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any
-additional questions or comments.
-
-
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
deleted file mode 100644
index f9cc4894..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
+++ /dev/null
@@ -1,70 +0,0 @@
-## Token caching in the Azure Identity client module
-
-*Token caching* is a feature provided by the Azure Identity library that allows apps to:
-
-- Improve their resilience and performance.
-- Reduce the number of requests made to Microsoft Entra ID to obtain access tokens.
-- Reduce the number of times the user is prompted to authenticate.
-
-When an app needs to access a protected Azure resource, it typically needs to obtain an access token from Entra ID. Obtaining that token involves sending a request to Entra ID and may also involve prompting the user. Entra ID then validates the credentials provided in the request and issues an access token.
-
-Token caching, via the Azure Identity library, allows the app to store this access token [in memory](#in-memory-token-caching), where it's accessible to the current process, or [on disk](#persistent-token-caching) where it can be accessed across application or process invocations. The token can then be retrieved quickly and easily the next time the app needs to access the same resource. The app can avoid making another request to Entra ID, which reduces network traffic and improves resilience. Additionally, in scenarios where the app is authenticating users, token caching also avoids prompting the user each time new tokens are requested.
-
-### In-memory token caching
-
-*In-memory token caching* is the default option provided by the Azure Identity library. This caching approach allows apps to store access tokens in memory. With in-memory token caching, the library first determines if a valid access token for the requested resource is already stored in memory. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. If a valid token isn't found, the library will automatically acquire a token by sending a request to Entra ID. The in-memory token cache provided by the Azure Identity library is thread-safe.
-
-**Note:** When Azure Identity library credentials are used with Azure service libraries (for example, Azure Blob Storage), the in-memory token caching is active in the `Pipeline` layer as well. All `TokenCredential` implementations are supported there, including custom implementations external to the Azure Identity library.
-
-#### Caching cannot be disabled
-
-As there are many levels of caching, it's not possible disable in-memory caching. However, the in-memory cache may be cleared by creating a new credential instance.
-
-### Persistent token caching
-
-> Only azidentity v1.5.0-beta versions support persistent token caching
-
-*Persistent disk token caching* is an opt-in feature in the Azure Identity library. The feature allows apps to cache access tokens in an encrypted, persistent storage mechanism. As indicated in the following table, the storage mechanism differs across operating systems.
-
-| Operating system | Storage mechanism |
-|------------------|---------------------------------------|
-| Linux | kernel key retention service (keyctl) |
-| macOS | Keychain |
-| Windows | DPAPI |
-
-By default the token cache will protect any data which is persisted using the user data protection APIs available on the current platform.
-However, there are cases where no data protection is available, and applications may choose to allow storing the token cache in an unencrypted state by setting `TokenCachePersistenceOptions.AllowUnencryptedStorage` to `true`. This allows a credential to fall back to unencrypted storage if it can't encrypt the cache. However, we do not recommend using this storage method due to its significantly lower security measures. In addition, tokens are not encrypted solely to the current user, which could potentially allow unauthorized access to the cache by individuals with machine access.
-
-With persistent disk token caching enabled, the library first determines if a valid access token for the requested resource is already stored in the persistent cache. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. Additionally, the tokens are preserved across app runs, which:
-
-- Makes the app more resilient to failures.
-- Ensures the app can continue to function during an Entra ID outage or disruption.
-- Avoids having to prompt users to authenticate each time the process is restarted.
-
->IMPORTANT! The token cache contains sensitive data and **MUST** be protected to prevent compromising accounts. All application decisions regarding the persistence of the token cache must consider that a breach of its content will fully compromise all the accounts it contains.
-
-#### Example code
-
-See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.6.0-beta.2#pkg-overview) for example code demonstrating how to configure persistent caching and access cached data.
-
-### Credentials supporting token caching
-
-The following table indicates the state of in-memory and persistent caching in each credential type.
-
-**Note:** In-memory caching is activated by default. Persistent token caching needs to be enabled as shown in [this example](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#example-package-PersistentCache).
-
-| Credential | In-memory token caching | Persistent token caching |
-|--------------------------------|---------------------------------------------------------------------|--------------------------|
-| `AzureCLICredential` | Not Supported | Not Supported |
-| `AzureDeveloperCLICredential` | Not Supported | Not Supported |
-| `ClientAssertionCredential` | Supported | Supported |
-| `ClientCertificateCredential` | Supported | Supported |
-| `ClientSecretCredential` | Supported | Supported |
-| `DefaultAzureCredential` | Supported if the target credential in the default chain supports it | Not Supported |
-| `DeviceCodeCredential` | Supported | Supported |
-| `EnvironmentCredential` | Supported | Not Supported |
-| `InteractiveBrowserCredential` | Supported | Supported |
-| `ManagedIdentityCredential` | Supported | Not Supported |
-| `OnBehalfOfCredential` | Supported | Supported |
-| `UsernamePasswordCredential` | Supported | Supported |
-| `WorkloadIdentityCredential` | Supported | Supported |
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
deleted file mode 100644
index 3564e685..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
+++ /dev/null
@@ -1,231 +0,0 @@
-# Troubleshoot Azure Identity authentication issues
-
-This troubleshooting guide covers failure investigation techniques, common errors for the credential types in the `azidentity` module, and mitigation steps to resolve these errors.
-
-## Table of contents
-
-- [Handle azidentity errors](#handle-azidentity-errors)
- - [Permission issues](#permission-issues)
-- [Find relevant information in errors](#find-relevant-information-in-errors)
-- [Enable and configure logging](#enable-and-configure-logging)
-- [Troubleshoot AzureCLICredential authentication issues](#troubleshoot-azureclicredential-authentication-issues)
-- [Troubleshoot AzureDeveloperCLICredential authentication issues](#troubleshoot-azuredeveloperclicredential-authentication-issues)
-- [Troubleshoot ClientCertificateCredential authentication issues](#troubleshoot-clientcertificatecredential-authentication-issues)
-- [Troubleshoot ClientSecretCredential authentication issues](#troubleshoot-clientsecretcredential-authentication-issues)
-- [Troubleshoot DefaultAzureCredential authentication issues](#troubleshoot-defaultazurecredential-authentication-issues)
-- [Troubleshoot EnvironmentCredential authentication issues](#troubleshoot-environmentcredential-authentication-issues)
-- [Troubleshoot ManagedIdentityCredential authentication issues](#troubleshoot-managedidentitycredential-authentication-issues)
- - [Azure App Service and Azure Functions managed identity](#azure-app-service-and-azure-functions-managed-identity)
- - [Azure Kubernetes Service managed identity](#azure-kubernetes-service-managed-identity)
- - [Azure Virtual Machine managed identity](#azure-virtual-machine-managed-identity)
-- [Troubleshoot UsernamePasswordCredential authentication issues](#troubleshoot-usernamepasswordcredential-authentication-issues)
-- [Troubleshoot WorkloadIdentityCredential authentication issues](#troubleshoot-workloadidentitycredential-authentication-issues)
-- [Get additional help](#get-additional-help)
-
-## Handle azidentity errors
-
-Any service client method that makes a request to the service may return an error due to authentication failure. This is because the credential authenticates on the first call to the service and on any subsequent call that needs to refresh an access token. Authentication errors include a description of the failure and possibly an error message from Microsoft Entra ID. Depending on the application, these errors may or may not be recoverable.
-
-### Permission issues
-
-Service client errors with a status code of 401 or 403 often indicate that authentication succeeded but the caller doesn't have permission to access the specified API. Check the service documentation to determine which RBAC roles are needed for the request, and ensure the authenticated user or service principal has the appropriate role assignments.
-
-## Find relevant information in errors
-
-Authentication errors can include responses from Microsoft Entra ID and often contain information helpful in diagnosis. Consider the following error message:
-
-```
-ClientSecretCredential authentication failed
-POST https://login.microsoftonline.com/3c631bb7-a9f7-4343-a5ba-a615913/oauth2/v2.0/token
---------------------------------------------------------------------------------
-RESPONSE 401 Unauthorized
---------------------------------------------------------------------------------
-{
- "error": "invalid_client",
- "error_description": "AADSTS7000215: Invalid client secret provided. Ensure the secret being sent in the request is the client secret value, not the client secret ID, for a secret added to app '86be4c01-505b-45e9-bfc0-9b825fd84'.\r\nTrace ID: 03da4b8e-5ffe-48ca-9754-aff4276f0100\r\nCorrelation ID: 7b12f9bb-2eef-42e3-ad75-eee69ec9088d\r\nTimestamp: 2022-03-02 18:25:26Z",
- "error_codes": [
- 7000215
- ],
- "timestamp": "2022-03-02 18:25:26Z",
- "trace_id": "03da4b8e-5ffe-48ca-9754-aff4276f0100",
- "correlation_id": "7b12f9bb-2eef-42e3-ad75-eee69ec9088d",
- "error_uri": "https://login.microsoftonline.com/error?code=7000215"
-}
---------------------------------------------------------------------------------
-```
-
-This error contains several pieces of information:
-
-- __Failing Credential Type__: The type of credential that failed to authenticate. This can be helpful when diagnosing issues with chained credential types such as `DefaultAzureCredential` or `ChainedTokenCredential`.
-
-- __Microsoft Entra ID Error Code and Message__: The error code and message returned by Microsoft Entra ID. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/reference-error-codes#aadsts-error-codes) has more information on AADSTS error codes.
-
-- __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. This information can be useful to support engineers diagnosing unexpected Microsoft Entra failures.
-
-### Enable and configure logging
-
-`azidentity` provides the same logging capabilities as the rest of the Azure SDK. The simplest way to see the logs to help debug authentication issues is to print credential logs to the console.
-```go
-import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
-
-// print log output to stdout
-azlog.SetListener(func(event azlog.Event, s string) {
- fmt.Println(s)
-})
-
-// include only azidentity credential logs
-azlog.SetEvents(azidentity.EventAuthentication)
-```
-
-
-## Troubleshoot DefaultAzureCredential authentication issues
-
-| Error |Description| Mitigation |
-|---|---|---|
-|"DefaultAzureCredential failed to acquire a token"|No credential in the `DefaultAzureCredential` chain provided a token|- [Enable logging](#enable-and-configure-logging) to get further diagnostic information.
- Consult the troubleshooting guide for underlying credential types for more information.
- [EnvironmentCredential](#troubleshoot-environmentcredential-authentication-issues)
- [ManagedIdentityCredential](#troubleshoot-managedidentitycredential-authentication-issues)
- [AzureCLICredential](#troubleshoot-azureclicredential-authentication-issues)
|
-|Error from the client with a status code of 401 or 403|Authentication succeeded but the authorizing Azure service responded with a 401 (Unauthorized), or 403 (Forbidden) status code|- [Enable logging](#enable-and-configure-logging) to determine which credential in the chain returned the authenticating token.
- If an unexpected credential is returning a token, check application configuration such as environment variables.
- Ensure the correct role is assigned to the authenticated identity. For example, a service specific role rather than the subscription Owner role.
|
-|"managed identity timed out"|`DefaultAzureCredential` sets a short timeout on its first managed identity authentication attempt to prevent very long timeouts during local development when no managed identity is available. That timeout causes this error in production when an application requests a token before the hosting environment is ready to provide one.|Use [ManagedIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential) directly, at least in production. It doesn't set a timeout on its authentication attempts.|
-
-## Troubleshoot EnvironmentCredential authentication issues
-
-| Error Message |Description| Mitigation |
-|---|---|---|
-|Missing or incomplete environment variable configuration|A valid combination of environment variables wasn't set|Ensure the appropriate environment variables are set for the intended authentication method as described in the [module documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)|
-
-
-## Troubleshoot ClientSecretCredential authentication issues
-
-| Error Code | Issue | Mitigation |
-|---|---|---|
-|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
-|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-2-create-a-new-application-secret).|
-|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal).|
-
-
-## Troubleshoot ClientCertificateCredential authentication issues
-
-| Error Code | Description | Mitigation |
-|---|---|---|
-|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-1-upload-a-certificate).|
-|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal).|
-
-
-## Troubleshoot UsernamePasswordCredential authentication issues
-
-| Error Code | Issue | Mitigation |
-|---|---|---|
-|AADSTS50126|The provided username or password is invalid.|Ensure the username and password provided to the credential constructor are valid.|
-
-
-## Troubleshoot ManagedIdentityCredential authentication issues
-
-`ManagedIdentityCredential` is designed to work on a variety of Azure hosts support managed identity. Configuration and troubleshooting vary from host to host. The below table lists the Azure hosts that can be assigned a managed identity and are supported by `ManagedIdentityCredential`.
-
-|Host Environment| | |
-|---|---|---|
-|Azure Virtual Machines and Scale Sets|[Configuration](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm)|[Troubleshooting](#azure-virtual-machine-managed-identity)|
-|Azure App Service and Azure Functions|[Configuration](https://learn.microsoft.com/azure/app-service/overview-managed-identity)|[Troubleshooting](#azure-app-service-and-azure-functions-managed-identity)|
-|Azure Kubernetes Service|[Configuration](https://azure.github.io/aad-pod-identity/docs/)|[Troubleshooting](#azure-kubernetes-service-managed-identity)|
-|Azure Arc|[Configuration](https://learn.microsoft.com/azure/azure-arc/servers/managed-identity-authentication)||
-|Azure Service Fabric|[Configuration](https://learn.microsoft.com/azure/service-fabric/concepts-managed-identity)||
-
-### Azure Virtual Machine managed identity
-
-| Error Message |Description| Mitigation |
-|---|---|---|
-|The requested identity hasn’t been assigned to this resource.|The IMDS endpoint responded with a status code of 400, indicating the requested identity isn’t assigned to the VM.|If using a user assigned identity, ensure the specified ID is correct.If using a system assigned identity, make sure it has been enabled as described in [managed identity documentation](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm#enable-system-assigned-managed-identity-on-an-existing-vm).|
-|The request failed due to a gateway error.|The request to the IMDS endpoint failed due to a gateway error, 502 or 504 status code.|IMDS doesn't support requests via proxy or gateway. Disable proxies or gateways running on the VM for requests to the IMDS endpoint `http://169.254.169.254`|
-|No response received from the managed identity endpoint.|No response was received for the request to IMDS or the request timed out.|- Ensure the VM is configured for managed identity as described in [managed identity documentation](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm).
- Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.
|
-|Multiple attempts failed to obtain a token from the managed identity endpoint.|The credential has exhausted its retries for a token request.|- Refer to the error message for more details on specific failures.
- Ensure the VM is configured for managed identity as described in [managed identity documentation](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/qs-configure-portal-windows-vm).
- Verify the IMDS endpoint is reachable on the VM. See [below](#verify-imds-is-available-on-the-vm) for instructions.
|
-
-#### Verify IMDS is available on the VM
-
-If you have access to the VM, you can use `curl` to verify the managed identity endpoint is available.
-
-```sh
-curl 'http://169.254.169.254/metadata/identity/oauth2/token?resource=https://management.core.windows.net&api-version=2018-02-01' -H "Metadata: true"
-```
-
-> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security.
-
-### Azure App Service and Azure Functions managed identity
-
-| Error Message |Description| Mitigation |
-|---|---|---|
-|Get "`http://169.254.169.254/...`" i/o timeout|The App Service host hasn't set environment variables for managed identity configuration.|- Ensure the App Service is configured for managed identity as described in [App Service documentation](https://learn.microsoft.com/azure/app-service/overview-managed-identity).
- Verify the App Service environment is properly configured and the managed identity endpoint is available. See [below](#verify-the-app-service-managed-identity-endpoint-is-available) for instructions.
|
-
-#### Verify the App Service managed identity endpoint is available
-
-If you can SSH into the App Service, you can verify managed identity is available in the environment. First ensure the environment variables `IDENTITY_ENDPOINT` and `IDENTITY_SECRET` are set. Then you can verify the managed identity endpoint is available using `curl`.
-
-```sh
-curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-version=2019-08-01" -H "X-IDENTITY-HEADER: $IDENTITY_HEADER"
-```
-
-> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security.
-
-### Azure Kubernetes Service managed identity
-
-#### Pod Identity
-
-| Error Message |Description| Mitigation |
-|---|---|---|
-|"no azure identity found for request clientID"|The application attempted to authenticate before an identity was assigned to its pod|Verify the pod is labeled correctly. This also occurs when a correctly labeled pod authenticates before the identity is ready. To prevent initialization races, configure NMI to set the Retry-After header in its responses as described in [Pod Identity documentation](https://azure.github.io/aad-pod-identity/docs/configure/feature_flags/#set-retry-after-header-in-nmi-response).
-
-
-## Troubleshoot AzureCLICredential authentication issues
-
-| Error Message |Description| Mitigation |
-|---|---|---|
-|Azure CLI not found on path|The Azure CLI isn’t installed or isn't on the application's path.|- Ensure the Azure CLI is installed as described in [Azure CLI documentation](https://learn.microsoft.com/cli/azure/install-azure-cli).
- Validate the installation location is in the application's `PATH` environment variable.
|
-|Please run 'az login' to set up account|No account is currently logged into the Azure CLI, or the login has expired.|- Run `az login` to log into the Azure CLI. More information about Azure CLI authentication is available in the [Azure CLI documentation](https://learn.microsoft.com/cli/azure/authenticate-azure-cli).
- Verify that the Azure CLI can obtain tokens. See [below](#verify-the-azure-cli-can-obtain-tokens) for instructions.
|
-
-#### Verify the Azure CLI can obtain tokens
-
-You can manually verify that the Azure CLI can authenticate and obtain tokens. First, use the `account` command to verify the logged in account.
-
-```azurecli
-az account show
-```
-
-Once you've verified the Azure CLI is using the correct account, you can validate that it's able to obtain tokens for that account.
-
-```azurecli
-az account get-access-token --output json --resource https://management.core.windows.net
-```
-
-> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security.
-
-
-## Troubleshoot AzureDeveloperCLICredential authentication issues
-
-| Error Message |Description| Mitigation |
-|---|---|---|
-|Azure Developer CLI not found on path|The Azure Developer CLI isn't installed or couldn't be found.|- Ensure the Azure Developer CLI is properly installed. See the installation instructions at [Install or update the Azure Developer CLI](https://learn.microsoft.com/azure/developer/azure-developer-cli/install-azd).
- Validate the installation location has been added to the `PATH` environment variable.
|
-|Please run "azd auth login"|No account is logged into the Azure Developer CLI, or the login has expired.|- Log in to the Azure Developer CLI using the `azd login` command.
- Validate that the Azure Developer CLI can obtain tokens. For instructions, see [Verify the Azure Developer CLI can obtain tokens](#verify-the-azure-developer-cli-can-obtain-tokens).
|
-
-#### Verify the Azure Developer CLI can obtain tokens
-
-You can manually verify that the Azure Developer CLI is properly authenticated and can obtain tokens. First, use the `config` command to verify the account that is currently logged in to the Azure Developer CLI.
-
-```sh
-azd config list
-```
-
-Once you've verified the Azure Developer CLI is using correct account, you can validate that it's able to obtain tokens for this account.
-
-```sh
-azd auth token --output json --scope https://management.core.windows.net/.default
-```
->Note that output of this command will contain a valid access token, and SHOULD NOT BE SHARED to avoid compromising account security.
-
-
-## Troubleshoot `WorkloadIdentityCredential` authentication issues
-
-| Error Message |Description| Mitigation |
-|---|---|---|
-|no client ID/tenant ID/token file specified|Incomplete configuration|In most cases these values are provided via environment variables set by Azure Workload Identity.- If your application runs on Azure Kubernetes Servide (AKS) or a cluster that has deployed the Azure Workload Identity admission webhook, check pod labels and service account configuration. See the [AKS documentation](https://learn.microsoft.com/azure/aks/workload-identity-deploy-cluster#disable-workload-identity) and [Azure Workload Identity troubleshooting guide](https://azure.github.io/azure-workload-identity/docs/troubleshooting.html) for more details.
- If your application isn't running on AKS or your cluster hasn't deployed the Workload Identity admission webhook, set these values in `WorkloadIdentityCredentialOptions`
-
-## Get additional help
-
-Additional information on ways to reach out for support can be found in [SUPPORT.md](https://github.com/Azure/azure-sdk-for-go/blob/main/SUPPORT.md).
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
deleted file mode 100644
index bff0c44d..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
+++ /dev/null
@@ -1,6 +0,0 @@
-{
- "AssetsRepo": "Azure/azure-sdk-assets",
- "AssetsRepoPrefixPath": "go",
- "TagPrefix": "go/azidentity",
- "Tag": "go/azidentity_087379b475"
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go
deleted file mode 100644
index ada4d650..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go
+++ /dev/null
@@ -1,95 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "net/url"
- "strings"
-
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
-)
-
-var supportedAuthRecordVersions = []string{"1.0"}
-
-// authenticationRecord is non-secret account information about an authenticated user that user credentials such as
-// [DeviceCodeCredential] and [InteractiveBrowserCredential] can use to access previously cached authentication
-// data. Call these credentials' Authenticate method to get an authenticationRecord for a user.
-type authenticationRecord struct {
- // Authority is the URL of the authority that issued the token.
- Authority string `json:"authority"`
-
- // ClientID is the ID of the application that authenticated the user.
- ClientID string `json:"clientId"`
-
- // HomeAccountID uniquely identifies the account.
- HomeAccountID string `json:"homeAccountId"`
-
- // TenantID identifies the tenant in which the user authenticated.
- TenantID string `json:"tenantId"`
-
- // Username is the user's preferred username.
- Username string `json:"username"`
-
- // Version of the AuthenticationRecord.
- Version string `json:"version"`
-}
-
-// UnmarshalJSON implements json.Unmarshaler for AuthenticationRecord
-func (a *authenticationRecord) UnmarshalJSON(b []byte) error {
- // Default unmarshaling is fine but we want to return an error if the record's version isn't supported i.e., we
- // want to inspect the unmarshalled values before deciding whether to return an error. Unmarshaling a formally
- // different type enables this by assigning all the fields without recursing into this method.
- type r authenticationRecord
- err := json.Unmarshal(b, (*r)(a))
- if err != nil {
- return err
- }
- if a.Version == "" {
- return errors.New("AuthenticationRecord must have a version")
- }
- for _, v := range supportedAuthRecordVersions {
- if a.Version == v {
- return nil
- }
- }
- return fmt.Errorf("unsupported AuthenticationRecord version %q. This module supports %v", a.Version, supportedAuthRecordVersions)
-}
-
-// account returns the AuthenticationRecord as an MSAL Account. The account is zero-valued when the AuthenticationRecord is zero-valued.
-func (a *authenticationRecord) account() public.Account {
- return public.Account{
- Environment: a.Authority,
- HomeAccountID: a.HomeAccountID,
- PreferredUsername: a.Username,
- }
-}
-
-func newAuthenticationRecord(ar public.AuthResult) (authenticationRecord, error) {
- u, err := url.Parse(ar.IDToken.Issuer)
- if err != nil {
- return authenticationRecord{}, fmt.Errorf("Authenticate expected a URL issuer but got %q", ar.IDToken.Issuer)
- }
- tenant := ar.IDToken.TenantID
- if tenant == "" {
- tenant = strings.Trim(u.Path, "/")
- }
- username := ar.IDToken.PreferredUsername
- if username == "" {
- username = ar.IDToken.UPN
- }
- return authenticationRecord{
- Authority: fmt.Sprintf("%s://%s", u.Scheme, u.Host),
- ClientID: ar.IDToken.Audience,
- HomeAccountID: ar.Account.HomeAccountID,
- TenantID: tenant,
- Username: username,
- Version: "1.0",
- }, nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go
deleted file mode 100644
index b0965036..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go
+++ /dev/null
@@ -1,190 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "os"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
- "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
-)
-
-const (
- azureAdditionallyAllowedTenants = "AZURE_ADDITIONALLY_ALLOWED_TENANTS"
- azureAuthorityHost = "AZURE_AUTHORITY_HOST"
- azureClientCertificatePassword = "AZURE_CLIENT_CERTIFICATE_PASSWORD"
- azureClientCertificatePath = "AZURE_CLIENT_CERTIFICATE_PATH"
- azureClientID = "AZURE_CLIENT_ID"
- azureClientSecret = "AZURE_CLIENT_SECRET"
- azureFederatedTokenFile = "AZURE_FEDERATED_TOKEN_FILE"
- azurePassword = "AZURE_PASSWORD"
- azureRegionalAuthorityName = "AZURE_REGIONAL_AUTHORITY_NAME"
- azureTenantID = "AZURE_TENANT_ID"
- azureUsername = "AZURE_USERNAME"
-
- organizationsTenantID = "organizations"
- developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46"
- defaultSuffix = "/.default"
-
- traceNamespace = "Microsoft.Entra"
- traceOpGetToken = "GetToken"
- traceOpAuthenticate = "Authenticate"
-)
-
-var (
- // capability CP1 indicates the client application is capable of handling CAE claims challenges
- cp1 = []string{"CP1"}
- errInvalidTenantID = errors.New("invalid tenantID. You can locate your tenantID by following the instructions listed here: https://learn.microsoft.com/partner-center/find-ids-and-domain-names")
-)
-
-// tokenCachePersistenceOptions contains options for persistent token caching
-type tokenCachePersistenceOptions = internal.TokenCachePersistenceOptions
-
-// setAuthorityHost initializes the authority host for credentials. Precedence is:
-// 1. cloud.Configuration.ActiveDirectoryAuthorityHost value set by user
-// 2. value of AZURE_AUTHORITY_HOST
-// 3. default: Azure Public Cloud
-func setAuthorityHost(cc cloud.Configuration) (string, error) {
- host := cc.ActiveDirectoryAuthorityHost
- if host == "" {
- if len(cc.Services) > 0 {
- return "", errors.New("missing ActiveDirectoryAuthorityHost for specified cloud")
- }
- host = cloud.AzurePublic.ActiveDirectoryAuthorityHost
- if envAuthorityHost := os.Getenv(azureAuthorityHost); envAuthorityHost != "" {
- host = envAuthorityHost
- }
- }
- u, err := url.Parse(host)
- if err != nil {
- return "", err
- }
- if u.Scheme != "https" {
- return "", errors.New("cannot use an authority host without https")
- }
- return host, nil
-}
-
-// resolveAdditionalTenants returns a copy of tenants, simplified when tenants contains a wildcard
-func resolveAdditionalTenants(tenants []string) []string {
- if len(tenants) == 0 {
- return nil
- }
- for _, t := range tenants {
- // a wildcard makes all other values redundant
- if t == "*" {
- return []string{"*"}
- }
- }
- cp := make([]string, len(tenants))
- copy(cp, tenants)
- return cp
-}
-
-// resolveTenant returns the correct tenant for a token request
-func resolveTenant(defaultTenant, specified, credName string, additionalTenants []string) (string, error) {
- if specified == "" || specified == defaultTenant {
- return defaultTenant, nil
- }
- if defaultTenant == "adfs" {
- return "", errors.New("ADFS doesn't support tenants")
- }
- if !validTenantID(specified) {
- return "", errInvalidTenantID
- }
- for _, t := range additionalTenants {
- if t == "*" || t == specified {
- return specified, nil
- }
- }
- return "", fmt.Errorf(`%s isn't configured to acquire tokens for tenant %q. To enable acquiring tokens for this tenant add it to the AdditionallyAllowedTenants on the credential options, or add "*" to allow acquiring tokens for any tenant`, credName, specified)
-}
-
-func alphanumeric(r rune) bool {
- return ('0' <= r && r <= '9') || ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z')
-}
-
-func validTenantID(tenantID string) bool {
- if len(tenantID) < 1 {
- return false
- }
- for _, r := range tenantID {
- if !(alphanumeric(r) || r == '.' || r == '-') {
- return false
- }
- }
- return true
-}
-
-func doForClient(client *azcore.Client, r *http.Request) (*http.Response, error) {
- req, err := runtime.NewRequest(r.Context(), r.Method, r.URL.String())
- if err != nil {
- return nil, err
- }
- if r.Body != nil && r.Body != http.NoBody {
- // create a rewindable body from the existing body as required
- var body io.ReadSeekCloser
- if rsc, ok := r.Body.(io.ReadSeekCloser); ok {
- body = rsc
- } else {
- b, err := io.ReadAll(r.Body)
- if err != nil {
- return nil, err
- }
- body = streaming.NopCloser(bytes.NewReader(b))
- }
- err = req.SetBody(body, r.Header.Get("Content-Type"))
- if err != nil {
- return nil, err
- }
- }
-
- // copy headers to the new request, ignoring any for which the new request has a value
- h := req.Raw().Header
- for key, vals := range r.Header {
- if _, has := h[key]; !has {
- for _, val := range vals {
- h.Add(key, val)
- }
- }
- }
-
- resp, err := client.Pipeline().Do(req)
- if err != nil {
- return nil, err
- }
- return resp, err
-}
-
-// enables fakes for test scenarios
-type msalConfidentialClient interface {
- AcquireTokenSilent(ctx context.Context, scopes []string, options ...confidential.AcquireSilentOption) (confidential.AuthResult, error)
- AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...confidential.AcquireByAuthCodeOption) (confidential.AuthResult, error)
- AcquireTokenByCredential(ctx context.Context, scopes []string, options ...confidential.AcquireByCredentialOption) (confidential.AuthResult, error)
- AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string, options ...confidential.AcquireOnBehalfOfOption) (confidential.AuthResult, error)
-}
-
-// enables fakes for test scenarios
-type msalPublicClient interface {
- AcquireTokenSilent(ctx context.Context, scopes []string, options ...public.AcquireSilentOption) (public.AuthResult, error)
- AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username string, password string, options ...public.AcquireByUsernamePasswordOption) (public.AuthResult, error)
- AcquireTokenByDeviceCode(ctx context.Context, scopes []string, options ...public.AcquireByDeviceCodeOption) (public.DeviceCode, error)
- AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...public.AcquireByAuthCodeOption) (public.AuthResult, error)
- AcquireTokenInteractive(ctx context.Context, scopes []string, options ...public.AcquireInteractiveOption) (public.AuthResult, error)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
deleted file mode 100644
index b9976f5f..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
+++ /dev/null
@@ -1,190 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "os"
- "os/exec"
- "runtime"
- "strings"
- "sync"
- "time"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
-)
-
-const credNameAzureCLI = "AzureCLICredential"
-
-type azTokenProvider func(ctx context.Context, scopes []string, tenant, subscription string) ([]byte, error)
-
-// AzureCLICredentialOptions contains optional parameters for AzureCLICredential.
-type AzureCLICredentialOptions struct {
- // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition
- // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the
- // logged in account can access.
- AdditionallyAllowedTenants []string
-
- // Subscription is the name or ID of a subscription. Set this to acquire tokens for an account other
- // than the Azure CLI's current account.
- Subscription string
-
- // TenantID identifies the tenant the credential should authenticate in.
- // Defaults to the CLI's default tenant, which is typically the home tenant of the logged in user.
- TenantID string
-
- // inDefaultChain is true when the credential is part of DefaultAzureCredential
- inDefaultChain bool
- // tokenProvider is used by tests to fake invoking az
- tokenProvider azTokenProvider
-}
-
-// init returns an instance of AzureCLICredentialOptions initialized with default values.
-func (o *AzureCLICredentialOptions) init() {
- if o.tokenProvider == nil {
- o.tokenProvider = defaultAzTokenProvider
- }
-}
-
-// AzureCLICredential authenticates as the identity logged in to the Azure CLI.
-type AzureCLICredential struct {
- mu *sync.Mutex
- opts AzureCLICredentialOptions
-}
-
-// NewAzureCLICredential constructs an AzureCLICredential. Pass nil to accept default options.
-func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredential, error) {
- cp := AzureCLICredentialOptions{}
- if options != nil {
- cp = *options
- }
- for _, r := range cp.Subscription {
- if !(alphanumeric(r) || r == '-' || r == '_' || r == ' ' || r == '.') {
- return nil, fmt.Errorf("%s: invalid Subscription %q", credNameAzureCLI, cp.Subscription)
- }
- }
- if cp.TenantID != "" && !validTenantID(cp.TenantID) {
- return nil, errInvalidTenantID
- }
- cp.init()
- cp.AdditionallyAllowedTenants = resolveAdditionalTenants(cp.AdditionallyAllowedTenants)
- return &AzureCLICredential{mu: &sync.Mutex{}, opts: cp}, nil
-}
-
-// GetToken requests a token from the Azure CLI. This credential doesn't cache tokens, so every call invokes the CLI.
-// This method is called automatically by Azure SDK clients.
-func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- at := azcore.AccessToken{}
- if len(opts.Scopes) != 1 {
- return at, errors.New(credNameAzureCLI + ": GetToken() requires exactly one scope")
- }
- if !validScope(opts.Scopes[0]) {
- return at, fmt.Errorf("%s.GetToken(): invalid scope %q", credNameAzureCLI, opts.Scopes[0])
- }
- tenant, err := resolveTenant(c.opts.TenantID, opts.TenantID, credNameAzureCLI, c.opts.AdditionallyAllowedTenants)
- if err != nil {
- return at, err
- }
- c.mu.Lock()
- defer c.mu.Unlock()
- b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant, c.opts.Subscription)
- if err == nil {
- at, err = c.createAccessToken(b)
- }
- if err != nil {
- err = unavailableIfInChain(err, c.opts.inDefaultChain)
- return at, err
- }
- msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", credNameAzureCLI, strings.Join(opts.Scopes, ", "))
- log.Write(EventAuthentication, msg)
- return at, nil
-}
-
-// defaultAzTokenProvider invokes the Azure CLI to acquire a token. It assumes
-// callers have verified that all string arguments are safe to pass to the CLI.
-var defaultAzTokenProvider azTokenProvider = func(ctx context.Context, scopes []string, tenantID, subscription string) ([]byte, error) {
- // pass the CLI a Microsoft Entra ID v1 resource because we don't know which CLI version is installed and older ones don't support v2 scopes
- resource := strings.TrimSuffix(scopes[0], defaultSuffix)
- // set a default timeout for this authentication iff the application hasn't done so already
- var cancel context.CancelFunc
- if _, hasDeadline := ctx.Deadline(); !hasDeadline {
- ctx, cancel = context.WithTimeout(ctx, cliTimeout)
- defer cancel()
- }
- commandLine := "az account get-access-token -o json --resource " + resource
- if tenantID != "" {
- commandLine += " --tenant " + tenantID
- }
- if subscription != "" {
- // subscription needs quotes because it may contain spaces
- commandLine += ` --subscription "` + subscription + `"`
- }
- var cliCmd *exec.Cmd
- if runtime.GOOS == "windows" {
- dir := os.Getenv("SYSTEMROOT")
- if dir == "" {
- return nil, newCredentialUnavailableError(credNameAzureCLI, "environment variable 'SYSTEMROOT' has no value")
- }
- cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine)
- cliCmd.Dir = dir
- } else {
- cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine)
- cliCmd.Dir = "/bin"
- }
- cliCmd.Env = os.Environ()
- var stderr bytes.Buffer
- cliCmd.Stderr = &stderr
-
- output, err := cliCmd.Output()
- if err != nil {
- msg := stderr.String()
- var exErr *exec.ExitError
- if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'az' is not recognized") {
- msg = "Azure CLI not found on path"
- }
- if msg == "" {
- msg = err.Error()
- }
- return nil, newCredentialUnavailableError(credNameAzureCLI, msg)
- }
-
- return output, nil
-}
-
-func (c *AzureCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) {
- t := struct {
- AccessToken string `json:"accessToken"`
- Expires_On int64 `json:"expires_on"`
- ExpiresOn string `json:"expiresOn"`
- }{}
- err := json.Unmarshal(tk, &t)
- if err != nil {
- return azcore.AccessToken{}, err
- }
-
- exp := time.Unix(t.Expires_On, 0)
- if t.Expires_On == 0 {
- exp, err = time.ParseInLocation("2006-01-02 15:04:05.999999", t.ExpiresOn, time.Local)
- if err != nil {
- return azcore.AccessToken{}, fmt.Errorf("%s: error parsing token expiration time %q: %v", credNameAzureCLI, t.ExpiresOn, err)
- }
- }
-
- converted := azcore.AccessToken{
- Token: t.AccessToken,
- ExpiresOn: exp.UTC(),
- }
- return converted, nil
-}
-
-var _ azcore.TokenCredential = (*AzureCLICredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go
deleted file mode 100644
index cbe7c4c2..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go
+++ /dev/null
@@ -1,169 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "os"
- "os/exec"
- "runtime"
- "strings"
- "sync"
- "time"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
-)
-
-const credNameAzureDeveloperCLI = "AzureDeveloperCLICredential"
-
-type azdTokenProvider func(ctx context.Context, scopes []string, tenant string) ([]byte, error)
-
-// AzureDeveloperCLICredentialOptions contains optional parameters for AzureDeveloperCLICredential.
-type AzureDeveloperCLICredentialOptions struct {
- // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition
- // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the
- // logged in account can access.
- AdditionallyAllowedTenants []string
-
- // TenantID identifies the tenant the credential should authenticate in. Defaults to the azd environment,
- // which is the tenant of the selected Azure subscription.
- TenantID string
-
- // inDefaultChain is true when the credential is part of DefaultAzureCredential
- inDefaultChain bool
- // tokenProvider is used by tests to fake invoking azd
- tokenProvider azdTokenProvider
-}
-
-// AzureDeveloperCLICredential authenticates as the identity logged in to the [Azure Developer CLI].
-//
-// [Azure Developer CLI]: https://learn.microsoft.com/azure/developer/azure-developer-cli/overview
-type AzureDeveloperCLICredential struct {
- mu *sync.Mutex
- opts AzureDeveloperCLICredentialOptions
-}
-
-// NewAzureDeveloperCLICredential constructs an AzureDeveloperCLICredential. Pass nil to accept default options.
-func NewAzureDeveloperCLICredential(options *AzureDeveloperCLICredentialOptions) (*AzureDeveloperCLICredential, error) {
- cp := AzureDeveloperCLICredentialOptions{}
- if options != nil {
- cp = *options
- }
- if cp.TenantID != "" && !validTenantID(cp.TenantID) {
- return nil, errInvalidTenantID
- }
- if cp.tokenProvider == nil {
- cp.tokenProvider = defaultAzdTokenProvider
- }
- return &AzureDeveloperCLICredential{mu: &sync.Mutex{}, opts: cp}, nil
-}
-
-// GetToken requests a token from the Azure Developer CLI. This credential doesn't cache tokens, so every call invokes azd.
-// This method is called automatically by Azure SDK clients.
-func (c *AzureDeveloperCLICredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- at := azcore.AccessToken{}
- if len(opts.Scopes) == 0 {
- return at, errors.New(credNameAzureDeveloperCLI + ": GetToken() requires at least one scope")
- }
- for _, scope := range opts.Scopes {
- if !validScope(scope) {
- return at, fmt.Errorf("%s.GetToken(): invalid scope %q", credNameAzureDeveloperCLI, scope)
- }
- }
- tenant, err := resolveTenant(c.opts.TenantID, opts.TenantID, credNameAzureDeveloperCLI, c.opts.AdditionallyAllowedTenants)
- if err != nil {
- return at, err
- }
- c.mu.Lock()
- defer c.mu.Unlock()
- b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant)
- if err == nil {
- at, err = c.createAccessToken(b)
- }
- if err != nil {
- err = unavailableIfInChain(err, c.opts.inDefaultChain)
- return at, err
- }
- msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", credNameAzureDeveloperCLI, strings.Join(opts.Scopes, ", "))
- log.Write(EventAuthentication, msg)
- return at, nil
-}
-
-// defaultAzTokenProvider invokes the Azure Developer CLI to acquire a token. It assumes
-// callers have verified that all string arguments are safe to pass to the CLI.
-var defaultAzdTokenProvider azdTokenProvider = func(ctx context.Context, scopes []string, tenant string) ([]byte, error) {
- // set a default timeout for this authentication iff the application hasn't done so already
- var cancel context.CancelFunc
- if _, hasDeadline := ctx.Deadline(); !hasDeadline {
- ctx, cancel = context.WithTimeout(ctx, cliTimeout)
- defer cancel()
- }
- commandLine := "azd auth token -o json"
- if tenant != "" {
- commandLine += " --tenant-id " + tenant
- }
- for _, scope := range scopes {
- commandLine += " --scope " + scope
- }
- var cliCmd *exec.Cmd
- if runtime.GOOS == "windows" {
- dir := os.Getenv("SYSTEMROOT")
- if dir == "" {
- return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, "environment variable 'SYSTEMROOT' has no value")
- }
- cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine)
- cliCmd.Dir = dir
- } else {
- cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine)
- cliCmd.Dir = "/bin"
- }
- cliCmd.Env = os.Environ()
- var stderr bytes.Buffer
- cliCmd.Stderr = &stderr
- output, err := cliCmd.Output()
- if err != nil {
- msg := stderr.String()
- var exErr *exec.ExitError
- if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'azd' is not recognized") {
- msg = "Azure Developer CLI not found on path"
- } else if strings.Contains(msg, "azd auth login") {
- msg = `please run "azd auth login" from a command prompt to authenticate before using this credential`
- }
- if msg == "" {
- msg = err.Error()
- }
- return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, msg)
- }
- return output, nil
-}
-
-func (c *AzureDeveloperCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) {
- t := struct {
- AccessToken string `json:"token"`
- ExpiresOn string `json:"expiresOn"`
- }{}
- err := json.Unmarshal(tk, &t)
- if err != nil {
- return azcore.AccessToken{}, err
- }
- exp, err := time.Parse("2006-01-02T15:04:05Z", t.ExpiresOn)
- if err != nil {
- return azcore.AccessToken{}, fmt.Errorf("error parsing token expiration time %q: %v", t.ExpiresOn, err)
- }
- return azcore.AccessToken{
- ExpiresOn: exp.UTC(),
- Token: t.AccessToken,
- }, nil
-}
-
-var _ azcore.TokenCredential = (*AzureDeveloperCLICredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go
deleted file mode 100644
index 2655543a..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "os"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
-)
-
-const (
- credNameAzurePipelines = "AzurePipelinesCredential"
- oidcAPIVersion = "7.1"
- systemAccessToken = "SYSTEM_ACCESSTOKEN"
- systemOIDCRequestURI = "SYSTEM_OIDCREQUESTURI"
-)
-
-// azurePipelinesCredential authenticates with workload identity federation in an Azure Pipeline. See
-// [Azure Pipelines documentation] for more information.
-//
-// [Azure Pipelines documentation]: https://learn.microsoft.com/azure/devops/pipelines/library/connect-to-azure?view=azure-devops#create-an-azure-resource-manager-service-connection-that-uses-workload-identity-federation
-type azurePipelinesCredential struct {
- connectionID, oidcURI, systemAccessToken string
- cred *ClientAssertionCredential
-}
-
-// azurePipelinesCredentialOptions contains optional parameters for AzurePipelinesCredential.
-type azurePipelinesCredentialOptions struct {
- azcore.ClientOptions
-
- // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens.
- // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the
- // application is registered.
- AdditionallyAllowedTenants []string
-
- // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
- // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
- // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
- // the application responsible for ensuring the configured authority is valid and trustworthy.
- DisableInstanceDiscovery bool
-}
-
-// newAzurePipelinesCredential is the constructor for AzurePipelinesCredential. In addition to its required arguments,
-// it reads a security token for the running build, which is required to authenticate the service connection, from the
-// environment variable SYSTEM_ACCESSTOKEN. See the [Azure Pipelines documentation] for an example showing how to set
-// this variable in build job YAML.
-//
-// [Azure Pipelines documentation]: https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken
-func newAzurePipelinesCredential(tenantID, clientID, serviceConnectionID string, options *azurePipelinesCredentialOptions) (*azurePipelinesCredential, error) {
- if options == nil {
- options = &azurePipelinesCredentialOptions{}
- }
- u := os.Getenv(systemOIDCRequestURI)
- if u == "" {
- return nil, fmt.Errorf("no value for environment variable %s. This should be set by Azure Pipelines", systemOIDCRequestURI)
- }
- sat := os.Getenv(systemAccessToken)
- if sat == "" {
- return nil, errors.New("no value for environment variable " + systemAccessToken)
- }
- a := azurePipelinesCredential{
- connectionID: serviceConnectionID,
- oidcURI: u,
- systemAccessToken: sat,
- }
- caco := ClientAssertionCredentialOptions{
- AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- }
- cred, err := NewClientAssertionCredential(tenantID, clientID, a.getAssertion, &caco)
- if err != nil {
- return nil, err
- }
- cred.client.name = credNameAzurePipelines
- a.cred = cred
- return &a, nil
-}
-
-// GetToken requests an access token from Microsoft Entra ID. Azure SDK clients call this method automatically.
-func (a *azurePipelinesCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- var err error
- ctx, endSpan := runtime.StartSpan(ctx, credNameAzurePipelines+"."+traceOpGetToken, a.cred.client.azClient.Tracer(), nil)
- defer func() { endSpan(err) }()
- tk, err := a.cred.GetToken(ctx, opts)
- return tk, err
-}
-
-func (a *azurePipelinesCredential) getAssertion(ctx context.Context) (string, error) {
- url := a.oidcURI + "?api-version=" + oidcAPIVersion + "&serviceConnectionId=" + a.connectionID
- url, err := runtime.EncodeQueryParams(url)
- if err != nil {
- return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't encode OIDC URL: "+err.Error(), nil, nil)
- }
- req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
- if err != nil {
- return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't create OIDC token request: "+err.Error(), nil, nil)
- }
- req.Header.Set("Authorization", "Bearer "+a.systemAccessToken)
- res, err := doForClient(a.cred.client.azClient, req)
- if err != nil {
- return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't send OIDC token request: "+err.Error(), nil, nil)
- }
- if res.StatusCode != http.StatusOK {
- msg := res.Status + " response from the OIDC endpoint. Check service connection ID and Pipeline configuration"
- // include the response because its body, if any, probably contains an error message.
- // OK responses aren't included with errors because they probably contain secrets
- return "", newAuthenticationFailedError(credNameAzurePipelines, msg, res, nil)
- }
- b, err := runtime.Payload(res)
- if err != nil {
- return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't read OIDC response content: "+err.Error(), nil, nil)
- }
- var r struct {
- OIDCToken string `json:"oidcToken"`
- }
- err = json.Unmarshal(b, &r)
- if err != nil {
- return "", newAuthenticationFailedError(credNameAzurePipelines, "unexpected response from OIDC endpoint", nil, nil)
- }
- return r.OIDCToken, nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go
deleted file mode 100644
index 6c35a941..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go
+++ /dev/null
@@ -1,138 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import (
- "context"
- "errors"
- "fmt"
- "strings"
- "sync"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
-)
-
-// ChainedTokenCredentialOptions contains optional parameters for ChainedTokenCredential.
-type ChainedTokenCredentialOptions struct {
- // RetrySources configures how the credential uses its sources. When true, the credential always attempts to
- // authenticate through each source in turn, stopping when one succeeds. When false, the credential authenticates
- // only through this first successful source--it never again tries the sources which failed.
- RetrySources bool
-}
-
-// ChainedTokenCredential links together multiple credentials and tries them sequentially when authenticating. By default,
-// it tries all the credentials until one authenticates, after which it always uses that credential.
-type ChainedTokenCredential struct {
- cond *sync.Cond
- iterating bool
- name string
- retrySources bool
- sources []azcore.TokenCredential
- successfulCredential azcore.TokenCredential
-}
-
-// NewChainedTokenCredential creates a ChainedTokenCredential. Pass nil for options to accept defaults.
-func NewChainedTokenCredential(sources []azcore.TokenCredential, options *ChainedTokenCredentialOptions) (*ChainedTokenCredential, error) {
- if len(sources) == 0 {
- return nil, errors.New("sources must contain at least one TokenCredential")
- }
- for _, source := range sources {
- if source == nil { // cannot have a nil credential in the chain or else the application will panic when GetToken() is called on nil
- return nil, errors.New("sources cannot contain nil")
- }
- }
- cp := make([]azcore.TokenCredential, len(sources))
- copy(cp, sources)
- if options == nil {
- options = &ChainedTokenCredentialOptions{}
- }
- return &ChainedTokenCredential{
- cond: sync.NewCond(&sync.Mutex{}),
- name: "ChainedTokenCredential",
- retrySources: options.RetrySources,
- sources: cp,
- }, nil
-}
-
-// GetToken calls GetToken on the chained credentials in turn, stopping when one returns a token.
-// This method is called automatically by Azure SDK clients.
-func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- if !c.retrySources {
- // ensure only one goroutine at a time iterates the sources and perhaps sets c.successfulCredential
- c.cond.L.Lock()
- for {
- if c.successfulCredential != nil {
- c.cond.L.Unlock()
- return c.successfulCredential.GetToken(ctx, opts)
- }
- if !c.iterating {
- c.iterating = true
- // allow other goroutines to wait while this one iterates
- c.cond.L.Unlock()
- break
- }
- c.cond.Wait()
- }
- }
-
- var (
- err error
- errs []error
- successfulCredential azcore.TokenCredential
- token azcore.AccessToken
- unavailableErr credentialUnavailable
- )
- for _, cred := range c.sources {
- token, err = cred.GetToken(ctx, opts)
- if err == nil {
- log.Writef(EventAuthentication, "%s authenticated with %s", c.name, extractCredentialName(cred))
- successfulCredential = cred
- break
- }
- errs = append(errs, err)
- // continue to the next source iff this one returned credentialUnavailableError
- if !errors.As(err, &unavailableErr) {
- break
- }
- }
- if c.iterating {
- c.cond.L.Lock()
- // this is nil when all credentials returned an error
- c.successfulCredential = successfulCredential
- c.iterating = false
- c.cond.L.Unlock()
- c.cond.Broadcast()
- }
- // err is the error returned by the last GetToken call. It will be nil when that call succeeds
- if err != nil {
- // return credentialUnavailableError iff all sources did so; return AuthenticationFailedError otherwise
- msg := createChainedErrorMessage(errs)
- if errors.As(err, &unavailableErr) {
- err = newCredentialUnavailableError(c.name, msg)
- } else {
- res := getResponseFromError(err)
- err = newAuthenticationFailedError(c.name, msg, res, err)
- }
- }
- return token, err
-}
-
-func createChainedErrorMessage(errs []error) string {
- msg := "failed to acquire a token.\nAttempted credentials:"
- for _, err := range errs {
- msg += fmt.Sprintf("\n\t%s", err.Error())
- }
- return msg
-}
-
-func extractCredentialName(credential azcore.TokenCredential) string {
- return strings.TrimPrefix(fmt.Sprintf("%T", credential), "*azidentity.")
-}
-
-var _ azcore.TokenCredential = (*ChainedTokenCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml
deleted file mode 100644
index 4cd8c514..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml
+++ /dev/null
@@ -1,46 +0,0 @@
-# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file.
-trigger:
- branches:
- include:
- - main
- - feature/*
- - hotfix/*
- - release/*
- paths:
- include:
- - sdk/azidentity/
-
-pr:
- branches:
- include:
- - main
- - feature/*
- - hotfix/*
- - release/*
- paths:
- include:
- - sdk/azidentity/
-
-extends:
- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
- parameters:
- CloudConfig:
- Public:
- SubscriptionConfigurations:
- - $(sub-config-azure-cloud-test-resources)
- - $(sub-config-identity-test-resources)
- EnvVars:
- SYSTEM_ACCESSTOKEN: $(System.AccessToken)
- RunLiveTests: true
- ServiceDirectory: azidentity
- UsePipelineProxy: false
-
- ${{ if endsWith(variables['Build.DefinitionName'], 'weekly') }}:
- MatrixConfigs:
- - Name: managed_identity_matrix
- GenerateVMJobs: true
- Path: sdk/azidentity/managed-identity-matrix.json
- Selection: sparse
- MatrixReplace:
- - Pool=.*LINUXPOOL.*/azsdk-pool-mms-ubuntu-2204-identitymsi
- - OSVmImage=.*LINUXNEXTVMIMAGE.*/azsdk-pool-mms-ubuntu-2204-1espt
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go
deleted file mode 100644
index b588750e..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go
+++ /dev/null
@@ -1,85 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import (
- "context"
- "errors"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
-)
-
-const credNameAssertion = "ClientAssertionCredential"
-
-// ClientAssertionCredential authenticates an application with assertions provided by a callback function.
-// This credential is for advanced scenarios. [ClientCertificateCredential] has a more convenient API for
-// the most common assertion scenario, authenticating a service principal with a certificate. See
-// [Microsoft Entra ID documentation] for details of the assertion format.
-//
-// [Microsoft Entra ID documentation]: https://learn.microsoft.com/entra/identity-platform/certificate-credentials#assertion-format
-type ClientAssertionCredential struct {
- client *confidentialClient
-}
-
-// ClientAssertionCredentialOptions contains optional parameters for ClientAssertionCredential.
-type ClientAssertionCredentialOptions struct {
- azcore.ClientOptions
-
- // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens.
- // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the
- // application is registered.
- AdditionallyAllowedTenants []string
-
- // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
- // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
- // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
- // the application responsible for ensuring the configured authority is valid and trustworthy.
- DisableInstanceDiscovery bool
-
- // tokenCachePersistenceOptions enables persistent token caching when not nil.
- tokenCachePersistenceOptions *tokenCachePersistenceOptions
-}
-
-// NewClientAssertionCredential constructs a ClientAssertionCredential. The getAssertion function must be thread safe. Pass nil for options to accept defaults.
-func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(context.Context) (string, error), options *ClientAssertionCredentialOptions) (*ClientAssertionCredential, error) {
- if getAssertion == nil {
- return nil, errors.New("getAssertion must be a function that returns assertions")
- }
- if options == nil {
- options = &ClientAssertionCredentialOptions{}
- }
- cred := confidential.NewCredFromAssertionCallback(
- func(ctx context.Context, _ confidential.AssertionRequestOptions) (string, error) {
- return getAssertion(ctx)
- },
- )
- msalOpts := confidentialClientOptions{
- AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- tokenCachePersistenceOptions: options.tokenCachePersistenceOptions,
- }
- c, err := newConfidentialClient(tenantID, clientID, credNameAssertion, cred, msalOpts)
- if err != nil {
- return nil, err
- }
- return &ClientAssertionCredential{client: c}, nil
-}
-
-// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients.
-func (c *ClientAssertionCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- var err error
- ctx, endSpan := runtime.StartSpan(ctx, credNameAssertion+"."+traceOpGetToken, c.client.azClient.Tracer(), nil)
- defer func() { endSpan(err) }()
- tk, err := c.client.GetToken(ctx, opts)
- return tk, err
-}
-
-var _ azcore.TokenCredential = (*ClientAssertionCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go
deleted file mode 100644
index 80cd96b5..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go
+++ /dev/null
@@ -1,174 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import (
- "context"
- "crypto"
- "crypto/x509"
- "encoding/pem"
- "errors"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
- "golang.org/x/crypto/pkcs12"
-)
-
-const credNameCert = "ClientCertificateCredential"
-
-// ClientCertificateCredentialOptions contains optional parameters for ClientCertificateCredential.
-type ClientCertificateCredentialOptions struct {
- azcore.ClientOptions
-
- // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens.
- // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the
- // application is registered.
- AdditionallyAllowedTenants []string
-
- // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
- // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
- // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
- // the application responsible for ensuring the configured authority is valid and trustworthy.
- DisableInstanceDiscovery bool
-
- // SendCertificateChain controls whether the credential sends the public certificate chain in the x5c
- // header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication.
- // Defaults to False.
- SendCertificateChain bool
-
- // tokenCachePersistenceOptions enables persistent token caching when not nil.
- tokenCachePersistenceOptions *tokenCachePersistenceOptions
-}
-
-// ClientCertificateCredential authenticates a service principal with a certificate.
-type ClientCertificateCredential struct {
- client *confidentialClient
-}
-
-// NewClientCertificateCredential constructs a ClientCertificateCredential. Pass nil for options to accept defaults. See
-// [ParseCertificates] for help loading a certificate.
-func NewClientCertificateCredential(tenantID string, clientID string, certs []*x509.Certificate, key crypto.PrivateKey, options *ClientCertificateCredentialOptions) (*ClientCertificateCredential, error) {
- if len(certs) == 0 {
- return nil, errors.New("at least one certificate is required")
- }
- if options == nil {
- options = &ClientCertificateCredentialOptions{}
- }
- cred, err := confidential.NewCredFromCert(certs, key)
- if err != nil {
- return nil, err
- }
- msalOpts := confidentialClientOptions{
- AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- SendX5C: options.SendCertificateChain,
- tokenCachePersistenceOptions: options.tokenCachePersistenceOptions,
- }
- c, err := newConfidentialClient(tenantID, clientID, credNameCert, cred, msalOpts)
- if err != nil {
- return nil, err
- }
- return &ClientCertificateCredential{client: c}, nil
-}
-
-// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients.
-func (c *ClientCertificateCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- var err error
- ctx, endSpan := runtime.StartSpan(ctx, credNameCert+"."+traceOpGetToken, c.client.azClient.Tracer(), nil)
- defer func() { endSpan(err) }()
- tk, err := c.client.GetToken(ctx, opts)
- return tk, err
-}
-
-// ParseCertificates loads certificates and a private key, in PEM or PKCS#12 format, for use with [NewClientCertificateCredential].
-// Pass nil for password if the private key isn't encrypted. This function has limitations, for example it can't decrypt keys in
-// PEM format or PKCS#12 certificates that use SHA256 for message authentication. If you encounter such limitations, consider
-// using another module to load the certificate and private key.
-func ParseCertificates(certData []byte, password []byte) ([]*x509.Certificate, crypto.PrivateKey, error) {
- var blocks []*pem.Block
- var err error
- if len(password) == 0 {
- blocks, err = loadPEMCert(certData)
- }
- if len(blocks) == 0 || err != nil {
- blocks, err = loadPKCS12Cert(certData, string(password))
- }
- if err != nil {
- return nil, nil, err
- }
- var certs []*x509.Certificate
- var pk crypto.PrivateKey
- for _, block := range blocks {
- switch block.Type {
- case "CERTIFICATE":
- c, err := x509.ParseCertificate(block.Bytes)
- if err != nil {
- return nil, nil, err
- }
- certs = append(certs, c)
- case "PRIVATE KEY":
- if pk != nil {
- return nil, nil, errors.New("certData contains multiple private keys")
- }
- pk, err = x509.ParsePKCS8PrivateKey(block.Bytes)
- if err != nil {
- pk, err = x509.ParsePKCS1PrivateKey(block.Bytes)
- }
- if err != nil {
- return nil, nil, err
- }
- case "RSA PRIVATE KEY":
- if pk != nil {
- return nil, nil, errors.New("certData contains multiple private keys")
- }
- pk, err = x509.ParsePKCS1PrivateKey(block.Bytes)
- if err != nil {
- return nil, nil, err
- }
- }
- }
- if len(certs) == 0 {
- return nil, nil, errors.New("found no certificate")
- }
- if pk == nil {
- return nil, nil, errors.New("found no private key")
- }
- return certs, pk, nil
-}
-
-func loadPEMCert(certData []byte) ([]*pem.Block, error) {
- blocks := []*pem.Block{}
- for {
- var block *pem.Block
- block, certData = pem.Decode(certData)
- if block == nil {
- break
- }
- blocks = append(blocks, block)
- }
- if len(blocks) == 0 {
- return nil, errors.New("didn't find any PEM blocks")
- }
- return blocks, nil
-}
-
-func loadPKCS12Cert(certData []byte, password string) ([]*pem.Block, error) {
- blocks, err := pkcs12.ToPEM(certData, password)
- if err != nil {
- return nil, err
- }
- if len(blocks) == 0 {
- // not mentioning PKCS12 in this message because we end up here when certData is garbage
- return nil, errors.New("didn't find any certificate content")
- }
- return blocks, err
-}
-
-var _ azcore.TokenCredential = (*ClientCertificateCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go
deleted file mode 100644
index 9e6772e9..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go
+++ /dev/null
@@ -1,75 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import (
- "context"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
-)
-
-const credNameSecret = "ClientSecretCredential"
-
-// ClientSecretCredentialOptions contains optional parameters for ClientSecretCredential.
-type ClientSecretCredentialOptions struct {
- azcore.ClientOptions
-
- // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens.
- // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the
- // application is registered.
- AdditionallyAllowedTenants []string
-
- // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
- // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
- // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
- // the application responsible for ensuring the configured authority is valid and trustworthy.
- DisableInstanceDiscovery bool
-
- // tokenCachePersistenceOptions enables persistent token caching when not nil.
- tokenCachePersistenceOptions *tokenCachePersistenceOptions
-}
-
-// ClientSecretCredential authenticates an application with a client secret.
-type ClientSecretCredential struct {
- client *confidentialClient
-}
-
-// NewClientSecretCredential constructs a ClientSecretCredential. Pass nil for options to accept defaults.
-func NewClientSecretCredential(tenantID string, clientID string, clientSecret string, options *ClientSecretCredentialOptions) (*ClientSecretCredential, error) {
- if options == nil {
- options = &ClientSecretCredentialOptions{}
- }
- cred, err := confidential.NewCredFromSecret(clientSecret)
- if err != nil {
- return nil, err
- }
- msalOpts := confidentialClientOptions{
- AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- tokenCachePersistenceOptions: options.tokenCachePersistenceOptions,
- }
- c, err := newConfidentialClient(tenantID, clientID, credNameSecret, cred, msalOpts)
- if err != nil {
- return nil, err
- }
- return &ClientSecretCredential{client: c}, nil
-}
-
-// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients.
-func (c *ClientSecretCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- var err error
- ctx, endSpan := runtime.StartSpan(ctx, credNameSecret+"."+traceOpGetToken, c.client.azClient.Tracer(), nil)
- defer func() { endSpan(err) }()
- tk, err := c.client.GetToken(ctx, opts)
- return tk, err
-}
-
-var _ azcore.TokenCredential = (*ClientSecretCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go
deleted file mode 100644
index 3bd08c68..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go
+++ /dev/null
@@ -1,184 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import (
- "context"
- "errors"
- "fmt"
- "net/http"
- "os"
- "strings"
- "sync"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
- "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
-)
-
-type confidentialClientOptions struct {
- azcore.ClientOptions
-
- AdditionallyAllowedTenants []string
- // Assertion for on-behalf-of authentication
- Assertion string
- DisableInstanceDiscovery, SendX5C bool
- tokenCachePersistenceOptions *tokenCachePersistenceOptions
-}
-
-// confidentialClient wraps the MSAL confidential client
-type confidentialClient struct {
- cae, noCAE msalConfidentialClient
- caeMu, noCAEMu, clientMu *sync.Mutex
- clientID, tenantID string
- cred confidential.Credential
- host string
- name string
- opts confidentialClientOptions
- region string
- azClient *azcore.Client
-}
-
-func newConfidentialClient(tenantID, clientID, name string, cred confidential.Credential, opts confidentialClientOptions) (*confidentialClient, error) {
- if !validTenantID(tenantID) {
- return nil, errInvalidTenantID
- }
- host, err := setAuthorityHost(opts.Cloud)
- if err != nil {
- return nil, err
- }
- client, err := azcore.NewClient(module, version, runtime.PipelineOptions{
- Tracing: runtime.TracingOptions{
- Namespace: traceNamespace,
- },
- }, &opts.ClientOptions)
- if err != nil {
- return nil, err
- }
- opts.AdditionallyAllowedTenants = resolveAdditionalTenants(opts.AdditionallyAllowedTenants)
- return &confidentialClient{
- caeMu: &sync.Mutex{},
- clientID: clientID,
- clientMu: &sync.Mutex{},
- cred: cred,
- host: host,
- name: name,
- noCAEMu: &sync.Mutex{},
- opts: opts,
- region: os.Getenv(azureRegionalAuthorityName),
- tenantID: tenantID,
- azClient: client,
- }, nil
-}
-
-// GetToken requests an access token from MSAL, checking the cache first.
-func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenRequestOptions) (azcore.AccessToken, error) {
- if len(tro.Scopes) < 1 {
- return azcore.AccessToken{}, fmt.Errorf("%s.GetToken() requires at least one scope", c.name)
- }
- // we don't resolve the tenant for managed identities because they acquire tokens only from their home tenants
- if c.name != credNameManagedIdentity {
- tenant, err := c.resolveTenant(tro.TenantID)
- if err != nil {
- return azcore.AccessToken{}, err
- }
- tro.TenantID = tenant
- }
- client, mu, err := c.client(tro)
- if err != nil {
- return azcore.AccessToken{}, err
- }
- mu.Lock()
- defer mu.Unlock()
- var ar confidential.AuthResult
- if c.opts.Assertion != "" {
- ar, err = client.AcquireTokenOnBehalfOf(ctx, c.opts.Assertion, tro.Scopes, confidential.WithClaims(tro.Claims), confidential.WithTenantID(tro.TenantID))
- } else {
- ar, err = client.AcquireTokenSilent(ctx, tro.Scopes, confidential.WithClaims(tro.Claims), confidential.WithTenantID(tro.TenantID))
- if err != nil {
- ar, err = client.AcquireTokenByCredential(ctx, tro.Scopes, confidential.WithClaims(tro.Claims), confidential.WithTenantID(tro.TenantID))
- }
- }
- if err != nil {
- // We could get a credentialUnavailableError from managed identity authentication because in that case the error comes from our code.
- // We return it directly because it affects the behavior of credential chains. Otherwise, we return AuthenticationFailedError.
- var unavailableErr credentialUnavailable
- if !errors.As(err, &unavailableErr) {
- res := getResponseFromError(err)
- err = newAuthenticationFailedError(c.name, err.Error(), res, err)
- }
- } else {
- msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", c.name, strings.Join(ar.GrantedScopes, ", "))
- log.Write(EventAuthentication, msg)
- }
- return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
-}
-
-func (c *confidentialClient) client(tro policy.TokenRequestOptions) (msalConfidentialClient, *sync.Mutex, error) {
- c.clientMu.Lock()
- defer c.clientMu.Unlock()
- if tro.EnableCAE {
- if c.cae == nil {
- client, err := c.newMSALClient(true)
- if err != nil {
- return nil, nil, err
- }
- c.cae = client
- }
- return c.cae, c.caeMu, nil
- }
- if c.noCAE == nil {
- client, err := c.newMSALClient(false)
- if err != nil {
- return nil, nil, err
- }
- c.noCAE = client
- }
- return c.noCAE, c.noCAEMu, nil
-}
-
-func (c *confidentialClient) newMSALClient(enableCAE bool) (msalConfidentialClient, error) {
- cache, err := internal.NewCache(c.opts.tokenCachePersistenceOptions, enableCAE)
- if err != nil {
- return nil, err
- }
- authority := runtime.JoinPaths(c.host, c.tenantID)
- o := []confidential.Option{
- confidential.WithAzureRegion(c.region),
- confidential.WithCache(cache),
- confidential.WithHTTPClient(c),
- }
- if enableCAE {
- o = append(o, confidential.WithClientCapabilities(cp1))
- }
- if c.opts.SendX5C {
- o = append(o, confidential.WithX5C())
- }
- if c.opts.DisableInstanceDiscovery || strings.ToLower(c.tenantID) == "adfs" {
- o = append(o, confidential.WithInstanceDiscovery(false))
- }
- return confidential.New(authority, c.clientID, c.cred, o...)
-}
-
-// resolveTenant returns the correct WithTenantID() argument for a token request given the client's
-// configuration, or an error when that configuration doesn't allow the specified tenant
-func (c *confidentialClient) resolveTenant(specified string) (string, error) {
- return resolveTenant(c.tenantID, specified, c.name, c.opts.AdditionallyAllowedTenants)
-}
-
-// these methods satisfy the MSAL ops.HTTPClient interface
-
-func (c *confidentialClient) CloseIdleConnections() {
- // do nothing
-}
-
-func (c *confidentialClient) Do(r *http.Request) (*http.Response, error) {
- return doForClient(c.azClient, r)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
deleted file mode 100644
index 551d3199..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
+++ /dev/null
@@ -1,165 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import (
- "context"
- "os"
- "strings"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
-)
-
-// DefaultAzureCredentialOptions contains optional parameters for DefaultAzureCredential.
-// These options may not apply to all credentials in the chain.
-type DefaultAzureCredentialOptions struct {
- // ClientOptions has additional options for credentials that use an Azure SDK HTTP pipeline. These options don't apply
- // to credential types that authenticate via external tools such as the Azure CLI.
- azcore.ClientOptions
-
- // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. Add
- // the wildcard value "*" to allow the credential to acquire tokens for any tenant. This value can also be
- // set as a semicolon delimited list of tenants in the environment variable AZURE_ADDITIONALLY_ALLOWED_TENANTS.
- AdditionallyAllowedTenants []string
- // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
- // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
- // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
- // the application responsible for ensuring the configured authority is valid and trustworthy.
- DisableInstanceDiscovery bool
- // TenantID sets the default tenant for authentication via the Azure CLI and workload identity.
- TenantID string
-}
-
-// DefaultAzureCredential is a default credential chain for applications that will deploy to Azure.
-// It combines credentials suitable for deployment with credentials suitable for local development.
-// It attempts to authenticate with each of these credential types, in the following order, stopping
-// when one provides a token:
-//
-// - [EnvironmentCredential]
-// - [WorkloadIdentityCredential], if environment variable configuration is set by the Azure workload
-// identity webhook. Use [WorkloadIdentityCredential] directly when not using the webhook or needing
-// more control over its configuration.
-// - [ManagedIdentityCredential]
-// - [AzureCLICredential]
-// - [AzureDeveloperCLICredential]
-//
-// Consult the documentation for these credential types for more information on how they authenticate.
-// Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for
-// every subsequent authentication.
-type DefaultAzureCredential struct {
- chain *ChainedTokenCredential
-}
-
-// NewDefaultAzureCredential creates a DefaultAzureCredential. Pass nil for options to accept defaults.
-func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*DefaultAzureCredential, error) {
- var creds []azcore.TokenCredential
- var errorMessages []string
-
- if options == nil {
- options = &DefaultAzureCredentialOptions{}
- }
- additionalTenants := options.AdditionallyAllowedTenants
- if len(additionalTenants) == 0 {
- if tenants := os.Getenv(azureAdditionallyAllowedTenants); tenants != "" {
- additionalTenants = strings.Split(tenants, ";")
- }
- }
-
- envCred, err := NewEnvironmentCredential(&EnvironmentCredentialOptions{
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- additionallyAllowedTenants: additionalTenants,
- })
- if err == nil {
- creds = append(creds, envCred)
- } else {
- errorMessages = append(errorMessages, "EnvironmentCredential: "+err.Error())
- creds = append(creds, &defaultCredentialErrorReporter{credType: "EnvironmentCredential", err: err})
- }
-
- wic, err := NewWorkloadIdentityCredential(&WorkloadIdentityCredentialOptions{
- AdditionallyAllowedTenants: additionalTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- TenantID: options.TenantID,
- })
- if err == nil {
- creds = append(creds, wic)
- } else {
- errorMessages = append(errorMessages, credNameWorkloadIdentity+": "+err.Error())
- creds = append(creds, &defaultCredentialErrorReporter{credType: credNameWorkloadIdentity, err: err})
- }
-
- o := &ManagedIdentityCredentialOptions{ClientOptions: options.ClientOptions, dac: true}
- if ID, ok := os.LookupEnv(azureClientID); ok {
- o.ID = ClientID(ID)
- }
- miCred, err := NewManagedIdentityCredential(o)
- if err == nil {
- creds = append(creds, miCred)
- } else {
- errorMessages = append(errorMessages, credNameManagedIdentity+": "+err.Error())
- creds = append(creds, &defaultCredentialErrorReporter{credType: credNameManagedIdentity, err: err})
- }
-
- cliCred, err := NewAzureCLICredential(&AzureCLICredentialOptions{AdditionallyAllowedTenants: additionalTenants, TenantID: options.TenantID})
- if err == nil {
- creds = append(creds, cliCred)
- } else {
- errorMessages = append(errorMessages, credNameAzureCLI+": "+err.Error())
- creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureCLI, err: err})
- }
-
- azdCred, err := NewAzureDeveloperCLICredential(&AzureDeveloperCLICredentialOptions{
- AdditionallyAllowedTenants: additionalTenants,
- TenantID: options.TenantID,
- })
- if err == nil {
- creds = append(creds, azdCred)
- } else {
- errorMessages = append(errorMessages, credNameAzureDeveloperCLI+": "+err.Error())
- creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureDeveloperCLI, err: err})
- }
-
- if len(errorMessages) > 0 {
- log.Writef(EventAuthentication, "NewDefaultAzureCredential failed to initialize some credentials:\n\t%s", strings.Join(errorMessages, "\n\t"))
- }
-
- chain, err := NewChainedTokenCredential(creds, nil)
- if err != nil {
- return nil, err
- }
- chain.name = "DefaultAzureCredential"
- return &DefaultAzureCredential{chain: chain}, nil
-}
-
-// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients.
-func (c *DefaultAzureCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- return c.chain.GetToken(ctx, opts)
-}
-
-var _ azcore.TokenCredential = (*DefaultAzureCredential)(nil)
-
-// defaultCredentialErrorReporter is a substitute for credentials that couldn't be constructed.
-// Its GetToken method always returns a credentialUnavailableError having the same message as
-// the error that prevented constructing the credential. This ensures the message is present
-// in the error returned by ChainedTokenCredential.GetToken()
-type defaultCredentialErrorReporter struct {
- credType string
- err error
-}
-
-func (d *defaultCredentialErrorReporter) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- if _, ok := d.err.(credentialUnavailable); ok {
- return azcore.AccessToken{}, d.err
- }
- return azcore.AccessToken{}, newCredentialUnavailableError(d.credType, d.err.Error())
-}
-
-var _ azcore.TokenCredential = (*defaultCredentialErrorReporter)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go
deleted file mode 100644
index be963d3a..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go
+++ /dev/null
@@ -1,38 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import (
- "errors"
- "time"
-)
-
-// cliTimeout is the default timeout for authentication attempts via CLI tools
-const cliTimeout = 10 * time.Second
-
-// unavailableIfInChain returns err or, if the credential was invoked by DefaultAzureCredential, a
-// credentialUnavailableError having the same message. This ensures DefaultAzureCredential will try
-// the next credential in its chain (another developer credential).
-func unavailableIfInChain(err error, inDefaultChain bool) error {
- if err != nil && inDefaultChain {
- var unavailableErr credentialUnavailable
- if !errors.As(err, &unavailableErr) {
- err = newCredentialUnavailableError(credNameAzureDeveloperCLI, err.Error())
- }
- }
- return err
-}
-
-// validScope is for credentials authenticating via external tools. The authority validates scopes for all other credentials.
-func validScope(scope string) bool {
- for _, r := range scope {
- if !(alphanumeric(r) || r == '.' || r == '-' || r == '_' || r == '/' || r == ':') {
- return false
- }
- }
- return true
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go
deleted file mode 100644
index cd30bedd..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go
+++ /dev/null
@@ -1,138 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import (
- "context"
- "fmt"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
-)
-
-const credNameDeviceCode = "DeviceCodeCredential"
-
-// DeviceCodeCredentialOptions contains optional parameters for DeviceCodeCredential.
-type DeviceCodeCredentialOptions struct {
- azcore.ClientOptions
-
- // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire
- // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant.
- AdditionallyAllowedTenants []string
-
- // authenticationRecord returned by a call to a credential's Authenticate method. Set this option
- // to enable the credential to use data from a previous authentication.
- authenticationRecord authenticationRecord
-
- // ClientID is the ID of the application users will authenticate to.
- // Defaults to the ID of an Azure development application.
- ClientID string
-
- // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate.
- // When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary
- // to acquire a token.
- disableAutomaticAuthentication bool
-
- // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
- // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
- // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
- // the application responsible for ensuring the configured authority is valid and trustworthy.
- DisableInstanceDiscovery bool
-
- // TenantID is the Microsoft Entra tenant the credential authenticates in. Defaults to the
- // "organizations" tenant, which can authenticate work and school accounts. Required for single-tenant
- // applications.
- TenantID string
-
- // tokenCachePersistenceOptions enables persistent token caching when not nil.
- tokenCachePersistenceOptions *tokenCachePersistenceOptions
-
- // UserPrompt controls how the credential presents authentication instructions. The credential calls
- // this function with authentication details when it receives a device code. By default, the credential
- // prints these details to stdout.
- UserPrompt func(context.Context, DeviceCodeMessage) error
-}
-
-func (o *DeviceCodeCredentialOptions) init() {
- if o.TenantID == "" {
- o.TenantID = organizationsTenantID
- }
- if o.ClientID == "" {
- o.ClientID = developerSignOnClientID
- }
- if o.UserPrompt == nil {
- o.UserPrompt = func(ctx context.Context, dc DeviceCodeMessage) error {
- fmt.Println(dc.Message)
- return nil
- }
- }
-}
-
-// DeviceCodeMessage contains the information a user needs to complete authentication.
-type DeviceCodeMessage struct {
- // UserCode is the user code returned by the service.
- UserCode string `json:"user_code"`
- // VerificationURL is the URL at which the user must authenticate.
- VerificationURL string `json:"verification_uri"`
- // Message is user instruction from Microsoft Entra ID.
- Message string `json:"message"`
-}
-
-// DeviceCodeCredential acquires tokens for a user via the device code flow, which has the
-// user browse to a Microsoft Entra URL, enter a code, and authenticate. It's useful
-// for authenticating a user in an environment without a web browser, such as an SSH session.
-// If a web browser is available, [InteractiveBrowserCredential] is more convenient because it
-// automatically opens a browser to the login page.
-type DeviceCodeCredential struct {
- client *publicClient
-}
-
-// NewDeviceCodeCredential creates a DeviceCodeCredential. Pass nil to accept default options.
-func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeCredential, error) {
- cp := DeviceCodeCredentialOptions{}
- if options != nil {
- cp = *options
- }
- cp.init()
- msalOpts := publicClientOptions{
- AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants,
- ClientOptions: cp.ClientOptions,
- DeviceCodePrompt: cp.UserPrompt,
- DisableAutomaticAuthentication: cp.disableAutomaticAuthentication,
- DisableInstanceDiscovery: cp.DisableInstanceDiscovery,
- Record: cp.authenticationRecord,
- TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions,
- }
- c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameDeviceCode, msalOpts)
- if err != nil {
- return nil, err
- }
- c.name = credNameDeviceCode
- return &DeviceCodeCredential{client: c}, nil
-}
-
-// Authenticate a user via the device code flow. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord.
-func (c *DeviceCodeCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) {
- var err error
- ctx, endSpan := runtime.StartSpan(ctx, credNameDeviceCode+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil)
- defer func() { endSpan(err) }()
- tk, err := c.client.Authenticate(ctx, opts)
- return tk, err
-}
-
-// GetToken requests an access token from Microsoft Entra ID. It will begin the device code flow and poll until the user completes authentication.
-// This method is called automatically by Azure SDK clients.
-func (c *DeviceCodeCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- var err error
- ctx, endSpan := runtime.StartSpan(ctx, credNameDeviceCode+"."+traceOpGetToken, c.client.azClient.Tracer(), nil)
- defer func() { endSpan(err) }()
- tk, err := c.client.GetToken(ctx, opts)
- return tk, err
-}
-
-var _ azcore.TokenCredential = (*DeviceCodeCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
deleted file mode 100644
index b30f5474..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
+++ /dev/null
@@ -1,167 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import (
- "context"
- "errors"
- "fmt"
- "os"
- "strings"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
-)
-
-const envVarSendCertChain = "AZURE_CLIENT_SEND_CERTIFICATE_CHAIN"
-
-// EnvironmentCredentialOptions contains optional parameters for EnvironmentCredential
-type EnvironmentCredentialOptions struct {
- azcore.ClientOptions
-
- // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
- // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
- // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
- // the application responsible for ensuring the configured authority is valid and trustworthy.
- DisableInstanceDiscovery bool
- // additionallyAllowedTenants is used only by NewDefaultAzureCredential() to enable that constructor's explicit
- // option to override the value of AZURE_ADDITIONALLY_ALLOWED_TENANTS. Applications using EnvironmentCredential
- // directly should set that variable instead. This field should remain unexported to preserve this credential's
- // unambiguous "all configuration from environment variables" design.
- additionallyAllowedTenants []string
-}
-
-// EnvironmentCredential authenticates a service principal with a secret or certificate, or a user with a password, depending
-// on environment variable configuration. It reads configuration from these variables, in the following order:
-//
-// # Service principal with client secret
-//
-// AZURE_TENANT_ID: ID of the service principal's tenant. Also called its "directory" ID.
-//
-// AZURE_CLIENT_ID: the service principal's client ID
-//
-// AZURE_CLIENT_SECRET: one of the service principal's client secrets
-//
-// # Service principal with certificate
-//
-// AZURE_TENANT_ID: ID of the service principal's tenant. Also called its "directory" ID.
-//
-// AZURE_CLIENT_ID: the service principal's client ID
-//
-// AZURE_CLIENT_CERTIFICATE_PATH: path to a PEM or PKCS12 certificate file including the private key.
-//
-// AZURE_CLIENT_CERTIFICATE_PASSWORD: (optional) password for the certificate file.
-//
-// Note that this credential uses [ParseCertificates] to load the certificate and key from the file. If this
-// function isn't able to parse your certificate, use [ClientCertificateCredential] instead.
-//
-// # User with username and password
-//
-// AZURE_TENANT_ID: (optional) tenant to authenticate in. Defaults to "organizations".
-//
-// AZURE_CLIENT_ID: client ID of the application the user will authenticate to
-//
-// AZURE_USERNAME: a username (usually an email address)
-//
-// AZURE_PASSWORD: the user's password
-//
-// # Configuration for multitenant applications
-//
-// To enable multitenant authentication, set AZURE_ADDITIONALLY_ALLOWED_TENANTS with a semicolon delimited list of tenants
-// the credential may request tokens from in addition to the tenant specified by AZURE_TENANT_ID. Set
-// AZURE_ADDITIONALLY_ALLOWED_TENANTS to "*" to enable the credential to request a token from any tenant.
-type EnvironmentCredential struct {
- cred azcore.TokenCredential
-}
-
-// NewEnvironmentCredential creates an EnvironmentCredential. Pass nil to accept default options.
-func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*EnvironmentCredential, error) {
- if options == nil {
- options = &EnvironmentCredentialOptions{}
- }
- tenantID := os.Getenv(azureTenantID)
- if tenantID == "" {
- return nil, errors.New("missing environment variable AZURE_TENANT_ID")
- }
- clientID := os.Getenv(azureClientID)
- if clientID == "" {
- return nil, errors.New("missing environment variable " + azureClientID)
- }
- // tenants set by NewDefaultAzureCredential() override the value of AZURE_ADDITIONALLY_ALLOWED_TENANTS
- additionalTenants := options.additionallyAllowedTenants
- if len(additionalTenants) == 0 {
- if tenants := os.Getenv(azureAdditionallyAllowedTenants); tenants != "" {
- additionalTenants = strings.Split(tenants, ";")
- }
- }
- if clientSecret := os.Getenv(azureClientSecret); clientSecret != "" {
- log.Write(EventAuthentication, "EnvironmentCredential will authenticate with ClientSecretCredential")
- o := &ClientSecretCredentialOptions{
- AdditionallyAllowedTenants: additionalTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- }
- cred, err := NewClientSecretCredential(tenantID, clientID, clientSecret, o)
- if err != nil {
- return nil, err
- }
- return &EnvironmentCredential{cred: cred}, nil
- }
- if certPath := os.Getenv(azureClientCertificatePath); certPath != "" {
- log.Write(EventAuthentication, "EnvironmentCredential will authenticate with ClientCertificateCredential")
- certData, err := os.ReadFile(certPath)
- if err != nil {
- return nil, fmt.Errorf(`failed to read certificate file "%s": %v`, certPath, err)
- }
- var password []byte
- if v := os.Getenv(azureClientCertificatePassword); v != "" {
- password = []byte(v)
- }
- certs, key, err := ParseCertificates(certData, password)
- if err != nil {
- return nil, fmt.Errorf("failed to parse %q due to error %q. This may be due to a limitation of this module's certificate loader. Consider calling NewClientCertificateCredential instead", certPath, err.Error())
- }
- o := &ClientCertificateCredentialOptions{
- AdditionallyAllowedTenants: additionalTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- }
- if v, ok := os.LookupEnv(envVarSendCertChain); ok {
- o.SendCertificateChain = v == "1" || strings.ToLower(v) == "true"
- }
- cred, err := NewClientCertificateCredential(tenantID, clientID, certs, key, o)
- if err != nil {
- return nil, err
- }
- return &EnvironmentCredential{cred: cred}, nil
- }
- if username := os.Getenv(azureUsername); username != "" {
- if password := os.Getenv(azurePassword); password != "" {
- log.Write(EventAuthentication, "EnvironmentCredential will authenticate with UsernamePasswordCredential")
- o := &UsernamePasswordCredentialOptions{
- AdditionallyAllowedTenants: additionalTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- }
- cred, err := NewUsernamePasswordCredential(tenantID, clientID, username, password, o)
- if err != nil {
- return nil, err
- }
- return &EnvironmentCredential{cred: cred}, nil
- }
- return nil, errors.New("no value for AZURE_PASSWORD")
- }
- return nil, errors.New("incomplete environment variable configuration. Only AZURE_TENANT_ID and AZURE_CLIENT_ID are set")
-}
-
-// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients.
-func (c *EnvironmentCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- return c.cred.GetToken(ctx, opts)
-}
-
-var _ azcore.TokenCredential = (*EnvironmentCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go
deleted file mode 100644
index 698650bb..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go
+++ /dev/null
@@ -1,168 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
- msal "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
-)
-
-// getResponseFromError retrieves the response carried by
-// an AuthenticationFailedError or MSAL CallErr, if any
-func getResponseFromError(err error) *http.Response {
- var a *AuthenticationFailedError
- var c msal.CallErr
- var res *http.Response
- if errors.As(err, &c) {
- res = c.Resp
- } else if errors.As(err, &a) {
- res = a.RawResponse
- }
- return res
-}
-
-// AuthenticationFailedError indicates an authentication request has failed.
-type AuthenticationFailedError struct {
- // RawResponse is the HTTP response motivating the error, if available.
- RawResponse *http.Response
-
- credType string
- message string
- err error
-}
-
-func newAuthenticationFailedError(credType string, message string, resp *http.Response, err error) error {
- return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp, err: err}
-}
-
-// Error implements the error interface. Note that the message contents are not contractual and can change over time.
-func (e *AuthenticationFailedError) Error() string {
- if e.RawResponse == nil {
- return e.credType + ": " + e.message
- }
- msg := &bytes.Buffer{}
- fmt.Fprintf(msg, "%s authentication failed. %s\n", e.credType, e.message)
- if e.RawResponse.Request != nil {
- fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path)
- } else {
- // this happens when the response is created from a custom HTTP transporter,
- // which doesn't guarantee to bind the original request to the response
- fmt.Fprintln(msg, "Request information not available")
- }
- fmt.Fprintln(msg, "--------------------------------------------------------------------------------")
- fmt.Fprintf(msg, "RESPONSE %s\n", e.RawResponse.Status)
- fmt.Fprintln(msg, "--------------------------------------------------------------------------------")
- body, err := runtime.Payload(e.RawResponse)
- switch {
- case err != nil:
- fmt.Fprintf(msg, "Error reading response body: %v", err)
- case len(body) > 0:
- if err := json.Indent(msg, body, "", " "); err != nil {
- // failed to pretty-print so just dump it verbatim
- fmt.Fprint(msg, string(body))
- }
- default:
- fmt.Fprint(msg, "Response contained no body")
- }
- fmt.Fprintln(msg, "\n--------------------------------------------------------------------------------")
- var anchor string
- switch e.credType {
- case credNameAzureCLI:
- anchor = "azure-cli"
- case credNameAzureDeveloperCLI:
- anchor = "azd"
- case credNameCert:
- anchor = "client-cert"
- case credNameSecret:
- anchor = "client-secret"
- case credNameManagedIdentity:
- anchor = "managed-id"
- case credNameUserPassword:
- anchor = "username-password"
- case credNameWorkloadIdentity:
- anchor = "workload"
- }
- if anchor != "" {
- fmt.Fprintf(msg, "To troubleshoot, visit https://aka.ms/azsdk/go/identity/troubleshoot#%s", anchor)
- }
- return msg.String()
-}
-
-// NonRetriable indicates the request which provoked this error shouldn't be retried.
-func (*AuthenticationFailedError) NonRetriable() {
- // marker method
-}
-
-var _ errorinfo.NonRetriable = (*AuthenticationFailedError)(nil)
-
-// authenticationRequiredError indicates a credential's Authenticate method must be called to acquire a token
-// because the credential requires user interaction and is configured not to request it automatically.
-type authenticationRequiredError struct {
- credentialUnavailableError
-
- // TokenRequestOptions for the required token. Pass this to the credential's Authenticate method.
- TokenRequestOptions policy.TokenRequestOptions
-}
-
-func newauthenticationRequiredError(credType string, tro policy.TokenRequestOptions) error {
- return &authenticationRequiredError{
- credentialUnavailableError: credentialUnavailableError{
- credType + " can't acquire a token without user interaction. Call Authenticate to authenticate a user interactively",
- },
- TokenRequestOptions: tro,
- }
-}
-
-var (
- _ credentialUnavailable = (*authenticationRequiredError)(nil)
- _ errorinfo.NonRetriable = (*authenticationRequiredError)(nil)
-)
-
-type credentialUnavailable interface {
- error
- credentialUnavailable()
-}
-
-type credentialUnavailableError struct {
- message string
-}
-
-// newCredentialUnavailableError is an internal helper that ensures consistent error message formatting
-func newCredentialUnavailableError(credType, message string) error {
- msg := fmt.Sprintf("%s: %s", credType, message)
- return &credentialUnavailableError{msg}
-}
-
-// NewCredentialUnavailableError constructs an error indicating a credential can't attempt authentication
-// because it lacks required data or state. When [ChainedTokenCredential] receives this error it will try
-// its next credential, if any.
-func NewCredentialUnavailableError(message string) error {
- return &credentialUnavailableError{message}
-}
-
-// Error implements the error interface. Note that the message contents are not contractual and can change over time.
-func (e *credentialUnavailableError) Error() string {
- return e.message
-}
-
-// NonRetriable is a marker method indicating this error should not be retried. It has no implementation.
-func (*credentialUnavailableError) NonRetriable() {}
-
-func (*credentialUnavailableError) credentialUnavailable() {}
-
-var (
- _ credentialUnavailable = (*credentialUnavailableError)(nil)
- _ errorinfo.NonRetriable = (*credentialUnavailableError)(nil)
-)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work
deleted file mode 100644
index 04ea962b..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work
+++ /dev/null
@@ -1,6 +0,0 @@
-go 1.18
-
-use (
- .
- ./cache
-)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum
deleted file mode 100644
index c592f283..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum
+++ /dev/null
@@ -1,60 +0,0 @@
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1 h1:ODs3brnqQM99Tq1PffODpAViYv3Bf8zOg464MU7p5ew=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/keybase/dbus v0.0.0-20220506165403-5aa21ea2c23a/go.mod h1:YPNKjjE7Ubp9dTbnWvsP3HT+hYnY6TfXzubYTBeUxc8=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
-github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
-golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
-golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
-golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
-golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
-golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
-golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
-golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
-golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
-golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
-golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
-golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
-golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
-golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
-golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
-golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
-golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go
deleted file mode 100644
index 056785a8..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go
+++ /dev/null
@@ -1,118 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import (
- "context"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
-)
-
-const credNameBrowser = "InteractiveBrowserCredential"
-
-// InteractiveBrowserCredentialOptions contains optional parameters for InteractiveBrowserCredential.
-type InteractiveBrowserCredentialOptions struct {
- azcore.ClientOptions
-
- // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire
- // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant.
- AdditionallyAllowedTenants []string
-
- // authenticationRecord returned by a call to a credential's Authenticate method. Set this option
- // to enable the credential to use data from a previous authentication.
- authenticationRecord authenticationRecord
-
- // ClientID is the ID of the application users will authenticate to.
- // Defaults to the ID of an Azure development application.
- ClientID string
-
- // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate.
- // When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary
- // to acquire a token.
- disableAutomaticAuthentication bool
-
- // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
- // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
- // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
- // the application responsible for ensuring the configured authority is valid and trustworthy.
- DisableInstanceDiscovery bool
-
- // LoginHint pre-populates the account prompt with a username. Users may choose to authenticate a different account.
- LoginHint string
-
- // RedirectURL is the URL Microsoft Entra ID will redirect to with the access token. This is required
- // only when setting ClientID, and must match a redirect URI in the application's registration.
- // Applications which have registered "http://localhost" as a redirect URI need not set this option.
- RedirectURL string
-
- // TenantID is the Microsoft Entra tenant the credential authenticates in. Defaults to the
- // "organizations" tenant, which can authenticate work and school accounts.
- TenantID string
-
- // tokenCachePersistenceOptions enables persistent token caching when not nil.
- tokenCachePersistenceOptions *tokenCachePersistenceOptions
-}
-
-func (o *InteractiveBrowserCredentialOptions) init() {
- if o.TenantID == "" {
- o.TenantID = organizationsTenantID
- }
- if o.ClientID == "" {
- o.ClientID = developerSignOnClientID
- }
-}
-
-// InteractiveBrowserCredential opens a browser to interactively authenticate a user.
-type InteractiveBrowserCredential struct {
- client *publicClient
-}
-
-// NewInteractiveBrowserCredential constructs a new InteractiveBrowserCredential. Pass nil to accept default options.
-func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOptions) (*InteractiveBrowserCredential, error) {
- cp := InteractiveBrowserCredentialOptions{}
- if options != nil {
- cp = *options
- }
- cp.init()
- msalOpts := publicClientOptions{
- AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants,
- ClientOptions: cp.ClientOptions,
- DisableAutomaticAuthentication: cp.disableAutomaticAuthentication,
- DisableInstanceDiscovery: cp.DisableInstanceDiscovery,
- LoginHint: cp.LoginHint,
- Record: cp.authenticationRecord,
- RedirectURL: cp.RedirectURL,
- TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions,
- }
- c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameBrowser, msalOpts)
- if err != nil {
- return nil, err
- }
- return &InteractiveBrowserCredential{client: c}, nil
-}
-
-// Authenticate a user via the default browser. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord.
-func (c *InteractiveBrowserCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) {
- var err error
- ctx, endSpan := runtime.StartSpan(ctx, credNameBrowser+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil)
- defer func() { endSpan(err) }()
- tk, err := c.client.Authenticate(ctx, opts)
- return tk, err
-}
-
-// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients.
-func (c *InteractiveBrowserCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- var err error
- ctx, endSpan := runtime.StartSpan(ctx, credNameBrowser+"."+traceOpGetToken, c.client.azClient.Tracer(), nil)
- defer func() { endSpan(err) }()
- tk, err := c.client.GetToken(ctx, opts)
- return tk, err
-}
-
-var _ azcore.TokenCredential = (*InteractiveBrowserCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go
deleted file mode 100644
index b1b4d5c8..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go
+++ /dev/null
@@ -1,18 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package internal
-
-// TokenCachePersistenceOptions contains options for persistent token caching
-type TokenCachePersistenceOptions struct {
- // AllowUnencryptedStorage controls whether the cache should fall back to storing its data in plain text
- // when encryption isn't possible. Setting this true doesn't disable encryption. The cache always attempts
- // encryption before falling back to plaintext storage.
- AllowUnencryptedStorage bool
-
- // Name identifies the cache. Set this to isolate data from other applications.
- Name string
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go
deleted file mode 100644
index c1498b46..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go
+++ /dev/null
@@ -1,31 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package internal
-
-import (
- "errors"
-
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
-)
-
-var errMissingImport = errors.New("import github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache to enable persistent caching")
-
-// NewCache constructs a persistent token cache when "o" isn't nil. Applications that intend to
-// use a persistent cache must first import the cache module, which will replace this function
-// with a platform-specific implementation.
-var NewCache = func(o *TokenCachePersistenceOptions, enableCAE bool) (cache.ExportReplace, error) {
- if o == nil {
- return nil, nil
- }
- return nil, errMissingImport
-}
-
-// CacheFilePath returns the path to the cache file for the given name.
-// Defining it in this package makes it available to azidentity tests.
-var CacheFilePath = func(name string) (string, error) {
- return "", errMissingImport
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go
deleted file mode 100644
index 1aa1e0fc..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/logging.go
+++ /dev/null
@@ -1,14 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
-
-// EventAuthentication entries contain information about authentication.
-// This includes information like the names of environment variables
-// used when obtaining credentials and the type of credential used.
-const EventAuthentication log.Event = "Authentication"
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json
deleted file mode 100644
index 1c379177..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "include": [
- {
- "Agent": {
- "msi_image": {
- "ArmTemplateParameters": "@{deployResources = $true}",
- "OSVmImage": "env:LINUXNEXTVMIMAGE",
- "Pool": "env:LINUXPOOL"
- }
- },
- "GoVersion": [
- "1.22.1"
- ],
- "IDENTITY_IMDS_AVAILABLE": "1"
- }
- ]
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
deleted file mode 100644
index 6122cc70..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
+++ /dev/null
@@ -1,501 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "net/url"
- "os"
- "path/filepath"
- "runtime"
- "strconv"
- "strings"
- "time"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
-)
-
-const (
- arcIMDSEndpoint = "IMDS_ENDPOINT"
- defaultIdentityClientID = "DEFAULT_IDENTITY_CLIENT_ID"
- identityEndpoint = "IDENTITY_ENDPOINT"
- identityHeader = "IDENTITY_HEADER"
- identityServerThumbprint = "IDENTITY_SERVER_THUMBPRINT"
- headerMetadata = "Metadata"
- imdsEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
- miResID = "mi_res_id"
- msiEndpoint = "MSI_ENDPOINT"
- msiResID = "msi_res_id"
- msiSecret = "MSI_SECRET"
- imdsAPIVersion = "2018-02-01"
- azureArcAPIVersion = "2019-08-15"
- qpClientID = "client_id"
- serviceFabricAPIVersion = "2019-07-01-preview"
-)
-
-var imdsProbeTimeout = time.Second
-
-type msiType int
-
-const (
- msiTypeAppService msiType = iota
- msiTypeAzureArc
- msiTypeAzureML
- msiTypeCloudShell
- msiTypeIMDS
- msiTypeServiceFabric
-)
-
-type managedIdentityClient struct {
- azClient *azcore.Client
- endpoint string
- id ManagedIDKind
- msiType msiType
- probeIMDS bool
-}
-
-// arcKeyDirectory returns the directory expected to contain Azure Arc keys
-var arcKeyDirectory = func() (string, error) {
- switch runtime.GOOS {
- case "linux":
- return "/var/opt/azcmagent/tokens", nil
- case "windows":
- pd := os.Getenv("ProgramData")
- if pd == "" {
- return "", errors.New("environment variable ProgramData has no value")
- }
- return filepath.Join(pd, "AzureConnectedMachineAgent", "Tokens"), nil
- default:
- return "", fmt.Errorf("unsupported OS %q", runtime.GOOS)
- }
-}
-
-type wrappedNumber json.Number
-
-func (n *wrappedNumber) UnmarshalJSON(b []byte) error {
- c := string(b)
- if c == "\"\"" {
- return nil
- }
- return json.Unmarshal(b, (*json.Number)(n))
-}
-
-// setIMDSRetryOptionDefaults sets zero-valued fields to default values appropriate for IMDS
-func setIMDSRetryOptionDefaults(o *policy.RetryOptions) {
- if o.MaxRetries == 0 {
- o.MaxRetries = 5
- }
- if o.MaxRetryDelay == 0 {
- o.MaxRetryDelay = 1 * time.Minute
- }
- if o.RetryDelay == 0 {
- o.RetryDelay = 2 * time.Second
- }
- if o.StatusCodes == nil {
- o.StatusCodes = []int{
- // IMDS docs recommend retrying 404, 410, 429 and 5xx
- // https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/how-to-use-vm-token#error-handling
- http.StatusNotFound, // 404
- http.StatusGone, // 410
- http.StatusTooManyRequests, // 429
- http.StatusInternalServerError, // 500
- http.StatusNotImplemented, // 501
- http.StatusBadGateway, // 502
- http.StatusServiceUnavailable, // 503
- http.StatusGatewayTimeout, // 504
- http.StatusHTTPVersionNotSupported, // 505
- http.StatusVariantAlsoNegotiates, // 506
- http.StatusInsufficientStorage, // 507
- http.StatusLoopDetected, // 508
- http.StatusNotExtended, // 510
- http.StatusNetworkAuthenticationRequired, // 511
- }
- }
- if o.TryTimeout == 0 {
- o.TryTimeout = 1 * time.Minute
- }
-}
-
-// newManagedIdentityClient creates a new instance of the ManagedIdentityClient with the ManagedIdentityCredentialOptions
-// that are passed into it along with a default pipeline.
-// options: ManagedIdentityCredentialOptions configure policies for the pipeline and the authority host that
-// will be used to retrieve tokens and authenticate
-func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*managedIdentityClient, error) {
- if options == nil {
- options = &ManagedIdentityCredentialOptions{}
- }
- cp := options.ClientOptions
- c := managedIdentityClient{id: options.ID, endpoint: imdsEndpoint, msiType: msiTypeIMDS}
- env := "IMDS"
- if endpoint, ok := os.LookupEnv(identityEndpoint); ok {
- if _, ok := os.LookupEnv(identityHeader); ok {
- if _, ok := os.LookupEnv(identityServerThumbprint); ok {
- env = "Service Fabric"
- c.endpoint = endpoint
- c.msiType = msiTypeServiceFabric
- } else {
- env = "App Service"
- c.endpoint = endpoint
- c.msiType = msiTypeAppService
- }
- } else if _, ok := os.LookupEnv(arcIMDSEndpoint); ok {
- env = "Azure Arc"
- c.endpoint = endpoint
- c.msiType = msiTypeAzureArc
- }
- } else if endpoint, ok := os.LookupEnv(msiEndpoint); ok {
- c.endpoint = endpoint
- if _, ok := os.LookupEnv(msiSecret); ok {
- env = "Azure ML"
- c.msiType = msiTypeAzureML
- } else {
- env = "Cloud Shell"
- c.msiType = msiTypeCloudShell
- }
- } else {
- c.probeIMDS = options.dac
- setIMDSRetryOptionDefaults(&cp.Retry)
- }
-
- client, err := azcore.NewClient(module, version, azruntime.PipelineOptions{
- Tracing: azruntime.TracingOptions{
- Namespace: traceNamespace,
- },
- }, &cp)
- if err != nil {
- return nil, err
- }
- c.azClient = client
-
- if log.Should(EventAuthentication) {
- log.Writef(EventAuthentication, "Managed Identity Credential will use %s managed identity", env)
- }
-
- return &c, nil
-}
-
-// provideToken acquires a token for MSAL's confidential.Client, which caches the token
-func (c *managedIdentityClient) provideToken(ctx context.Context, params confidential.TokenProviderParameters) (confidential.TokenProviderResult, error) {
- result := confidential.TokenProviderResult{}
- tk, err := c.authenticate(ctx, c.id, params.Scopes)
- if err == nil {
- result.AccessToken = tk.Token
- result.ExpiresInSeconds = int(time.Until(tk.ExpiresOn).Seconds())
- }
- return result, err
-}
-
-// authenticate acquires an access token
-func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKind, scopes []string) (azcore.AccessToken, error) {
- // no need to synchronize around this value because it's true only when DefaultAzureCredential constructed the client,
- // and in that case ChainedTokenCredential.GetToken synchronizes goroutines that would execute this block
- if c.probeIMDS {
- cx, cancel := context.WithTimeout(ctx, imdsProbeTimeout)
- defer cancel()
- cx = policy.WithRetryOptions(cx, policy.RetryOptions{MaxRetries: -1})
- req, err := azruntime.NewRequest(cx, http.MethodGet, c.endpoint)
- if err == nil {
- _, err = c.azClient.Pipeline().Do(req)
- }
- if err != nil {
- msg := err.Error()
- if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
- msg = "managed identity timed out. See https://aka.ms/azsdk/go/identity/troubleshoot#dac for more information"
- }
- return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, msg)
- }
- // send normal token requests from now on because something responded
- c.probeIMDS = false
- }
-
- msg, err := c.createAuthRequest(ctx, id, scopes)
- if err != nil {
- return azcore.AccessToken{}, err
- }
-
- resp, err := c.azClient.Pipeline().Do(msg)
- if err != nil {
- return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil, err)
- }
-
- if azruntime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) {
- return c.createAccessToken(resp)
- }
-
- if c.msiType == msiTypeIMDS {
- switch resp.StatusCode {
- case http.StatusBadRequest:
- if id != nil {
- return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp, nil)
- }
- msg := "failed to authenticate a system assigned identity"
- if body, err := azruntime.Payload(resp); err == nil && len(body) > 0 {
- msg += fmt.Sprintf(". The endpoint responded with %s", body)
- }
- return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, msg)
- case http.StatusForbidden:
- // Docker Desktop runs a proxy that responds 403 to IMDS token requests. If we get that response,
- // we return credentialUnavailableError so credential chains continue to their next credential
- body, err := azruntime.Payload(resp)
- if err == nil && strings.Contains(string(body), "unreachable") {
- return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("unexpected response %q", string(body)))
- }
- }
- }
-
- return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "authentication failed", resp, nil)
-}
-
-func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.AccessToken, error) {
- value := struct {
- // these are the only fields that we use
- Token string `json:"access_token,omitempty"`
- RefreshToken string `json:"refresh_token,omitempty"`
- ExpiresIn wrappedNumber `json:"expires_in,omitempty"` // this field should always return the number of seconds for which a token is valid
- ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string
- }{}
- if err := azruntime.UnmarshalAsJSON(res, &value); err != nil {
- return azcore.AccessToken{}, fmt.Errorf("internal AccessToken: %v", err)
- }
- if value.ExpiresIn != "" {
- expiresIn, err := json.Number(value.ExpiresIn).Int64()
- if err != nil {
- return azcore.AccessToken{}, err
- }
- return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Now().Add(time.Second * time.Duration(expiresIn)).UTC()}, nil
- }
- switch v := value.ExpiresOn.(type) {
- case float64:
- return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(v), 0).UTC()}, nil
- case string:
- if expiresOn, err := strconv.Atoi(v); err == nil {
- return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(expiresOn), 0).UTC()}, nil
- }
- return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, res, nil)
- default:
- msg := fmt.Sprintf("unsupported type received in expires_on: %T, %v", v, v)
- return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res, nil)
- }
-}
-
-func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
- switch c.msiType {
- case msiTypeIMDS:
- return c.createIMDSAuthRequest(ctx, id, scopes)
- case msiTypeAppService:
- return c.createAppServiceAuthRequest(ctx, id, scopes)
- case msiTypeAzureArc:
- // need to perform preliminary request to retreive the secret key challenge provided by the HIMDS service
- key, err := c.getAzureArcSecretKey(ctx, scopes)
- if err != nil {
- msg := fmt.Sprintf("failed to retreive secret key from the identity endpoint: %v", err)
- return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil, err)
- }
- return c.createAzureArcAuthRequest(ctx, id, scopes, key)
- case msiTypeAzureML:
- return c.createAzureMLAuthRequest(ctx, id, scopes)
- case msiTypeServiceFabric:
- return c.createServiceFabricAuthRequest(ctx, id, scopes)
- case msiTypeCloudShell:
- return c.createCloudShellAuthRequest(ctx, id, scopes)
- default:
- return nil, newCredentialUnavailableError(credNameManagedIdentity, "managed identity isn't supported in this environment")
- }
-}
-
-func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return nil, err
- }
- request.Raw().Header.Set(headerMetadata, "true")
- q := request.Raw().URL.Query()
- q.Add("api-version", imdsAPIVersion)
- q.Add("resource", strings.Join(scopes, " "))
- if id != nil {
- if id.idKind() == miResourceID {
- q.Add(msiResID, id.String())
- } else {
- q.Add(qpClientID, id.String())
- }
- }
- request.Raw().URL.RawQuery = q.Encode()
- return request, nil
-}
-
-func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return nil, err
- }
- request.Raw().Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeader))
- q := request.Raw().URL.Query()
- q.Add("api-version", "2019-08-01")
- q.Add("resource", scopes[0])
- if id != nil {
- if id.idKind() == miResourceID {
- q.Add(miResID, id.String())
- } else {
- q.Add(qpClientID, id.String())
- }
- }
- request.Raw().URL.RawQuery = q.Encode()
- return request, nil
-}
-
-func (c *managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return nil, err
- }
- request.Raw().Header.Set("secret", os.Getenv(msiSecret))
- q := request.Raw().URL.Query()
- q.Add("api-version", "2017-09-01")
- q.Add("resource", strings.Join(scopes, " "))
- q.Add("clientid", os.Getenv(defaultIdentityClientID))
- if id != nil {
- if id.idKind() == miResourceID {
- log.Write(EventAuthentication, "WARNING: Azure ML doesn't support specifying a managed identity by resource ID")
- q.Set("clientid", "")
- q.Set(miResID, id.String())
- } else {
- q.Set("clientid", id.String())
- }
- }
- request.Raw().URL.RawQuery = q.Encode()
- return request, nil
-}
-
-func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return nil, err
- }
- q := request.Raw().URL.Query()
- request.Raw().Header.Set("Accept", "application/json")
- request.Raw().Header.Set("Secret", os.Getenv(identityHeader))
- q.Add("api-version", serviceFabricAPIVersion)
- q.Add("resource", strings.Join(scopes, " "))
- if id != nil {
- log.Write(EventAuthentication, "WARNING: Service Fabric doesn't support selecting a user-assigned identity at runtime")
- if id.idKind() == miResourceID {
- q.Add(miResID, id.String())
- } else {
- q.Add(qpClientID, id.String())
- }
- }
- request.Raw().URL.RawQuery = q.Encode()
- return request, nil
-}
-
-func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resources []string) (string, error) {
- // create the request to retreive the secret key challenge provided by the HIMDS service
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return "", err
- }
- request.Raw().Header.Set(headerMetadata, "true")
- q := request.Raw().URL.Query()
- q.Add("api-version", azureArcAPIVersion)
- q.Add("resource", strings.Join(resources, " "))
- request.Raw().URL.RawQuery = q.Encode()
- // send the initial request to get the short-lived secret key
- response, err := c.azClient.Pipeline().Do(request)
- if err != nil {
- return "", err
- }
- // the endpoint is expected to return a 401 with the WWW-Authenticate header set to the location
- // of the secret key file. Any other status code indicates an error in the request.
- if response.StatusCode != 401 {
- msg := fmt.Sprintf("expected a 401 response, received %d", response.StatusCode)
- return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response, nil)
- }
- header := response.Header.Get("WWW-Authenticate")
- if len(header) == 0 {
- return "", newAuthenticationFailedError(credNameManagedIdentity, "HIMDS response has no WWW-Authenticate header", nil, nil)
- }
- // the WWW-Authenticate header is expected in the following format: Basic realm=/some/file/path.key
- _, p, found := strings.Cut(header, "=")
- if !found {
- return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected WWW-Authenticate header from HIMDS: "+header, nil, nil)
- }
- expected, err := arcKeyDirectory()
- if err != nil {
- return "", err
- }
- if filepath.Dir(p) != expected || !strings.HasSuffix(p, ".key") {
- return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected file path from HIMDS service: "+p, nil, nil)
- }
- f, err := os.Stat(p)
- if err != nil {
- return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not stat %q: %v", p, err), nil, nil)
- }
- if s := f.Size(); s > 4096 {
- return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("key is too large (%d bytes)", s), nil, nil)
- }
- key, err := os.ReadFile(p)
- if err != nil {
- return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not read %q: %v", p, err), nil, nil)
- }
- return string(key), nil
-}
-
-func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, id ManagedIDKind, resources []string, key string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return nil, err
- }
- request.Raw().Header.Set(headerMetadata, "true")
- request.Raw().Header.Set("Authorization", fmt.Sprintf("Basic %s", key))
- q := request.Raw().URL.Query()
- q.Add("api-version", azureArcAPIVersion)
- q.Add("resource", strings.Join(resources, " "))
- if id != nil {
- log.Write(EventAuthentication, "WARNING: Azure Arc doesn't support user-assigned managed identities")
- if id.idKind() == miResourceID {
- q.Add(miResID, id.String())
- } else {
- q.Add(qpClientID, id.String())
- }
- }
- request.Raw().URL.RawQuery = q.Encode()
- return request, nil
-}
-
-func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodPost, c.endpoint)
- if err != nil {
- return nil, err
- }
- request.Raw().Header.Set(headerMetadata, "true")
- data := url.Values{}
- data.Set("resource", strings.Join(scopes, " "))
- dataEncoded := data.Encode()
- body := streaming.NopCloser(strings.NewReader(dataEncoded))
- if err := request.SetBody(body, "application/x-www-form-urlencoded"); err != nil {
- return nil, err
- }
- if id != nil {
- log.Write(EventAuthentication, "WARNING: Cloud Shell doesn't support user-assigned managed identities")
- q := request.Raw().URL.Query()
- if id.idKind() == miResourceID {
- q.Add(miResID, id.String())
- } else {
- q.Add(qpClientID, id.String())
- }
- }
- return request, nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go
deleted file mode 100644
index 13c043d8..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go
+++ /dev/null
@@ -1,128 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import (
- "context"
- "fmt"
- "strings"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
-)
-
-const credNameManagedIdentity = "ManagedIdentityCredential"
-
-type managedIdentityIDKind int
-
-const (
- miClientID managedIdentityIDKind = 0
- miResourceID managedIdentityIDKind = 1
-)
-
-// ManagedIDKind identifies the ID of a managed identity as either a client or resource ID
-type ManagedIDKind interface {
- fmt.Stringer
- idKind() managedIdentityIDKind
-}
-
-// ClientID is the client ID of a user-assigned managed identity.
-type ClientID string
-
-func (ClientID) idKind() managedIdentityIDKind {
- return miClientID
-}
-
-// String returns the string value of the ID.
-func (c ClientID) String() string {
- return string(c)
-}
-
-// ResourceID is the resource ID of a user-assigned managed identity.
-type ResourceID string
-
-func (ResourceID) idKind() managedIdentityIDKind {
- return miResourceID
-}
-
-// String returns the string value of the ID.
-func (r ResourceID) String() string {
- return string(r)
-}
-
-// ManagedIdentityCredentialOptions contains optional parameters for ManagedIdentityCredential.
-type ManagedIdentityCredentialOptions struct {
- azcore.ClientOptions
-
- // ID is the ID of a managed identity the credential should authenticate. Set this field to use a specific identity
- // instead of the hosting environment's default. The value may be the identity's client ID or resource ID, but note that
- // some platforms don't accept resource IDs.
- ID ManagedIDKind
-
- // dac indicates whether the credential is part of DefaultAzureCredential. When true, and the environment doesn't have
- // configuration for a specific managed identity API, the credential tries to determine whether IMDS is available before
- // sending its first token request. It does this by sending a malformed request with a short timeout. Any response to that
- // request is taken to mean IMDS is available, in which case the credential will send ordinary token requests thereafter
- // with no special timeout. The purpose of this behavior is to prevent a very long timeout when IMDS isn't available.
- dac bool
-}
-
-// ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities.
-// This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a
-// user-assigned identity. See Microsoft Entra ID documentation for more information about managed identities:
-// https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview
-type ManagedIdentityCredential struct {
- client *confidentialClient
- mic *managedIdentityClient
-}
-
-// NewManagedIdentityCredential creates a ManagedIdentityCredential. Pass nil to accept default options.
-func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*ManagedIdentityCredential, error) {
- if options == nil {
- options = &ManagedIdentityCredentialOptions{}
- }
- mic, err := newManagedIdentityClient(options)
- if err != nil {
- return nil, err
- }
- cred := confidential.NewCredFromTokenProvider(mic.provideToken)
-
- // It's okay to give MSAL an invalid client ID because MSAL will use it only as part of a cache key.
- // ManagedIdentityClient handles all the details of authentication and won't receive this value from MSAL.
- clientID := "SYSTEM-ASSIGNED-MANAGED-IDENTITY"
- if options.ID != nil {
- clientID = options.ID.String()
- }
- // similarly, it's okay to give MSAL an incorrect tenant because MSAL won't use the value
- c, err := newConfidentialClient("common", clientID, credNameManagedIdentity, cred, confidentialClientOptions{
- ClientOptions: options.ClientOptions,
- })
- if err != nil {
- return nil, err
- }
- return &ManagedIdentityCredential{client: c, mic: mic}, nil
-}
-
-// GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients.
-func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- var err error
- ctx, endSpan := runtime.StartSpan(ctx, credNameManagedIdentity+"."+traceOpGetToken, c.client.azClient.Tracer(), nil)
- defer func() { endSpan(err) }()
-
- if len(opts.Scopes) != 1 {
- err = fmt.Errorf("%s.GetToken() requires exactly one scope", credNameManagedIdentity)
- return azcore.AccessToken{}, err
- }
- // managed identity endpoints require a Microsoft Entra ID v1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here
- opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)}
- tk, err := c.client.GetToken(ctx, opts)
- return tk, err
-}
-
-var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go
deleted file mode 100644
index 9dcc82f0..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go
+++ /dev/null
@@ -1,113 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import (
- "context"
- "crypto"
- "crypto/x509"
- "errors"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
-)
-
-const credNameOBO = "OnBehalfOfCredential"
-
-// OnBehalfOfCredential authenticates a service principal via the on-behalf-of flow. This is typically used by
-// middle-tier services that authorize requests to other services with a delegated user identity. Because this
-// is not an interactive authentication flow, an application using it must have admin consent for any delegated
-// permissions before requesting tokens for them. See [Microsoft Entra ID documentation] for more details.
-//
-// [Microsoft Entra ID documentation]: https://learn.microsoft.com/entra/identity-platform/v2-oauth2-on-behalf-of-flow
-type OnBehalfOfCredential struct {
- client *confidentialClient
-}
-
-// OnBehalfOfCredentialOptions contains optional parameters for OnBehalfOfCredential
-type OnBehalfOfCredentialOptions struct {
- azcore.ClientOptions
-
- // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens.
- // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the
- // application is registered.
- AdditionallyAllowedTenants []string
-
- // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
- // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
- // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
- // the application responsible for ensuring the configured authority is valid and trustworthy.
- DisableInstanceDiscovery bool
-
- // SendCertificateChain applies only when the credential is configured to authenticate with a certificate.
- // This setting controls whether the credential sends the public certificate chain in the x5c header of each
- // token request's JWT. This is required for, and only used in, Subject Name/Issuer (SNI) authentication.
- SendCertificateChain bool
-}
-
-// NewOnBehalfOfCredentialWithCertificate constructs an OnBehalfOfCredential that authenticates with a certificate.
-// See [ParseCertificates] for help loading a certificate.
-func NewOnBehalfOfCredentialWithCertificate(tenantID, clientID, userAssertion string, certs []*x509.Certificate, key crypto.PrivateKey, options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) {
- cred, err := confidential.NewCredFromCert(certs, key)
- if err != nil {
- return nil, err
- }
- return newOnBehalfOfCredential(tenantID, clientID, userAssertion, cred, options)
-}
-
-// NewOnBehalfOfCredentialWithClientAssertions constructs an OnBehalfOfCredential that authenticates with client assertions.
-// userAssertion is the user's access token for the application. The getAssertion function should return client assertions
-// that authenticate the application to Microsoft Entra ID, such as federated credentials.
-func NewOnBehalfOfCredentialWithClientAssertions(tenantID, clientID, userAssertion string, getAssertion func(context.Context) (string, error), options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) {
- if getAssertion == nil {
- return nil, errors.New("getAssertion can't be nil. It must be a function that returns client assertions")
- }
- cred := confidential.NewCredFromAssertionCallback(func(ctx context.Context, _ confidential.AssertionRequestOptions) (string, error) {
- return getAssertion(ctx)
- })
- return newOnBehalfOfCredential(tenantID, clientID, userAssertion, cred, options)
-}
-
-// NewOnBehalfOfCredentialWithSecret constructs an OnBehalfOfCredential that authenticates with a client secret.
-func NewOnBehalfOfCredentialWithSecret(tenantID, clientID, userAssertion, clientSecret string, options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) {
- cred, err := confidential.NewCredFromSecret(clientSecret)
- if err != nil {
- return nil, err
- }
- return newOnBehalfOfCredential(tenantID, clientID, userAssertion, cred, options)
-}
-
-func newOnBehalfOfCredential(tenantID, clientID, userAssertion string, cred confidential.Credential, options *OnBehalfOfCredentialOptions) (*OnBehalfOfCredential, error) {
- if options == nil {
- options = &OnBehalfOfCredentialOptions{}
- }
- opts := confidentialClientOptions{
- AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
- Assertion: userAssertion,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- SendX5C: options.SendCertificateChain,
- }
- c, err := newConfidentialClient(tenantID, clientID, credNameOBO, cred, opts)
- if err != nil {
- return nil, err
- }
- return &OnBehalfOfCredential{c}, nil
-}
-
-// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients.
-func (o *OnBehalfOfCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- var err error
- ctx, endSpan := runtime.StartSpan(ctx, credNameOBO+"."+traceOpGetToken, o.client.azClient.Tracer(), nil)
- defer func() { endSpan(err) }()
- tk, err := o.client.GetToken(ctx, opts)
- return tk, err
-}
-
-var _ azcore.TokenCredential = (*OnBehalfOfCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go
deleted file mode 100644
index b3d22dbf..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go
+++ /dev/null
@@ -1,273 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import (
- "context"
- "errors"
- "fmt"
- "net/http"
- "strings"
- "sync"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
- "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
-
- // this import ensures well-known configurations in azcore/cloud have ARM audiences for Authenticate()
- _ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime"
-)
-
-type publicClientOptions struct {
- azcore.ClientOptions
-
- AdditionallyAllowedTenants []string
- DeviceCodePrompt func(context.Context, DeviceCodeMessage) error
- DisableAutomaticAuthentication bool
- DisableInstanceDiscovery bool
- LoginHint, RedirectURL string
- Record authenticationRecord
- TokenCachePersistenceOptions *tokenCachePersistenceOptions
- Username, Password string
-}
-
-// publicClient wraps the MSAL public client
-type publicClient struct {
- cae, noCAE msalPublicClient
- caeMu, noCAEMu, clientMu *sync.Mutex
- clientID, tenantID string
- defaultScope []string
- host string
- name string
- opts publicClientOptions
- record authenticationRecord
- azClient *azcore.Client
-}
-
-var errScopeRequired = errors.New("authenticating in this environment requires specifying a scope in TokenRequestOptions")
-
-func newPublicClient(tenantID, clientID, name string, o publicClientOptions) (*publicClient, error) {
- if !validTenantID(tenantID) {
- return nil, errInvalidTenantID
- }
- host, err := setAuthorityHost(o.Cloud)
- if err != nil {
- return nil, err
- }
- // if the application specified a cloud configuration, use its ARM audience as the default scope for Authenticate()
- audience := o.Cloud.Services[cloud.ResourceManager].Audience
- if audience == "" {
- // no cloud configuration, or no ARM audience, specified; try to map the host to a well-known one (all of which have a trailing slash)
- if !strings.HasSuffix(host, "/") {
- host += "/"
- }
- switch host {
- case cloud.AzureChina.ActiveDirectoryAuthorityHost:
- audience = cloud.AzureChina.Services[cloud.ResourceManager].Audience
- case cloud.AzureGovernment.ActiveDirectoryAuthorityHost:
- audience = cloud.AzureGovernment.Services[cloud.ResourceManager].Audience
- case cloud.AzurePublic.ActiveDirectoryAuthorityHost:
- audience = cloud.AzurePublic.Services[cloud.ResourceManager].Audience
- }
- }
- // if we didn't come up with an audience, the application will have to specify a scope for Authenticate()
- var defaultScope []string
- if audience != "" {
- defaultScope = []string{audience + defaultSuffix}
- }
- client, err := azcore.NewClient(module, version, runtime.PipelineOptions{
- Tracing: runtime.TracingOptions{
- Namespace: traceNamespace,
- },
- }, &o.ClientOptions)
- if err != nil {
- return nil, err
- }
- o.AdditionallyAllowedTenants = resolveAdditionalTenants(o.AdditionallyAllowedTenants)
- return &publicClient{
- caeMu: &sync.Mutex{},
- clientID: clientID,
- clientMu: &sync.Mutex{},
- defaultScope: defaultScope,
- host: host,
- name: name,
- noCAEMu: &sync.Mutex{},
- opts: o,
- record: o.Record,
- tenantID: tenantID,
- azClient: client,
- }, nil
-}
-
-func (p *publicClient) Authenticate(ctx context.Context, tro *policy.TokenRequestOptions) (authenticationRecord, error) {
- if tro == nil {
- tro = &policy.TokenRequestOptions{}
- }
- if len(tro.Scopes) == 0 {
- if p.defaultScope == nil {
- return authenticationRecord{}, errScopeRequired
- }
- tro.Scopes = p.defaultScope
- }
- client, mu, err := p.client(*tro)
- if err != nil {
- return authenticationRecord{}, err
- }
- mu.Lock()
- defer mu.Unlock()
- _, err = p.reqToken(ctx, client, *tro)
- if err == nil {
- scope := strings.Join(tro.Scopes, ", ")
- msg := fmt.Sprintf("%s.Authenticate() acquired a token for scope %q", p.name, scope)
- log.Write(EventAuthentication, msg)
- }
- return p.record, err
-}
-
-// GetToken requests an access token from MSAL, checking the cache first.
-func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOptions) (azcore.AccessToken, error) {
- if len(tro.Scopes) < 1 {
- return azcore.AccessToken{}, fmt.Errorf("%s.GetToken() requires at least one scope", p.name)
- }
- tenant, err := p.resolveTenant(tro.TenantID)
- if err != nil {
- return azcore.AccessToken{}, err
- }
- client, mu, err := p.client(tro)
- if err != nil {
- return azcore.AccessToken{}, err
- }
- mu.Lock()
- defer mu.Unlock()
- ar, err := client.AcquireTokenSilent(ctx, tro.Scopes, public.WithSilentAccount(p.record.account()), public.WithClaims(tro.Claims), public.WithTenantID(tenant))
- if err == nil {
- return p.token(ar, err)
- }
- if p.opts.DisableAutomaticAuthentication {
- return azcore.AccessToken{}, newauthenticationRequiredError(p.name, tro)
- }
- at, err := p.reqToken(ctx, client, tro)
- if err == nil {
- msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", p.name, strings.Join(ar.GrantedScopes, ", "))
- log.Write(EventAuthentication, msg)
- }
- return at, err
-}
-
-// reqToken requests a token from the MSAL public client. It's separate from GetToken() to enable Authenticate() to bypass the cache.
-func (p *publicClient) reqToken(ctx context.Context, c msalPublicClient, tro policy.TokenRequestOptions) (azcore.AccessToken, error) {
- tenant, err := p.resolveTenant(tro.TenantID)
- if err != nil {
- return azcore.AccessToken{}, err
- }
- var ar public.AuthResult
- switch p.name {
- case credNameBrowser:
- ar, err = c.AcquireTokenInteractive(ctx, tro.Scopes,
- public.WithClaims(tro.Claims),
- public.WithLoginHint(p.opts.LoginHint),
- public.WithRedirectURI(p.opts.RedirectURL),
- public.WithTenantID(tenant),
- )
- case credNameDeviceCode:
- dc, e := c.AcquireTokenByDeviceCode(ctx, tro.Scopes, public.WithClaims(tro.Claims), public.WithTenantID(tenant))
- if e != nil {
- return azcore.AccessToken{}, e
- }
- err = p.opts.DeviceCodePrompt(ctx, DeviceCodeMessage{
- Message: dc.Result.Message,
- UserCode: dc.Result.UserCode,
- VerificationURL: dc.Result.VerificationURL,
- })
- if err == nil {
- ar, err = dc.AuthenticationResult(ctx)
- }
- case credNameUserPassword:
- ar, err = c.AcquireTokenByUsernamePassword(ctx, tro.Scopes, p.opts.Username, p.opts.Password, public.WithClaims(tro.Claims), public.WithTenantID(tenant))
- default:
- return azcore.AccessToken{}, fmt.Errorf("unknown credential %q", p.name)
- }
- return p.token(ar, err)
-}
-
-func (p *publicClient) client(tro policy.TokenRequestOptions) (msalPublicClient, *sync.Mutex, error) {
- p.clientMu.Lock()
- defer p.clientMu.Unlock()
- if tro.EnableCAE {
- if p.cae == nil {
- client, err := p.newMSALClient(true)
- if err != nil {
- return nil, nil, err
- }
- p.cae = client
- }
- return p.cae, p.caeMu, nil
- }
- if p.noCAE == nil {
- client, err := p.newMSALClient(false)
- if err != nil {
- return nil, nil, err
- }
- p.noCAE = client
- }
- return p.noCAE, p.noCAEMu, nil
-}
-
-func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) {
- cache, err := internal.NewCache(p.opts.TokenCachePersistenceOptions, enableCAE)
- if err != nil {
- return nil, err
- }
- o := []public.Option{
- public.WithAuthority(runtime.JoinPaths(p.host, p.tenantID)),
- public.WithCache(cache),
- public.WithHTTPClient(p),
- }
- if enableCAE {
- o = append(o, public.WithClientCapabilities(cp1))
- }
- if p.opts.DisableInstanceDiscovery || strings.ToLower(p.tenantID) == "adfs" {
- o = append(o, public.WithInstanceDiscovery(false))
- }
- return public.New(p.clientID, o...)
-}
-
-func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToken, error) {
- if err == nil {
- p.record, err = newAuthenticationRecord(ar)
- } else {
- res := getResponseFromError(err)
- err = newAuthenticationFailedError(p.name, err.Error(), res, err)
- }
- return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
-}
-
-// resolveTenant returns the correct WithTenantID() argument for a token request given the client's
-// configuration, or an error when that configuration doesn't allow the specified tenant
-func (p *publicClient) resolveTenant(specified string) (string, error) {
- t, err := resolveTenant(p.tenantID, specified, p.name, p.opts.AdditionallyAllowedTenants)
- if t == p.tenantID {
- // callers pass this value to MSAL's WithTenantID(). There's no need to redundantly specify
- // the client's default tenant and doing so is an error when that tenant is "organizations"
- t = ""
- }
- return t, err
-}
-
-// these methods satisfy the MSAL ops.HTTPClient interface
-
-func (p *publicClient) CloseIdleConnections() {
- // do nothing
-}
-
-func (p *publicClient) Do(r *http.Request) (*http.Response, error) {
- return doForClient(p.azClient, r)
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1
deleted file mode 100644
index a69bbce3..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1
+++ /dev/null
@@ -1,112 +0,0 @@
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License.
-
-# IMPORTANT: Do not invoke this file directly. Please instead run eng/common/TestResources/New-TestResources.ps1 from the repository root.
-
-param (
- [hashtable] $AdditionalParameters = @{},
- [hashtable] $DeploymentOutputs
-)
-
-$ErrorActionPreference = 'Stop'
-$PSNativeCommandUseErrorActionPreference = $true
-
-if ($CI) {
- if (!$AdditionalParameters['deployResources']) {
- Write-Host "Skipping post-provisioning script because resources weren't deployed"
- return
- }
- az login --service-principal -u $DeploymentOutputs['AZIDENTITY_CLIENT_ID'] -p $DeploymentOutputs['AZIDENTITY_CLIENT_SECRET'] --tenant $DeploymentOutputs['AZIDENTITY_TENANT_ID']
- az account set --subscription $DeploymentOutputs['AZIDENTITY_SUBSCRIPTION_ID']
-}
-
-Write-Host "Building container"
-$image = "$($DeploymentOutputs['AZIDENTITY_ACR_LOGIN_SERVER'])/azidentity-managed-id-test"
-Set-Content -Path "$PSScriptRoot/Dockerfile" -Value @"
-FROM mcr.microsoft.com/oss/go/microsoft/golang:latest as builder
-ENV GOARCH=amd64 GOWORK=off
-COPY . /azidentity
-WORKDIR /azidentity/testdata/managed-id-test
-RUN go mod tidy
-RUN go build -o /build/managed-id-test .
-RUN GOOS=windows go build -o /build/managed-id-test.exe .
-
-FROM mcr.microsoft.com/mirror/docker/library/alpine:3.16
-RUN apk add gcompat
-COPY --from=builder /build/* .
-RUN chmod +x managed-id-test
-CMD ["./managed-id-test"]
-"@
-# build from sdk/azidentity because we need that dir in the context (because the test app uses local azidentity)
-docker build -t $image "$PSScriptRoot"
-az acr login -n $DeploymentOutputs['AZIDENTITY_ACR_NAME']
-docker push $image
-
-$rg = $DeploymentOutputs['AZIDENTITY_RESOURCE_GROUP']
-
-# ACI is easier to provision here than in the bicep file because the image isn't available before now
-Write-Host "Deploying Azure Container Instance"
-$aciName = "azidentity-test"
-az container create -g $rg -n $aciName --image $image `
- --acr-identity $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) `
- --assign-identity [system] $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) `
- --role "Storage Blob Data Reader" `
- --scope $($DeploymentOutputs['AZIDENTITY_STORAGE_ID']) `
- -e AZIDENTITY_STORAGE_NAME=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME']) `
- AZIDENTITY_STORAGE_NAME_USER_ASSIGNED=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) `
- AZIDENTITY_USER_ASSIGNED_IDENTITY=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) `
- FUNCTIONS_CUSTOMHANDLER_PORT=80
-Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_NAME;]$aciName"
-
-# Azure Functions deployment: copy the Windows binary from the Docker image, deploy it in a zip
-Write-Host "Deploying to Azure Functions"
-$container = docker create $image
-docker cp ${container}:managed-id-test.exe "$PSScriptRoot/testdata/managed-id-test/"
-docker rm -v $container
-Compress-Archive -Path "$PSScriptRoot/testdata/managed-id-test/*" -DestinationPath func.zip -Force
-az functionapp deploy -g $rg -n $DeploymentOutputs['AZIDENTITY_FUNCTION_NAME'] --src-path func.zip --type zip
-
-Write-Host "Creating federated identity"
-$aksName = $DeploymentOutputs['AZIDENTITY_AKS_NAME']
-$idName = $DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_NAME']
-$issuer = az aks show -g $rg -n $aksName --query "oidcIssuerProfile.issuerUrl" -otsv
-$podName = "azidentity-test"
-$serviceAccountName = "workload-identity-sa"
-az identity federated-credential create -g $rg --identity-name $idName --issuer $issuer --name $idName --subject system:serviceaccount:default:$serviceAccountName
-Write-Host "Deploying to AKS"
-az aks get-credentials -g $rg -n $aksName
-az aks update --attach-acr $DeploymentOutputs['AZIDENTITY_ACR_NAME'] -g $rg -n $aksName
-Set-Content -Path "$PSScriptRoot/k8s.yaml" -Value @"
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- annotations:
- azure.workload.identity/client-id: $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID'])
- name: $serviceAccountName
- namespace: default
----
-apiVersion: v1
-kind: Pod
-metadata:
- name: $podName
- namespace: default
- labels:
- app: $podName
- azure.workload.identity/use: "true"
-spec:
- serviceAccountName: $serviceAccountName
- containers:
- - name: $podName
- image: $image
- env:
- - name: AZIDENTITY_STORAGE_NAME
- value: $($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED'])
- - name: AZIDENTITY_USE_WORKLOAD_IDENTITY
- value: "true"
- - name: FUNCTIONS_CUSTOMHANDLER_PORT
- value: "80"
- nodeSelector:
- kubernetes.io/os: linux
-"@
-kubectl apply -f "$PSScriptRoot/k8s.yaml"
-Write-Host "##vso[task.setvariable variable=AZIDENTITY_POD_NAME;]$podName"
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1
deleted file mode 100644
index 58766d0a..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright (c) Microsoft Corporation. All rights reserved.
-# Licensed under the MIT License.
-
-# IMPORTANT: Do not invoke this file directly. Please instead run eng/common/TestResources/New-TestResources.ps1 from the repository root.
-
-[CmdletBinding(SupportsShouldProcess = $true, ConfirmImpact = 'Medium')]
-param (
- [hashtable] $AdditionalParameters = @{},
-
- # Captures any arguments from eng/New-TestResources.ps1 not declared here (no parameter errors).
- [Parameter(ValueFromRemainingArguments = $true)]
- $RemainingArguments
-)
-
-if (-not (Test-Path "$PSScriptRoot/sshkey.pub")) {
- ssh-keygen -t rsa -b 4096 -f "$PSScriptRoot/sshkey" -N '' -C ''
-}
-$templateFileParameters['sshPubKey'] = Get-Content "$PSScriptRoot/sshkey.pub"
-
-if (!$CI) {
- # TODO: Remove this once auto-cloud config downloads are supported locally
- Write-Host "Skipping cert setup in local testing mode"
- return
-}
-
-if ($null -eq $EnvironmentVariables -or $EnvironmentVariables.Count -eq 0) {
- throw "EnvironmentVariables must be set in the calling script New-TestResources.ps1"
-}
-
-$tmp = $env:TEMP ? $env:TEMP : [System.IO.Path]::GetTempPath()
-$pfxPath = Join-Path $tmp "test.pfx"
-$pemPath = Join-Path $tmp "test.pem"
-
-Write-Host "Creating identity test files: $pfxPath $pemPath"
-
-[System.Convert]::FromBase64String($EnvironmentVariables['PFX_CONTENTS']) | Set-Content -Path $pfxPath -AsByteStream
-Set-Content -Path $pemPath -Value $EnvironmentVariables['PEM_CONTENTS']
-
-# Set for pipeline
-Write-Host "##vso[task.setvariable variable=IDENTITY_SP_CERT_PFX;]$pfxPath"
-Write-Host "##vso[task.setvariable variable=IDENTITY_SP_CERT_PEM;]$pemPath"
-# Set for local
-$env:IDENTITY_SP_CERT_PFX = $pfxPath
-$env:IDENTITY_SP_CERT_PEM = $pemPath
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep
deleted file mode 100644
index 2a216529..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT License.
-
-@description('Kubernetes cluster admin user name.')
-param adminUser string = 'azureuser'
-
-@minLength(6)
-@maxLength(23)
-@description('The base resource name.')
-param baseName string = resourceGroup().name
-
-@description('Whether to deploy resources. When set to false, this file deploys nothing.')
-param deployResources bool = false
-
-param sshPubKey string = ''
-
-@description('The location of the resource. By default, this is the same as the resource group.')
-param location string = resourceGroup().location
-
-// https://learn.microsoft.com/azure/role-based-access-control/built-in-roles
-var acrPull = subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '7f951dda-4ed3-4680-a7ca-43fe172d538d')
-var blobReader = subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '2a2b9908-6ea1-4ae2-8e65-a410df84e7d1')
-
-resource sa 'Microsoft.Storage/storageAccounts@2021-08-01' = if (deployResources) {
- kind: 'StorageV2'
- location: location
- name: 'sa${uniqueString(baseName)}'
- properties: {
- accessTier: 'Hot'
- }
- sku: {
- name: 'Standard_LRS'
- }
-}
-
-resource saUserAssigned 'Microsoft.Storage/storageAccounts@2021-08-01' = if (deployResources) {
- kind: 'StorageV2'
- location: location
- name: 'sa2${uniqueString(baseName)}'
- properties: {
- accessTier: 'Hot'
- }
- sku: {
- name: 'Standard_LRS'
- }
-}
-
-resource usermgdid 'Microsoft.ManagedIdentity/userAssignedIdentities@2018-11-30' = if (deployResources) {
- location: location
- name: baseName
-}
-
-resource acrPullContainerInstance 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) {
- name: guid(resourceGroup().id, acrPull, 'containerInstance')
- properties: {
- principalId: deployResources ? usermgdid.properties.principalId : ''
- principalType: 'ServicePrincipal'
- roleDefinitionId: acrPull
- }
- scope: containerRegistry
-}
-
-resource blobRoleUserAssigned 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) {
- scope: saUserAssigned
- name: guid(resourceGroup().id, blobReader, usermgdid.id)
- properties: {
- principalId: deployResources ? usermgdid.properties.principalId : ''
- principalType: 'ServicePrincipal'
- roleDefinitionId: blobReader
- }
-}
-
-resource blobRoleFunc 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (deployResources) {
- name: guid(resourceGroup().id, blobReader, 'azfunc')
- properties: {
- principalId: deployResources ? azfunc.identity.principalId : ''
- roleDefinitionId: blobReader
- principalType: 'ServicePrincipal'
- }
- scope: sa
-}
-
-resource containerRegistry 'Microsoft.ContainerRegistry/registries@2023-01-01-preview' = if (deployResources) {
- location: location
- name: uniqueString(resourceGroup().id)
- properties: {
- adminUserEnabled: true
- }
- sku: {
- name: 'Basic'
- }
-}
-
-resource farm 'Microsoft.Web/serverfarms@2021-03-01' = if (deployResources) {
- kind: 'app'
- location: location
- name: '${baseName}_asp'
- properties: {}
- sku: {
- capacity: 1
- family: 'B'
- name: 'B1'
- size: 'B1'
- tier: 'Basic'
- }
-}
-
-resource azfunc 'Microsoft.Web/sites@2021-03-01' = if (deployResources) {
- identity: {
- type: 'SystemAssigned, UserAssigned'
- userAssignedIdentities: {
- '${deployResources ? usermgdid.id : ''}': {}
- }
- }
- kind: 'functionapp'
- location: location
- name: '${baseName}func'
- properties: {
- enabled: true
- httpsOnly: true
- keyVaultReferenceIdentity: 'SystemAssigned'
- serverFarmId: farm.id
- siteConfig: {
- alwaysOn: true
- appSettings: [
- {
- name: 'AZIDENTITY_STORAGE_NAME'
- value: deployResources ? sa.name : null
- }
- {
- name: 'AZIDENTITY_STORAGE_NAME_USER_ASSIGNED'
- value: deployResources ? saUserAssigned.name : null
- }
- {
- name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY'
- value: deployResources ? usermgdid.id : null
- }
- {
- name: 'AzureWebJobsStorage'
- value: 'DefaultEndpointsProtocol=https;AccountName=${deployResources ? sa.name : ''};EndpointSuffix=${deployResources ? environment().suffixes.storage : ''};AccountKey=${deployResources ? sa.listKeys().keys[0].value : ''}'
- }
- {
- name: 'FUNCTIONS_EXTENSION_VERSION'
- value: '~4'
- }
- {
- name: 'FUNCTIONS_WORKER_RUNTIME'
- value: 'custom'
- }
- {
- name: 'WEBSITE_CONTENTAZUREFILECONNECTIONSTRING'
- value: 'DefaultEndpointsProtocol=https;AccountName=${deployResources ? sa.name : ''};EndpointSuffix=${deployResources ? environment().suffixes.storage : ''};AccountKey=${deployResources ? sa.listKeys().keys[0].value : ''}'
- }
- {
- name: 'WEBSITE_CONTENTSHARE'
- value: toLower('${baseName}-func')
- }
- ]
- http20Enabled: true
- minTlsVersion: '1.2'
- }
- }
-}
-
-resource aks 'Microsoft.ContainerService/managedClusters@2023-06-01' = if (deployResources) {
- name: baseName
- location: location
- identity: {
- type: 'SystemAssigned'
- }
- properties: {
- agentPoolProfiles: [
- {
- count: 1
- enableAutoScaling: false
- kubeletDiskType: 'OS'
- mode: 'System'
- name: 'agentpool'
- osDiskSizeGB: 128
- osDiskType: 'Managed'
- osSKU: 'Ubuntu'
- osType: 'Linux'
- type: 'VirtualMachineScaleSets'
- vmSize: 'Standard_D2s_v3'
- }
- ]
- dnsPrefix: 'identitytest'
- enableRBAC: true
- linuxProfile: {
- adminUsername: adminUser
- ssh: {
- publicKeys: [
- {
- keyData: sshPubKey
- }
- ]
- }
- }
- oidcIssuerProfile: {
- enabled: true
- }
- securityProfile: {
- workloadIdentity: {
- enabled: true
- }
- }
- }
-}
-
-output AZIDENTITY_ACR_LOGIN_SERVER string = deployResources ? containerRegistry.properties.loginServer : ''
-output AZIDENTITY_ACR_NAME string = deployResources ? containerRegistry.name : ''
-output AZIDENTITY_AKS_NAME string = deployResources ? aks.name : ''
-output AZIDENTITY_FUNCTION_NAME string = deployResources ? azfunc.name : ''
-output AZIDENTITY_STORAGE_ID string = deployResources ? sa.id : ''
-output AZIDENTITY_STORAGE_NAME string = deployResources ? sa.name : ''
-output AZIDENTITY_STORAGE_NAME_USER_ASSIGNED string = deployResources ? saUserAssigned.name : ''
-output AZIDENTITY_USER_ASSIGNED_IDENTITY string = deployResources ? usermgdid.id : ''
-output AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID string = deployResources ? usermgdid.properties.clientId : ''
-output AZIDENTITY_USER_ASSIGNED_IDENTITY_NAME string = deployResources ? usermgdid.name : ''
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go
deleted file mode 100644
index 294ed81e..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go
+++ /dev/null
@@ -1,90 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import (
- "context"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
-)
-
-const credNameUserPassword = "UsernamePasswordCredential"
-
-// UsernamePasswordCredentialOptions contains optional parameters for UsernamePasswordCredential.
-type UsernamePasswordCredentialOptions struct {
- azcore.ClientOptions
-
- // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens.
- // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the
- // application is registered.
- AdditionallyAllowedTenants []string
-
- // authenticationRecord returned by a call to a credential's Authenticate method. Set this option
- // to enable the credential to use data from a previous authentication.
- authenticationRecord authenticationRecord
-
- // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
- // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
- // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
- // the application responsible for ensuring the configured authority is valid and trustworthy.
- DisableInstanceDiscovery bool
-
- // tokenCachePersistenceOptions enables persistent token caching when not nil.
- tokenCachePersistenceOptions *tokenCachePersistenceOptions
-}
-
-// UsernamePasswordCredential authenticates a user with a password. Microsoft doesn't recommend this kind of authentication,
-// because it's less secure than other authentication flows. This credential is not interactive, so it isn't compatible
-// with any form of multi-factor authentication, and the application must already have user or admin consent.
-// This credential can only authenticate work and school accounts; it can't authenticate Microsoft accounts.
-type UsernamePasswordCredential struct {
- client *publicClient
-}
-
-// NewUsernamePasswordCredential creates a UsernamePasswordCredential. clientID is the ID of the application the user
-// will authenticate to. Pass nil for options to accept defaults.
-func NewUsernamePasswordCredential(tenantID string, clientID string, username string, password string, options *UsernamePasswordCredentialOptions) (*UsernamePasswordCredential, error) {
- if options == nil {
- options = &UsernamePasswordCredentialOptions{}
- }
- opts := publicClientOptions{
- AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- Password: password,
- Record: options.authenticationRecord,
- TokenCachePersistenceOptions: options.tokenCachePersistenceOptions,
- Username: username,
- }
- c, err := newPublicClient(tenantID, clientID, credNameUserPassword, opts)
- if err != nil {
- return nil, err
- }
- return &UsernamePasswordCredential{client: c}, err
-}
-
-// Authenticate the user. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord.
-func (c *UsernamePasswordCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) {
- var err error
- ctx, endSpan := runtime.StartSpan(ctx, credNameUserPassword+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil)
- defer func() { endSpan(err) }()
- tk, err := c.client.Authenticate(ctx, opts)
- return tk, err
-}
-
-// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients.
-func (c *UsernamePasswordCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- var err error
- ctx, endSpan := runtime.StartSpan(ctx, credNameUserPassword+"."+traceOpGetToken, c.client.azClient.Tracer(), nil)
- defer func() { endSpan(err) }()
- tk, err := c.client.GetToken(ctx, opts)
- return tk, err
-}
-
-var _ azcore.TokenCredential = (*UsernamePasswordCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
deleted file mode 100644
index 459ef64c..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
+++ /dev/null
@@ -1,18 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-const (
- // UserAgent is the string to be used in the user agent string when making requests.
- component = "azidentity"
-
- // module is the fully qualified name of the module used in telemetry and distributed tracing.
- module = "github.com/Azure/azure-sdk-for-go/sdk/" + component
-
- // Version is the semantic version (see http://semver.org) of this module.
- version = "v1.6.0"
-)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go
deleted file mode 100644
index 3e43e788..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go
+++ /dev/null
@@ -1,131 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package azidentity
-
-import (
- "context"
- "errors"
- "os"
- "sync"
- "time"
-
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
-)
-
-const credNameWorkloadIdentity = "WorkloadIdentityCredential"
-
-// WorkloadIdentityCredential supports Azure workload identity on Kubernetes.
-// See [Azure Kubernetes Service documentation] for more information.
-//
-// [Azure Kubernetes Service documentation]: https://learn.microsoft.com/azure/aks/workload-identity-overview
-type WorkloadIdentityCredential struct {
- assertion, file string
- cred *ClientAssertionCredential
- expires time.Time
- mtx *sync.RWMutex
-}
-
-// WorkloadIdentityCredentialOptions contains optional parameters for WorkloadIdentityCredential.
-type WorkloadIdentityCredentialOptions struct {
- azcore.ClientOptions
-
- // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens.
- // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the
- // application is registered.
- AdditionallyAllowedTenants []string
- // ClientID of the service principal. Defaults to the value of the environment variable AZURE_CLIENT_ID.
- ClientID string
- // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
- // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
- // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
- // the application responsible for ensuring the configured authority is valid and trustworthy.
- DisableInstanceDiscovery bool
- // TenantID of the service principal. Defaults to the value of the environment variable AZURE_TENANT_ID.
- TenantID string
- // TokenFilePath is the path of a file containing a Kubernetes service account token. Defaults to the value of the
- // environment variable AZURE_FEDERATED_TOKEN_FILE.
- TokenFilePath string
-}
-
-// NewWorkloadIdentityCredential constructs a WorkloadIdentityCredential. Service principal configuration is read
-// from environment variables as set by the Azure workload identity webhook. Set options to override those values.
-func NewWorkloadIdentityCredential(options *WorkloadIdentityCredentialOptions) (*WorkloadIdentityCredential, error) {
- if options == nil {
- options = &WorkloadIdentityCredentialOptions{}
- }
- ok := false
- clientID := options.ClientID
- if clientID == "" {
- if clientID, ok = os.LookupEnv(azureClientID); !ok {
- return nil, errors.New("no client ID specified. Check pod configuration or set ClientID in the options")
- }
- }
- file := options.TokenFilePath
- if file == "" {
- if file, ok = os.LookupEnv(azureFederatedTokenFile); !ok {
- return nil, errors.New("no token file specified. Check pod configuration or set TokenFilePath in the options")
- }
- }
- tenantID := options.TenantID
- if tenantID == "" {
- if tenantID, ok = os.LookupEnv(azureTenantID); !ok {
- return nil, errors.New("no tenant ID specified. Check pod configuration or set TenantID in the options")
- }
- }
- w := WorkloadIdentityCredential{file: file, mtx: &sync.RWMutex{}}
- caco := ClientAssertionCredentialOptions{
- AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- }
- cred, err := NewClientAssertionCredential(tenantID, clientID, w.getAssertion, &caco)
- if err != nil {
- return nil, err
- }
- // we want "WorkloadIdentityCredential" in log messages, not "ClientAssertionCredential"
- cred.client.name = credNameWorkloadIdentity
- w.cred = cred
- return &w, nil
-}
-
-// GetToken requests an access token from Microsoft Entra ID. Azure SDK clients call this method automatically.
-func (w *WorkloadIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
- var err error
- ctx, endSpan := runtime.StartSpan(ctx, credNameWorkloadIdentity+"."+traceOpGetToken, w.cred.client.azClient.Tracer(), nil)
- defer func() { endSpan(err) }()
- tk, err := w.cred.GetToken(ctx, opts)
- return tk, err
-}
-
-// getAssertion returns the specified file's content, which is expected to be a Kubernetes service account token.
-// Kubernetes is responsible for updating the file as service account tokens expire.
-func (w *WorkloadIdentityCredential) getAssertion(context.Context) (string, error) {
- w.mtx.RLock()
- if w.expires.Before(time.Now()) {
- // ensure only one goroutine at a time updates the assertion
- w.mtx.RUnlock()
- w.mtx.Lock()
- defer w.mtx.Unlock()
- // double check because another goroutine may have acquired the write lock first and done the update
- if now := time.Now(); w.expires.Before(now) {
- content, err := os.ReadFile(w.file)
- if err != nil {
- return "", err
- }
- w.assertion = string(content)
- // Kubernetes rotates service account tokens when they reach 80% of their total TTL. The shortest TTL
- // is 1 hour. That implies the token we just read is valid for at least 12 minutes (20% of 1 hour),
- // but we add some margin for safety.
- w.expires = now.Add(10 * time.Minute)
- }
- } else {
- defer w.mtx.RUnlock()
- }
- return w.assertion, nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt
deleted file mode 100644
index 48ea6616..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) Microsoft Corporation.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go
deleted file mode 100644
index 245af7d2..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go
+++ /dev/null
@@ -1,51 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package diag
-
-import (
- "fmt"
- "runtime"
- "strings"
-)
-
-// Caller returns the file and line number of a frame on the caller's stack.
-// If the funtion fails an empty string is returned.
-// skipFrames - the number of frames to skip when determining the caller.
-// Passing a value of 0 will return the immediate caller of this function.
-func Caller(skipFrames int) string {
- if pc, file, line, ok := runtime.Caller(skipFrames + 1); ok {
- // the skipFrames + 1 is to skip ourselves
- frame := runtime.FuncForPC(pc)
- return fmt.Sprintf("%s()\n\t%s:%d", frame.Name(), file, line)
- }
- return ""
-}
-
-// StackTrace returns a formatted stack trace string.
-// If the funtion fails an empty string is returned.
-// skipFrames - the number of stack frames to skip before composing the trace string.
-// totalFrames - the maximum number of stack frames to include in the trace string.
-func StackTrace(skipFrames, totalFrames int) string {
- pcCallers := make([]uintptr, totalFrames)
- if frames := runtime.Callers(skipFrames, pcCallers); frames == 0 {
- return ""
- }
- frames := runtime.CallersFrames(pcCallers)
- sb := strings.Builder{}
- for {
- frame, more := frames.Next()
- sb.WriteString(frame.Function)
- sb.WriteString("()\n\t")
- sb.WriteString(frame.File)
- sb.WriteRune(':')
- sb.WriteString(fmt.Sprintf("%d\n", frame.Line))
- if !more {
- break
- }
- }
- return sb.String()
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go
deleted file mode 100644
index 66bf13e5..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go
+++ /dev/null
@@ -1,7 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package diag
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go
deleted file mode 100644
index 8c6eacb6..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go
+++ /dev/null
@@ -1,7 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package errorinfo
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go
deleted file mode 100644
index 8ee66b52..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go
+++ /dev/null
@@ -1,46 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package errorinfo
-
-// NonRetriable represents a non-transient error. This works in
-// conjunction with the retry policy, indicating that the error condition
-// is idempotent, so no retries will be attempted.
-// Use errors.As() to access this interface in the error chain.
-type NonRetriable interface {
- error
- NonRetriable()
-}
-
-// NonRetriableError marks the specified error as non-retriable.
-// This function takes an error as input and returns a new error that is marked as non-retriable.
-func NonRetriableError(err error) error {
- return &nonRetriableError{err}
-}
-
-// nonRetriableError is a struct that embeds the error interface.
-// It is used to represent errors that should not be retried.
-type nonRetriableError struct {
- error
-}
-
-// Error method for nonRetriableError struct.
-// It returns the error message of the embedded error.
-func (p *nonRetriableError) Error() string {
- return p.error.Error()
-}
-
-// NonRetriable is a marker method for nonRetriableError struct.
-// Non-functional and indicates that the error is non-retriable.
-func (*nonRetriableError) NonRetriable() {
- // marker method
-}
-
-// Unwrap method for nonRetriableError struct.
-// It returns the original error that was marked as non-retriable.
-func (p *nonRetriableError) Unwrap() error {
- return p.error
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go
deleted file mode 100644
index 9948f604..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/exported/exported.go
+++ /dev/null
@@ -1,129 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package exported
-
-import (
- "errors"
- "io"
- "net/http"
-)
-
-// HasStatusCode returns true if the Response's status code is one of the specified values.
-// Exported as runtime.HasStatusCode().
-func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
- if resp == nil {
- return false
- }
- for _, sc := range statusCodes {
- if resp.StatusCode == sc {
- return true
- }
- }
- return false
-}
-
-// PayloadOptions contains the optional values for the Payload func.
-// NOT exported but used by azcore.
-type PayloadOptions struct {
- // BytesModifier receives the downloaded byte slice and returns an updated byte slice.
- // Use this to modify the downloaded bytes in a payload (e.g. removing a BOM).
- BytesModifier func([]byte) []byte
-}
-
-// Payload reads and returns the response body or an error.
-// On a successful read, the response body is cached.
-// Subsequent reads will access the cached value.
-// Exported as runtime.Payload() WITHOUT the opts parameter.
-func Payload(resp *http.Response, opts *PayloadOptions) ([]byte, error) {
- if resp.Body == nil {
- // this shouldn't happen in real-world scenarios as a
- // response with no body should set it to http.NoBody
- return nil, nil
- }
- modifyBytes := func(b []byte) []byte { return b }
- if opts != nil && opts.BytesModifier != nil {
- modifyBytes = opts.BytesModifier
- }
-
- // r.Body won't be a nopClosingBytesReader if downloading was skipped
- if buf, ok := resp.Body.(*nopClosingBytesReader); ok {
- bytesBody := modifyBytes(buf.Bytes())
- buf.Set(bytesBody)
- return bytesBody, nil
- }
-
- bytesBody, err := io.ReadAll(resp.Body)
- resp.Body.Close()
- if err != nil {
- return nil, err
- }
-
- bytesBody = modifyBytes(bytesBody)
- resp.Body = &nopClosingBytesReader{s: bytesBody}
- return bytesBody, nil
-}
-
-// PayloadDownloaded returns true if the response body has already been downloaded.
-// This implies that the Payload() func above has been previously called.
-// NOT exported but used by azcore.
-func PayloadDownloaded(resp *http.Response) bool {
- _, ok := resp.Body.(*nopClosingBytesReader)
- return ok
-}
-
-// nopClosingBytesReader is an io.ReadSeekCloser around a byte slice.
-// It also provides direct access to the byte slice to avoid rereading.
-type nopClosingBytesReader struct {
- s []byte
- i int64
-}
-
-// Bytes returns the underlying byte slice.
-func (r *nopClosingBytesReader) Bytes() []byte {
- return r.s
-}
-
-// Close implements the io.Closer interface.
-func (*nopClosingBytesReader) Close() error {
- return nil
-}
-
-// Read implements the io.Reader interface.
-func (r *nopClosingBytesReader) Read(b []byte) (n int, err error) {
- if r.i >= int64(len(r.s)) {
- return 0, io.EOF
- }
- n = copy(b, r.s[r.i:])
- r.i += int64(n)
- return
-}
-
-// Set replaces the existing byte slice with the specified byte slice and resets the reader.
-func (r *nopClosingBytesReader) Set(b []byte) {
- r.s = b
- r.i = 0
-}
-
-// Seek implements the io.Seeker interface.
-func (r *nopClosingBytesReader) Seek(offset int64, whence int) (int64, error) {
- var i int64
- switch whence {
- case io.SeekStart:
- i = offset
- case io.SeekCurrent:
- i = r.i + offset
- case io.SeekEnd:
- i = int64(len(r.s)) + offset
- default:
- return 0, errors.New("nopClosingBytesReader: invalid whence")
- }
- if i < 0 {
- return 0, errors.New("nopClosingBytesReader: negative position")
- }
- r.i = i
- return i, nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go
deleted file mode 100644
index d7876d29..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go
+++ /dev/null
@@ -1,7 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package log
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go
deleted file mode 100644
index 4f1dcf1b..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go
+++ /dev/null
@@ -1,104 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package log
-
-import (
- "fmt"
- "os"
- "time"
-)
-
-///////////////////////////////////////////////////////////////////////////////////////////////////
-// NOTE: The following are exported as public surface area from azcore. DO NOT MODIFY
-///////////////////////////////////////////////////////////////////////////////////////////////////
-
-// Event is used to group entries. Each group can be toggled on or off.
-type Event string
-
-// SetEvents is used to control which events are written to
-// the log. By default all log events are writen.
-func SetEvents(cls ...Event) {
- log.cls = cls
-}
-
-// SetListener will set the Logger to write to the specified listener.
-func SetListener(lst func(Event, string)) {
- log.lst = lst
-}
-
-///////////////////////////////////////////////////////////////////////////////////////////////////
-// END PUBLIC SURFACE AREA
-///////////////////////////////////////////////////////////////////////////////////////////////////
-
-// Should returns true if the specified log event should be written to the log.
-// By default all log events will be logged. Call SetEvents() to limit
-// the log events for logging.
-// If no listener has been set this will return false.
-// Calling this method is useful when the message to log is computationally expensive
-// and you want to avoid the overhead if its log event is not enabled.
-func Should(cls Event) bool {
- if log.lst == nil {
- return false
- }
- if log.cls == nil || len(log.cls) == 0 {
- return true
- }
- for _, c := range log.cls {
- if c == cls {
- return true
- }
- }
- return false
-}
-
-// Write invokes the underlying listener with the specified event and message.
-// If the event shouldn't be logged or there is no listener then Write does nothing.
-func Write(cls Event, message string) {
- if !Should(cls) {
- return
- }
- log.lst(cls, message)
-}
-
-// Writef invokes the underlying listener with the specified event and formatted message.
-// If the event shouldn't be logged or there is no listener then Writef does nothing.
-func Writef(cls Event, format string, a ...interface{}) {
- if !Should(cls) {
- return
- }
- log.lst(cls, fmt.Sprintf(format, a...))
-}
-
-// TestResetEvents is used for TESTING PURPOSES ONLY.
-func TestResetEvents() {
- log.cls = nil
-}
-
-// logger controls which events to log and writing to the underlying log.
-type logger struct {
- cls []Event
- lst func(Event, string)
-}
-
-// the process-wide logger
-var log logger
-
-func init() {
- initLogging()
-}
-
-// split out for testing purposes
-func initLogging() {
- if cls := os.Getenv("AZURE_SDK_GO_LOGGING"); cls == "all" {
- // cls could be enhanced to support a comma-delimited list of log events
- log.lst = func(cls Event, msg string) {
- // simple console logger, it writes to stderr in the following format:
- // [time-stamp] Event: message
- fmt.Fprintf(os.Stderr, "[%s] %s: %s\n", time.Now().Format(time.StampMicro), cls, msg)
- }
- }
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go
deleted file mode 100644
index db826962..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/poller/util.go
+++ /dev/null
@@ -1,155 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package poller
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "net/url"
- "strings"
-
- "github.com/Azure/azure-sdk-for-go/sdk/internal/exported"
-)
-
-// the well-known set of LRO status/provisioning state values.
-const (
- StatusSucceeded = "Succeeded"
- StatusCanceled = "Canceled"
- StatusFailed = "Failed"
- StatusInProgress = "InProgress"
-)
-
-// these are non-conformant states that we've seen in the wild.
-// we support them for back-compat.
-const (
- StatusCancelled = "Cancelled"
- StatusCompleted = "Completed"
-)
-
-// IsTerminalState returns true if the LRO's state is terminal.
-func IsTerminalState(s string) bool {
- return Failed(s) || Succeeded(s)
-}
-
-// Failed returns true if the LRO's state is terminal failure.
-func Failed(s string) bool {
- return strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled) || strings.EqualFold(s, StatusCancelled)
-}
-
-// Succeeded returns true if the LRO's state is terminal success.
-func Succeeded(s string) bool {
- return strings.EqualFold(s, StatusSucceeded) || strings.EqualFold(s, StatusCompleted)
-}
-
-// returns true if the LRO response contains a valid HTTP status code
-func StatusCodeValid(resp *http.Response) bool {
- return exported.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusCreated, http.StatusNoContent)
-}
-
-// IsValidURL verifies that the URL is valid and absolute.
-func IsValidURL(s string) bool {
- u, err := url.Parse(s)
- return err == nil && u.IsAbs()
-}
-
-// ErrNoBody is returned if the response didn't contain a body.
-var ErrNoBody = errors.New("the response did not contain a body")
-
-// GetJSON reads the response body into a raw JSON object.
-// It returns ErrNoBody if there was no content.
-func GetJSON(resp *http.Response) (map[string]any, error) {
- body, err := exported.Payload(resp, nil)
- if err != nil {
- return nil, err
- }
- if len(body) == 0 {
- return nil, ErrNoBody
- }
- // unmarshall the body to get the value
- var jsonBody map[string]any
- if err = json.Unmarshal(body, &jsonBody); err != nil {
- return nil, err
- }
- return jsonBody, nil
-}
-
-// provisioningState returns the provisioning state from the response or the empty string.
-func provisioningState(jsonBody map[string]any) string {
- jsonProps, ok := jsonBody["properties"]
- if !ok {
- return ""
- }
- props, ok := jsonProps.(map[string]any)
- if !ok {
- return ""
- }
- rawPs, ok := props["provisioningState"]
- if !ok {
- return ""
- }
- ps, ok := rawPs.(string)
- if !ok {
- return ""
- }
- return ps
-}
-
-// status returns the status from the response or the empty string.
-func status(jsonBody map[string]any) string {
- rawStatus, ok := jsonBody["status"]
- if !ok {
- return ""
- }
- status, ok := rawStatus.(string)
- if !ok {
- return ""
- }
- return status
-}
-
-// GetStatus returns the LRO's status from the response body.
-// Typically used for Azure-AsyncOperation flows.
-// If there is no status in the response body the empty string is returned.
-func GetStatus(resp *http.Response) (string, error) {
- jsonBody, err := GetJSON(resp)
- if err != nil {
- return "", err
- }
- return status(jsonBody), nil
-}
-
-// GetProvisioningState returns the LRO's state from the response body.
-// If there is no state in the response body the empty string is returned.
-func GetProvisioningState(resp *http.Response) (string, error) {
- jsonBody, err := GetJSON(resp)
- if err != nil {
- return "", err
- }
- return provisioningState(jsonBody), nil
-}
-
-// GetResourceLocation returns the LRO's resourceLocation value from the response body.
-// Typically used for Operation-Location flows.
-// If there is no resourceLocation in the response body the empty string is returned.
-func GetResourceLocation(resp *http.Response) (string, error) {
- jsonBody, err := GetJSON(resp)
- if err != nil {
- return "", err
- }
- v, ok := jsonBody["resourceLocation"]
- if !ok {
- // it might be ok if the field doesn't exist, the caller must make that determination
- return "", nil
- }
- vv, ok := v.(string)
- if !ok {
- return "", fmt.Errorf("the resourceLocation value %v was not in string format", v)
- }
- return vv, nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go
deleted file mode 100644
index 238ef42e..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go
+++ /dev/null
@@ -1,123 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package temporal
-
-import (
- "sync"
- "time"
-)
-
-// AcquireResource abstracts a method for refreshing a temporal resource.
-type AcquireResource[TResource, TState any] func(state TState) (newResource TResource, newExpiration time.Time, err error)
-
-// Resource is a temporal resource (usually a credential) that requires periodic refreshing.
-type Resource[TResource, TState any] struct {
- // cond is used to synchronize access to the shared resource embodied by the remaining fields
- cond *sync.Cond
-
- // acquiring indicates that some thread/goroutine is in the process of acquiring/updating the resource
- acquiring bool
-
- // resource contains the value of the shared resource
- resource TResource
-
- // expiration indicates when the shared resource expires; it is 0 if the resource was never acquired
- expiration time.Time
-
- // lastAttempt indicates when a thread/goroutine last attempted to acquire/update the resource
- lastAttempt time.Time
-
- // acquireResource is the callback function that actually acquires the resource
- acquireResource AcquireResource[TResource, TState]
-}
-
-// NewResource creates a new Resource that uses the specified AcquireResource for refreshing.
-func NewResource[TResource, TState any](ar AcquireResource[TResource, TState]) *Resource[TResource, TState] {
- return &Resource[TResource, TState]{cond: sync.NewCond(&sync.Mutex{}), acquireResource: ar}
-}
-
-// Get returns the underlying resource.
-// If the resource is fresh, no refresh is performed.
-func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) {
- // If the resource is expiring within this time window, update it eagerly.
- // This allows other threads/goroutines to keep running by using the not-yet-expired
- // resource value while one thread/goroutine updates the resource.
- const window = 5 * time.Minute // This example updates the resource 5 minutes prior to expiration
- const backoff = 30 * time.Second // Minimum wait time between eager update attempts
-
- now, acquire, expired := time.Now(), false, false
-
- // acquire exclusive lock
- er.cond.L.Lock()
- resource := er.resource
-
- for {
- expired = er.expiration.IsZero() || er.expiration.Before(now)
- if expired {
- // The resource was never acquired or has expired
- if !er.acquiring {
- // If another thread/goroutine is not acquiring/updating the resource, this thread/goroutine will do it
- er.acquiring, acquire = true, true
- break
- }
- // Getting here means that this thread/goroutine will wait for the updated resource
- } else if er.expiration.Add(-window).Before(now) {
- // The resource is valid but is expiring within the time window
- if !er.acquiring && er.lastAttempt.Add(backoff).Before(now) {
- // If another thread/goroutine is not acquiring/renewing the resource, and none has attempted
- // to do so within the last 30 seconds, this thread/goroutine will do it
- er.acquiring, acquire = true, true
- break
- }
- // This thread/goroutine will use the existing resource value while another updates it
- resource = er.resource
- break
- } else {
- // The resource is not close to expiring, this thread/goroutine should use its current value
- resource = er.resource
- break
- }
- // If we get here, wait for the new resource value to be acquired/updated
- er.cond.Wait()
- }
- er.cond.L.Unlock() // Release the lock so no threads/goroutines are blocked
-
- var err error
- if acquire {
- // This thread/goroutine has been selected to acquire/update the resource
- var expiration time.Time
- var newValue TResource
- er.lastAttempt = now
- newValue, expiration, err = er.acquireResource(state)
-
- // Atomically, update the shared resource's new value & expiration.
- er.cond.L.Lock()
- if err == nil {
- // Update resource & expiration, return the new value
- resource = newValue
- er.resource, er.expiration = resource, expiration
- } else if !expired {
- // An eager update failed. Discard the error and return the current--still valid--resource value
- err = nil
- }
- er.acquiring = false // Indicate that no thread/goroutine is currently acquiring the resource
-
- // Wake up any waiting threads/goroutines since there is a resource they can ALL use
- er.cond.L.Unlock()
- er.cond.Broadcast()
- }
- return resource, err // Return the resource this thread/goroutine can use
-}
-
-// Expire marks the resource as expired, ensuring it's refreshed on the next call to Get().
-func (er *Resource[TResource, TState]) Expire() {
- er.cond.L.Lock()
- defer er.cond.L.Unlock()
-
- // Reset the expiration as if we never got this resource to begin with
- er.expiration = time.Time{}
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go
deleted file mode 100644
index a3824bee..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go
+++ /dev/null
@@ -1,7 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package uuid
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go
deleted file mode 100644
index 278ac9cd..00000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go
+++ /dev/null
@@ -1,76 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package uuid
-
-import (
- "crypto/rand"
- "errors"
- "fmt"
- "strconv"
-)
-
-// The UUID reserved variants.
-const (
- reservedRFC4122 byte = 0x40
-)
-
-// A UUID representation compliant with specification in RFC4122 document.
-type UUID [16]byte
-
-// New returns a new UUID using the RFC4122 algorithm.
-func New() (UUID, error) {
- u := UUID{}
- // Set all bits to pseudo-random values.
- // NOTE: this takes a process-wide lock
- _, err := rand.Read(u[:])
- if err != nil {
- return u, err
- }
- u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122)
-
- var version byte = 4
- u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4)
- return u, nil
-}
-
-// String returns the UUID in "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" format.
-func (u UUID) String() string {
- return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
-}
-
-// Parse parses a string formatted as "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
-// or "{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}" into a UUID.
-func Parse(s string) (UUID, error) {
- var uuid UUID
- // ensure format
- switch len(s) {
- case 36:
- // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- case 38:
- // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
- s = s[1:37]
- default:
- return uuid, errors.New("invalid UUID format")
- }
- if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
- return uuid, errors.New("invalid UUID format")
- }
- // parse chunks
- for i, x := range [16]int{
- 0, 2, 4, 6,
- 9, 11,
- 14, 16,
- 19, 21,
- 24, 26, 28, 30, 32, 34} {
- b, err := strconv.ParseUint(s[x:x+2], 16, 8)
- if err != nil {
- return uuid, fmt.Errorf("invalid UUID format: %s", err)
- }
- uuid[i] = byte(b)
- }
- return uuid, nil
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE
deleted file mode 100644
index 3d8b93bc..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
- MIT License
-
- Copyright (c) Microsoft Corporation.
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go
deleted file mode 100644
index 19210883..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache/cache.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-/*
-Package cache allows third parties to implement external storage for caching token data
-for distributed systems or multiple local applications access.
-
-The data stored and extracted will represent the entire cache. Therefore it is recommended
-one msal instance per user. This data is considered opaque and there are no guarantees to
-implementers on the format being passed.
-*/
-package cache
-
-import "context"
-
-// Marshaler marshals data from an internal cache to bytes that can be stored.
-type Marshaler interface {
- Marshal() ([]byte, error)
-}
-
-// Unmarshaler unmarshals data from a storage medium into the internal cache, overwriting it.
-type Unmarshaler interface {
- Unmarshal([]byte) error
-}
-
-// Serializer can serialize the cache to binary or from binary into the cache.
-type Serializer interface {
- Marshaler
- Unmarshaler
-}
-
-// ExportHints are suggestions for storing data.
-type ExportHints struct {
- // PartitionKey is a suggested key for partitioning the cache
- PartitionKey string
-}
-
-// ReplaceHints are suggestions for loading data.
-type ReplaceHints struct {
- // PartitionKey is a suggested key for partitioning the cache
- PartitionKey string
-}
-
-// ExportReplace exports and replaces in-memory cache data. It doesn't support nil Context or
-// define the outcome of passing one. A Context without a timeout must receive a default timeout
-// specified by the implementor. Retries must be implemented inside the implementation.
-type ExportReplace interface {
- // Replace replaces the cache with what is in external storage. Implementors should honor
- // Context cancellations and return context.Canceled or context.DeadlineExceeded in those cases.
- Replace(ctx context.Context, cache Unmarshaler, hints ReplaceHints) error
- // Export writes the binary representation of the cache (cache.Marshal()) to external storage.
- // This is considered opaque. Context cancellations should be honored as in Replace.
- Export(ctx context.Context, cache Marshaler, hints ExportHints) error
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
deleted file mode 100644
index f8628605..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
+++ /dev/null
@@ -1,719 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-/*
-Package confidential provides a client for authentication of "confidential" applications.
-A "confidential" application is defined as an app that run on servers. They are considered
-difficult to access and for that reason capable of keeping an application secret.
-Confidential clients can hold configuration-time secrets.
-*/
-package confidential
-
-import (
- "context"
- "crypto"
- "crypto/rsa"
- "crypto/x509"
- "encoding/base64"
- "encoding/pem"
- "errors"
- "fmt"
-
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
-)
-
-/*
-Design note:
-
-confidential.Client uses base.Client as an embedded type. base.Client statically assigns its attributes
-during creation. As it doesn't have any pointers in it, anything borrowed from it, such as
-Base.AuthParams is a copy that is free to be manipulated here.
-
-Duplicate Calls shared between public.Client and this package:
-There is some duplicate call options provided here that are the same as in public.Client . This
-is a design choices. Go proverb(https://www.youtube.com/watch?v=PAAkCSZUG1c&t=9m28s):
-"a little copying is better than a little dependency". Yes, we could have another package with
-shared options (fail). That divides like 2 options from all others which makes the user look
-through more docs. We can have all clients in one package, but I think separate packages
-here makes for better naming (public.Client vs client.PublicClient). So I chose a little
-duplication.
-
-.Net People, Take note on X509:
-This uses x509.Certificates and private keys. x509 does not store private keys. .Net
-has a x509.Certificate2 abstraction that has private keys, but that just a strange invention.
-As such I've put a PEM decoder into here.
-*/
-
-// TODO(msal): This should have example code for each method on client using Go's example doc framework.
-// base usage details should be include in the package documentation.
-
-// AuthResult contains the results of one token acquisition operation.
-// For details see https://aka.ms/msal-net-authenticationresult
-type AuthResult = base.AuthResult
-
-type AuthenticationScheme = authority.AuthenticationScheme
-
-type Account = shared.Account
-
-// CertFromPEM converts a PEM file (.pem or .key) for use with [NewCredFromCert]. The file
-// must contain the public certificate and the private key. If a PEM block is encrypted and
-// password is not an empty string, it attempts to decrypt the PEM blocks using the password.
-// Multiple certs are due to certificate chaining for use cases like TLS that sign from root to leaf.
-func CertFromPEM(pemData []byte, password string) ([]*x509.Certificate, crypto.PrivateKey, error) {
- var certs []*x509.Certificate
- var priv crypto.PrivateKey
- for {
- block, rest := pem.Decode(pemData)
- if block == nil {
- break
- }
-
- //nolint:staticcheck // x509.IsEncryptedPEMBlock and x509.DecryptPEMBlock are deprecated. They are used here only to support a usecase.
- if x509.IsEncryptedPEMBlock(block) {
- b, err := x509.DecryptPEMBlock(block, []byte(password))
- if err != nil {
- return nil, nil, fmt.Errorf("could not decrypt encrypted PEM block: %v", err)
- }
- block, _ = pem.Decode(b)
- if block == nil {
- return nil, nil, fmt.Errorf("encounter encrypted PEM block that did not decode")
- }
- }
-
- switch block.Type {
- case "CERTIFICATE":
- cert, err := x509.ParseCertificate(block.Bytes)
- if err != nil {
- return nil, nil, fmt.Errorf("block labelled 'CERTIFICATE' could not be parsed by x509: %v", err)
- }
- certs = append(certs, cert)
- case "PRIVATE KEY":
- if priv != nil {
- return nil, nil, errors.New("found multiple private key blocks")
- }
-
- var err error
- priv, err = x509.ParsePKCS8PrivateKey(block.Bytes)
- if err != nil {
- return nil, nil, fmt.Errorf("could not decode private key: %v", err)
- }
- case "RSA PRIVATE KEY":
- if priv != nil {
- return nil, nil, errors.New("found multiple private key blocks")
- }
- var err error
- priv, err = x509.ParsePKCS1PrivateKey(block.Bytes)
- if err != nil {
- return nil, nil, fmt.Errorf("could not decode private key: %v", err)
- }
- }
- pemData = rest
- }
-
- if len(certs) == 0 {
- return nil, nil, fmt.Errorf("no certificates found")
- }
-
- if priv == nil {
- return nil, nil, fmt.Errorf("no private key found")
- }
-
- return certs, priv, nil
-}
-
-// AssertionRequestOptions has required information for client assertion claims
-type AssertionRequestOptions = exported.AssertionRequestOptions
-
-// Credential represents the credential used in confidential client flows.
-type Credential struct {
- secret string
-
- cert *x509.Certificate
- key crypto.PrivateKey
- x5c []string
-
- assertionCallback func(context.Context, AssertionRequestOptions) (string, error)
-
- tokenProvider func(context.Context, TokenProviderParameters) (TokenProviderResult, error)
-}
-
-// toInternal returns the accesstokens.Credential that is used internally. The current structure of the
-// code requires that client.go, requests.go and confidential.go share a credential type without
-// having import recursion. That requires the type used between is in a shared package. Therefore
-// we have this.
-func (c Credential) toInternal() (*accesstokens.Credential, error) {
- if c.secret != "" {
- return &accesstokens.Credential{Secret: c.secret}, nil
- }
- if c.cert != nil {
- if c.key == nil {
- return nil, errors.New("missing private key for certificate")
- }
- return &accesstokens.Credential{Cert: c.cert, Key: c.key, X5c: c.x5c}, nil
- }
- if c.key != nil {
- return nil, errors.New("missing certificate for private key")
- }
- if c.assertionCallback != nil {
- return &accesstokens.Credential{AssertionCallback: c.assertionCallback}, nil
- }
- if c.tokenProvider != nil {
- return &accesstokens.Credential{TokenProvider: c.tokenProvider}, nil
- }
- return nil, errors.New("invalid credential")
-}
-
-// NewCredFromSecret creates a Credential from a secret.
-func NewCredFromSecret(secret string) (Credential, error) {
- if secret == "" {
- return Credential{}, errors.New("secret can't be empty string")
- }
- return Credential{secret: secret}, nil
-}
-
-// NewCredFromAssertionCallback creates a Credential that invokes a callback to get assertions
-// authenticating the application. The callback must be thread safe.
-func NewCredFromAssertionCallback(callback func(context.Context, AssertionRequestOptions) (string, error)) Credential {
- return Credential{assertionCallback: callback}
-}
-
-// NewCredFromCert creates a Credential from a certificate or chain of certificates and an RSA private key
-// as returned by [CertFromPEM].
-func NewCredFromCert(certs []*x509.Certificate, key crypto.PrivateKey) (Credential, error) {
- cred := Credential{key: key}
- k, ok := key.(*rsa.PrivateKey)
- if !ok {
- return cred, errors.New("key must be an RSA key")
- }
- for _, cert := range certs {
- if cert == nil {
- // not returning an error here because certs may still contain a sufficient cert/key pair
- continue
- }
- certKey, ok := cert.PublicKey.(*rsa.PublicKey)
- if ok && k.E == certKey.E && k.N.Cmp(certKey.N) == 0 {
- // We know this is the signing cert because its public key matches the given private key.
- // This cert must be first in x5c.
- cred.cert = cert
- cred.x5c = append([]string{base64.StdEncoding.EncodeToString(cert.Raw)}, cred.x5c...)
- } else {
- cred.x5c = append(cred.x5c, base64.StdEncoding.EncodeToString(cert.Raw))
- }
- }
- if cred.cert == nil {
- return cred, errors.New("key doesn't match any certificate")
- }
- return cred, nil
-}
-
-// TokenProviderParameters is the authentication parameters passed to token providers
-type TokenProviderParameters = exported.TokenProviderParameters
-
-// TokenProviderResult is the authentication result returned by custom token providers
-type TokenProviderResult = exported.TokenProviderResult
-
-// NewCredFromTokenProvider creates a Credential from a function that provides access tokens. The function
-// must be concurrency safe. This is intended only to allow the Azure SDK to cache MSI tokens. It isn't
-// useful to applications in general because the token provider must implement all authentication logic.
-func NewCredFromTokenProvider(provider func(context.Context, TokenProviderParameters) (TokenProviderResult, error)) Credential {
- return Credential{tokenProvider: provider}
-}
-
-// AutoDetectRegion instructs MSAL Go to auto detect region for Azure regional token service.
-func AutoDetectRegion() string {
- return "TryAutoDetect"
-}
-
-// Client is a representation of authentication client for confidential applications as defined in the
-// package doc. A new Client should be created PER SERVICE USER.
-// For more information, visit https://docs.microsoft.com/azure/active-directory/develop/msal-client-applications
-type Client struct {
- base base.Client
- cred *accesstokens.Credential
-}
-
-// clientOptions are optional settings for New(). These options are set using various functions
-// returning Option calls.
-type clientOptions struct {
- accessor cache.ExportReplace
- authority, azureRegion string
- capabilities []string
- disableInstanceDiscovery, sendX5C bool
- httpClient ops.HTTPClient
-}
-
-// Option is an optional argument to New().
-type Option func(o *clientOptions)
-
-// WithCache provides an accessor that will read and write authentication data to an externally managed cache.
-func WithCache(accessor cache.ExportReplace) Option {
- return func(o *clientOptions) {
- o.accessor = accessor
- }
-}
-
-// WithClientCapabilities allows configuring one or more client capabilities such as "CP1"
-func WithClientCapabilities(capabilities []string) Option {
- return func(o *clientOptions) {
- // there's no danger of sharing the slice's underlying memory with the application because
- // this slice is simply passed to base.WithClientCapabilities, which copies its data
- o.capabilities = capabilities
- }
-}
-
-// WithHTTPClient allows for a custom HTTP client to be set.
-func WithHTTPClient(httpClient ops.HTTPClient) Option {
- return func(o *clientOptions) {
- o.httpClient = httpClient
- }
-}
-
-// WithX5C specifies if x5c claim(public key of the certificate) should be sent to STS to enable Subject Name Issuer Authentication.
-func WithX5C() Option {
- return func(o *clientOptions) {
- o.sendX5C = true
- }
-}
-
-// WithInstanceDiscovery set to false to disable authority validation (to support private cloud scenarios)
-func WithInstanceDiscovery(enabled bool) Option {
- return func(o *clientOptions) {
- o.disableInstanceDiscovery = !enabled
- }
-}
-
-// WithAzureRegion sets the region(preferred) or Confidential.AutoDetectRegion() for auto detecting region.
-// Region names as per https://azure.microsoft.com/en-ca/global-infrastructure/geographies/.
-// See https://aka.ms/region-map for more details on region names.
-// The region value should be short region name for the region where the service is deployed.
-// For example "centralus" is short name for region Central US.
-// Not all auth flows can use the regional token service.
-// Service To Service (client credential flow) tokens can be obtained from the regional service.
-// Requires configuration at the tenant level.
-// Auto-detection works on a limited number of Azure artifacts (VMs, Azure functions).
-// If auto-detection fails, the non-regional endpoint will be used.
-// If an invalid region name is provided, the non-regional endpoint MIGHT be used or the token request MIGHT fail.
-func WithAzureRegion(val string) Option {
- return func(o *clientOptions) {
- o.azureRegion = val
- }
-}
-
-// New is the constructor for Client. authority is the URL of a token authority such as "https://login.microsoftonline.com/".
-// If the Client will connect directly to AD FS, use "adfs" for the tenant. clientID is the application's client ID (also called its
-// "application ID").
-func New(authority, clientID string, cred Credential, options ...Option) (Client, error) {
- internalCred, err := cred.toInternal()
- if err != nil {
- return Client{}, err
- }
-
- opts := clientOptions{
- authority: authority,
- // if the caller specified a token provider, it will handle all details of authentication, using Client only as a token cache
- disableInstanceDiscovery: cred.tokenProvider != nil,
- httpClient: shared.DefaultClient,
- }
- for _, o := range options {
- o(&opts)
- }
- baseOpts := []base.Option{
- base.WithCacheAccessor(opts.accessor),
- base.WithClientCapabilities(opts.capabilities),
- base.WithInstanceDiscovery(!opts.disableInstanceDiscovery),
- base.WithRegionDetection(opts.azureRegion),
- base.WithX5C(opts.sendX5C),
- }
- base, err := base.New(clientID, opts.authority, oauth.New(opts.httpClient), baseOpts...)
- if err != nil {
- return Client{}, err
- }
- base.AuthParams.IsConfidentialClient = true
-
- return Client{base: base, cred: internalCred}, nil
-}
-
-// authCodeURLOptions contains options for AuthCodeURL
-type authCodeURLOptions struct {
- claims, loginHint, tenantID, domainHint string
-}
-
-// AuthCodeURLOption is implemented by options for AuthCodeURL
-type AuthCodeURLOption interface {
- authCodeURLOption()
-}
-
-// AuthCodeURL creates a URL used to acquire an authorization code. Users need to call CreateAuthorizationCodeURLParameters and pass it in.
-//
-// Options: [WithClaims], [WithDomainHint], [WithLoginHint], [WithTenantID]
-func (cca Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, opts ...AuthCodeURLOption) (string, error) {
- o := authCodeURLOptions{}
- if err := options.ApplyOptions(&o, opts); err != nil {
- return "", err
- }
- ap, err := cca.base.AuthParams.WithTenant(o.tenantID)
- if err != nil {
- return "", err
- }
- ap.Claims = o.claims
- ap.LoginHint = o.loginHint
- ap.DomainHint = o.domainHint
- return cca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, ap)
-}
-
-// WithLoginHint pre-populates the login prompt with a username.
-func WithLoginHint(username string) interface {
- AuthCodeURLOption
- options.CallOption
-} {
- return struct {
- AuthCodeURLOption
- options.CallOption
- }{
- CallOption: options.NewCallOption(
- func(a any) error {
- switch t := a.(type) {
- case *authCodeURLOptions:
- t.loginHint = username
- default:
- return fmt.Errorf("unexpected options type %T", a)
- }
- return nil
- },
- ),
- }
-}
-
-// WithDomainHint adds the IdP domain as domain_hint query parameter in the auth url.
-func WithDomainHint(domain string) interface {
- AuthCodeURLOption
- options.CallOption
-} {
- return struct {
- AuthCodeURLOption
- options.CallOption
- }{
- CallOption: options.NewCallOption(
- func(a any) error {
- switch t := a.(type) {
- case *authCodeURLOptions:
- t.domainHint = domain
- default:
- return fmt.Errorf("unexpected options type %T", a)
- }
- return nil
- },
- ),
- }
-}
-
-// WithClaims sets additional claims to request for the token, such as those required by conditional access policies.
-// Use this option when Azure AD returned a claims challenge for a prior request. The argument must be decoded.
-// This option is valid for any token acquisition method.
-func WithClaims(claims string) interface {
- AcquireByAuthCodeOption
- AcquireByCredentialOption
- AcquireOnBehalfOfOption
- AcquireSilentOption
- AuthCodeURLOption
- options.CallOption
-} {
- return struct {
- AcquireByAuthCodeOption
- AcquireByCredentialOption
- AcquireOnBehalfOfOption
- AcquireSilentOption
- AuthCodeURLOption
- options.CallOption
- }{
- CallOption: options.NewCallOption(
- func(a any) error {
- switch t := a.(type) {
- case *acquireTokenByAuthCodeOptions:
- t.claims = claims
- case *acquireTokenByCredentialOptions:
- t.claims = claims
- case *acquireTokenOnBehalfOfOptions:
- t.claims = claims
- case *acquireTokenSilentOptions:
- t.claims = claims
- case *authCodeURLOptions:
- t.claims = claims
- default:
- return fmt.Errorf("unexpected options type %T", a)
- }
- return nil
- },
- ),
- }
-}
-
-// WithAuthenticationScheme is an extensibility mechanism designed to be used only by Azure Arc for proof of possession access tokens.
-func WithAuthenticationScheme(authnScheme AuthenticationScheme) interface {
- AcquireSilentOption
- AcquireByCredentialOption
- options.CallOption
-} {
- return struct {
- AcquireSilentOption
- AcquireByCredentialOption
- options.CallOption
- }{
- CallOption: options.NewCallOption(
- func(a any) error {
- switch t := a.(type) {
- case *acquireTokenSilentOptions:
- t.authnScheme = authnScheme
- case *acquireTokenByCredentialOptions:
- t.authnScheme = authnScheme
- default:
- return fmt.Errorf("unexpected options type %T", a)
- }
- return nil
- },
- ),
- }
-}
-
-// WithTenantID specifies a tenant for a single authentication. It may be different than the tenant set in [New].
-// This option is valid for any token acquisition method.
-func WithTenantID(tenantID string) interface {
- AcquireByAuthCodeOption
- AcquireByCredentialOption
- AcquireOnBehalfOfOption
- AcquireSilentOption
- AuthCodeURLOption
- options.CallOption
-} {
- return struct {
- AcquireByAuthCodeOption
- AcquireByCredentialOption
- AcquireOnBehalfOfOption
- AcquireSilentOption
- AuthCodeURLOption
- options.CallOption
- }{
- CallOption: options.NewCallOption(
- func(a any) error {
- switch t := a.(type) {
- case *acquireTokenByAuthCodeOptions:
- t.tenantID = tenantID
- case *acquireTokenByCredentialOptions:
- t.tenantID = tenantID
- case *acquireTokenOnBehalfOfOptions:
- t.tenantID = tenantID
- case *acquireTokenSilentOptions:
- t.tenantID = tenantID
- case *authCodeURLOptions:
- t.tenantID = tenantID
- default:
- return fmt.Errorf("unexpected options type %T", a)
- }
- return nil
- },
- ),
- }
-}
-
-// acquireTokenSilentOptions are all the optional settings to an AcquireTokenSilent() call.
-// These are set by using various AcquireTokenSilentOption functions.
-type acquireTokenSilentOptions struct {
- account Account
- claims, tenantID string
- authnScheme AuthenticationScheme
-}
-
-// AcquireSilentOption is implemented by options for AcquireTokenSilent
-type AcquireSilentOption interface {
- acquireSilentOption()
-}
-
-// WithSilentAccount uses the passed account during an AcquireTokenSilent() call.
-func WithSilentAccount(account Account) interface {
- AcquireSilentOption
- options.CallOption
-} {
- return struct {
- AcquireSilentOption
- options.CallOption
- }{
- CallOption: options.NewCallOption(
- func(a any) error {
- switch t := a.(type) {
- case *acquireTokenSilentOptions:
- t.account = account
- default:
- return fmt.Errorf("unexpected options type %T", a)
- }
- return nil
- },
- ),
- }
-}
-
-// AcquireTokenSilent acquires a token from either the cache or using a refresh token.
-//
-// Options: [WithClaims], [WithSilentAccount], [WithTenantID]
-func (cca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts ...AcquireSilentOption) (AuthResult, error) {
- o := acquireTokenSilentOptions{}
- if err := options.ApplyOptions(&o, opts); err != nil {
- return AuthResult{}, err
- }
-
- if o.claims != "" {
- return AuthResult{}, errors.New("call another AcquireToken method to request a new token having these claims")
- }
-
- silentParameters := base.AcquireTokenSilentParameters{
- Scopes: scopes,
- Account: o.account,
- RequestType: accesstokens.ATConfidential,
- Credential: cca.cred,
- IsAppCache: o.account.IsZero(),
- TenantID: o.tenantID,
- AuthnScheme: o.authnScheme,
- }
-
- return cca.base.AcquireTokenSilent(ctx, silentParameters)
-}
-
-// acquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow.
-type acquireTokenByAuthCodeOptions struct {
- challenge, claims, tenantID string
-}
-
-// AcquireByAuthCodeOption is implemented by options for AcquireTokenByAuthCode
-type AcquireByAuthCodeOption interface {
- acquireByAuthCodeOption()
-}
-
-// WithChallenge allows you to provide a challenge for the .AcquireTokenByAuthCode() call.
-func WithChallenge(challenge string) interface {
- AcquireByAuthCodeOption
- options.CallOption
-} {
- return struct {
- AcquireByAuthCodeOption
- options.CallOption
- }{
- CallOption: options.NewCallOption(
- func(a any) error {
- switch t := a.(type) {
- case *acquireTokenByAuthCodeOptions:
- t.challenge = challenge
- default:
- return fmt.Errorf("unexpected options type %T", a)
- }
- return nil
- },
- ),
- }
-}
-
-// AcquireTokenByAuthCode is a request to acquire a security token from the authority, using an authorization code.
-// The specified redirect URI must be the same URI that was used when the authorization code was requested.
-//
-// Options: [WithChallenge], [WithClaims], [WithTenantID]
-func (cca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, opts ...AcquireByAuthCodeOption) (AuthResult, error) {
- o := acquireTokenByAuthCodeOptions{}
- if err := options.ApplyOptions(&o, opts); err != nil {
- return AuthResult{}, err
- }
-
- params := base.AcquireTokenAuthCodeParameters{
- Scopes: scopes,
- Code: code,
- Challenge: o.challenge,
- Claims: o.claims,
- AppType: accesstokens.ATConfidential,
- Credential: cca.cred, // This setting differs from public.Client.AcquireTokenByAuthCode
- RedirectURI: redirectURI,
- TenantID: o.tenantID,
- }
-
- return cca.base.AcquireTokenByAuthCode(ctx, params)
-}
-
-// acquireTokenByCredentialOptions contains optional configuration for AcquireTokenByCredential
-type acquireTokenByCredentialOptions struct {
- claims, tenantID string
- authnScheme AuthenticationScheme
-}
-
-// AcquireByCredentialOption is implemented by options for AcquireTokenByCredential
-type AcquireByCredentialOption interface {
- acquireByCredOption()
-}
-
-// AcquireTokenByCredential acquires a security token from the authority, using the client credentials grant.
-//
-// Options: [WithClaims], [WithTenantID]
-func (cca Client) AcquireTokenByCredential(ctx context.Context, scopes []string, opts ...AcquireByCredentialOption) (AuthResult, error) {
- o := acquireTokenByCredentialOptions{}
- err := options.ApplyOptions(&o, opts)
- if err != nil {
- return AuthResult{}, err
- }
- authParams, err := cca.base.AuthParams.WithTenant(o.tenantID)
- if err != nil {
- return AuthResult{}, err
- }
- authParams.Scopes = scopes
- authParams.AuthorizationType = authority.ATClientCredentials
- authParams.Claims = o.claims
- if o.authnScheme != nil {
- authParams.AuthnScheme = o.authnScheme
- }
- token, err := cca.base.Token.Credential(ctx, authParams, cca.cred)
- if err != nil {
- return AuthResult{}, err
- }
- return cca.base.AuthResultFromToken(ctx, authParams, token, true)
-}
-
-// acquireTokenOnBehalfOfOptions contains optional configuration for AcquireTokenOnBehalfOf
-type acquireTokenOnBehalfOfOptions struct {
- claims, tenantID string
-}
-
-// AcquireOnBehalfOfOption is implemented by options for AcquireTokenOnBehalfOf
-type AcquireOnBehalfOfOption interface {
- acquireOBOOption()
-}
-
-// AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token.
-// Refer https://docs.microsoft.com/en-us/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow.
-//
-// Options: [WithClaims], [WithTenantID]
-func (cca Client) AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string, opts ...AcquireOnBehalfOfOption) (AuthResult, error) {
- o := acquireTokenOnBehalfOfOptions{}
- if err := options.ApplyOptions(&o, opts); err != nil {
- return AuthResult{}, err
- }
- params := base.AcquireTokenOnBehalfOfParameters{
- Scopes: scopes,
- UserAssertion: userAssertion,
- Claims: o.claims,
- Credential: cca.cred,
- TenantID: o.tenantID,
- }
- return cca.base.AcquireTokenOnBehalfOf(ctx, params)
-}
-
-// Account gets the account in the token cache with the specified homeAccountID.
-func (cca Client) Account(ctx context.Context, accountID string) (Account, error) {
- return cca.base.Account(ctx, accountID)
-}
-
-// RemoveAccount signs the account out and forgets account from token cache.
-func (cca Client) RemoveAccount(ctx context.Context, account Account) error {
- return cca.base.RemoveAccount(ctx, account)
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md
deleted file mode 100644
index 7ef7862f..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/error_design.md
+++ /dev/null
@@ -1,111 +0,0 @@
-# MSAL Error Design
-
-Author: Abhidnya Patil(abhidnya.patil@microsoft.com)
-
-Contributors:
-
-- John Doak(jdoak@microsoft.com)
-- Keegan Caruso(Keegan.Caruso@microsoft.com)
-- Joel Hendrix(jhendrix@microsoft.com)
-
-## Background
-
-Errors in MSAL are intended for app developers to troubleshoot and not for displaying to end-users.
-
-### Go error handling vs other MSAL languages
-
-Most modern languages use exception based errors. Simply put, you "throw" an exception and it must be caught at some routine in the upper stack or it will eventually crash the program.
-
-Go doesn't use exceptions, instead it relies on multiple return values, one of which can be the builtin error interface type. It is up to the user to decide what to do.
-
-### Go custom error types
-
-Errors can be created in Go by simply using errors.New() or fmt.Errorf() to create an "error".
-
-Custom errors can be created in multiple ways. One of the more robust ways is simply to satisfy the error interface:
-
-```go
-type MyCustomErr struct {
- Msg string
-}
-func (m MyCustomErr) Error() string { // This implements "error"
- return m.Msg
-}
-```
-
-### MSAL Error Goals
-
-- Provide diagnostics to the user and for tickets that can be used to track down bugs or client misconfigurations
-- Detect errors that are transitory and can be retried
-- Allow the user to identify certain errors that the program can respond to, such a informing the user for the need to do an enrollment
-
-## Implementing Client Side Errors
-
-Client side errors indicate a misconfiguration or passing of bad arguments that is non-recoverable. Retrying isn't possible.
-
-These errors can simply be standard Go errors created by errors.New() or fmt.Errorf(). If down the line we need a custom error, we can introduce it, but for now the error messages just need to be clear on what the issue was.
-
-## Implementing Service Side Errors
-
-Service side errors occur when an external RPC responds either with an HTTP error code or returns a message that includes an error.
-
-These errors can be transitory (please slow down) or permanent (HTTP 404). To provide our diagnostic goals, we require the ability to differentiate these errors from other errors.
-
-The current implementation includes a specialized type that captures any error from the server:
-
-```go
-// CallErr represents an HTTP call error. Has a Verbose() method that allows getting the
-// http.Request and Response objects. Implements error.
-type CallErr struct {
- Req *http.Request
- Resp *http.Response
- Err error
-}
-
-// Errors implements error.Error().
-func (e CallErr) Error() string {
- return e.Err.Error()
-}
-
-// Verbose prints a versbose error message with the request or response.
-func (e CallErr) Verbose() string {
- e.Resp.Request = nil // This brings in a bunch of TLS stuff we don't need
- e.Resp.TLS = nil // Same
- return fmt.Sprintf("%s:\nRequest:\n%s\nResponse:\n%s", e.Err, prettyConf.Sprint(e.Req), prettyConf.Sprint(e.Resp))
-}
-```
-
-A user will always receive the most concise error we provide. They can tell if it is a server side error using Go error package:
-
-```go
-var callErr CallErr
-if errors.As(err, &callErr) {
- ...
-}
-```
-
-We provide a Verbose() function that can retrieve the most verbose message from any error we provide:
-
-```go
-fmt.Println(errors.Verbose(err))
-```
-
-If further differentiation is required, we can add custom errors that use Go error wrapping on top of CallErr to achieve our diagnostic goals (such as detecting when to retry a call due to transient errors).
-
-CallErr is always thrown from the comm package (which handles all http requests) and looks similar to:
-
-```go
-return nil, errors.CallErr{
- Req: req,
- Resp: reply,
- Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s", req.URL.String(), req.Method, reply.StatusCode, ErrorResponse), //ErrorResponse is the json body extracted from the http response
- }
-```
-
-## Future Decisions
-
-The ability to retry calls needs to have centralized responsibility. Either the user is doing it or the client is doing it.
-
-If the user should be responsible, our errors package will include a CanRetry() function that will inform the user if the error provided to them is retryable. This is based on the http error code and possibly the type of error that was returned. It would also include a sleep time if the server returned an amount of time to wait.
-
-Otherwise we will do this internally and retries will be left to us.
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go
deleted file mode 100644
index c9b8dbed..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-package errors
-
-import (
- "errors"
- "fmt"
- "io"
- "net/http"
- "reflect"
- "strings"
-
- "github.com/kylelemons/godebug/pretty"
-)
-
-var prettyConf = &pretty.Config{
- IncludeUnexported: false,
- SkipZeroFields: true,
- TrackCycles: true,
- Formatter: map[reflect.Type]interface{}{
- reflect.TypeOf((*io.Reader)(nil)).Elem(): func(r io.Reader) string {
- b, err := io.ReadAll(r)
- if err != nil {
- return "could not read io.Reader content"
- }
- return string(b)
- },
- },
-}
-
-type verboser interface {
- Verbose() string
-}
-
-// Verbose prints the most verbose error that the error message has.
-func Verbose(err error) string {
- build := strings.Builder{}
- for {
- if err == nil {
- break
- }
- if v, ok := err.(verboser); ok {
- build.WriteString(v.Verbose())
- } else {
- build.WriteString(err.Error())
- }
- err = errors.Unwrap(err)
- }
- return build.String()
-}
-
-// New is equivalent to errors.New().
-func New(text string) error {
- return errors.New(text)
-}
-
-// CallErr represents an HTTP call error. Has a Verbose() method that allows getting the
-// http.Request and Response objects. Implements error.
-type CallErr struct {
- Req *http.Request
- // Resp contains response body
- Resp *http.Response
- Err error
-}
-
-// Errors implements error.Error().
-func (e CallErr) Error() string {
- return e.Err.Error()
-}
-
-// Verbose prints a versbose error message with the request or response.
-func (e CallErr) Verbose() string {
- e.Resp.Request = nil // This brings in a bunch of TLS crap we don't need
- e.Resp.TLS = nil // Same
- return fmt.Sprintf("%s:\nRequest:\n%s\nResponse:\n%s", e.Err, prettyConf.Sprint(e.Req), prettyConf.Sprint(e.Resp))
-}
-
-// Is reports whether any error in errors chain matches target.
-func Is(err, target error) bool {
- return errors.Is(err, target)
-}
-
-// As finds the first error in errors chain that matches target,
-// and if so, sets target to that error value and returns true.
-// Otherwise, it returns false.
-func As(err error, target interface{}) bool {
- return errors.As(err, target)
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
deleted file mode 100644
index 09a0d92f..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
+++ /dev/null
@@ -1,477 +0,0 @@
-// Package base contains a "Base" client that is used by the external public.Client and confidential.Client.
-// Base holds shared attributes that must be available to both clients and methods that act as
-// shared calls.
-package base
-
-import (
- "context"
- "errors"
- "fmt"
- "net/url"
- "reflect"
- "strings"
- "sync"
- "time"
-
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
-)
-
-const (
- // AuthorityPublicCloud is the default AAD authority host
- AuthorityPublicCloud = "https://login.microsoftonline.com/common"
- scopeSeparator = " "
-)
-
-// manager provides an internal cache. It is defined to allow faking the cache in tests.
-// In production it's a *storage.Manager or *storage.PartitionedManager.
-type manager interface {
- cache.Serializer
- Read(context.Context, authority.AuthParams) (storage.TokenResponse, error)
- Write(authority.AuthParams, accesstokens.TokenResponse) (shared.Account, error)
-}
-
-// accountManager is a manager that also caches accounts. In production it's a *storage.Manager.
-type accountManager interface {
- manager
- AllAccounts() []shared.Account
- Account(homeAccountID string) shared.Account
- RemoveAccount(account shared.Account, clientID string)
-}
-
-// AcquireTokenSilentParameters contains the parameters to acquire a token silently (from cache).
-type AcquireTokenSilentParameters struct {
- Scopes []string
- Account shared.Account
- RequestType accesstokens.AppType
- Credential *accesstokens.Credential
- IsAppCache bool
- TenantID string
- UserAssertion string
- AuthorizationType authority.AuthorizeType
- Claims string
- AuthnScheme authority.AuthenticationScheme
-}
-
-// AcquireTokenAuthCodeParameters contains the parameters required to acquire an access token using the auth code flow.
-// To use PKCE, set the CodeChallengeParameter.
-// Code challenges are used to secure authorization code grants; for more information, visit
-// https://tools.ietf.org/html/rfc7636.
-type AcquireTokenAuthCodeParameters struct {
- Scopes []string
- Code string
- Challenge string
- Claims string
- RedirectURI string
- AppType accesstokens.AppType
- Credential *accesstokens.Credential
- TenantID string
-}
-
-type AcquireTokenOnBehalfOfParameters struct {
- Scopes []string
- Claims string
- Credential *accesstokens.Credential
- TenantID string
- UserAssertion string
-}
-
-// AuthResult contains the results of one token acquisition operation in PublicClientApplication
-// or ConfidentialClientApplication. For details see https://aka.ms/msal-net-authenticationresult
-type AuthResult struct {
- Account shared.Account
- IDToken accesstokens.IDToken
- AccessToken string
- ExpiresOn time.Time
- GrantedScopes []string
- DeclinedScopes []string
-}
-
-// AuthResultFromStorage creates an AuthResult from a storage token response (which is generated from the cache).
-func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResult, error) {
- if err := storageTokenResponse.AccessToken.Validate(); err != nil {
- return AuthResult{}, fmt.Errorf("problem with access token in StorageTokenResponse: %w", err)
- }
-
- account := storageTokenResponse.Account
- accessToken := storageTokenResponse.AccessToken.Secret
- grantedScopes := strings.Split(storageTokenResponse.AccessToken.Scopes, scopeSeparator)
-
- // Checking if there was an ID token in the cache; this will throw an error in the case of confidential client applications.
- var idToken accesstokens.IDToken
- if !storageTokenResponse.IDToken.IsZero() {
- err := idToken.UnmarshalJSON([]byte(storageTokenResponse.IDToken.Secret))
- if err != nil {
- return AuthResult{}, fmt.Errorf("problem decoding JWT token: %w", err)
- }
- }
- return AuthResult{account, idToken, accessToken, storageTokenResponse.AccessToken.ExpiresOn.T, grantedScopes, nil}, nil
-}
-
-// NewAuthResult creates an AuthResult.
-func NewAuthResult(tokenResponse accesstokens.TokenResponse, account shared.Account) (AuthResult, error) {
- if len(tokenResponse.DeclinedScopes) > 0 {
- return AuthResult{}, fmt.Errorf("token response failed because declined scopes are present: %s", strings.Join(tokenResponse.DeclinedScopes, ","))
- }
- return AuthResult{
- Account: account,
- IDToken: tokenResponse.IDToken,
- AccessToken: tokenResponse.AccessToken,
- ExpiresOn: tokenResponse.ExpiresOn.T,
- GrantedScopes: tokenResponse.GrantedScopes.Slice,
- }, nil
-}
-
-// Client is a base client that provides access to common methods and primatives that
-// can be used by multiple clients.
-type Client struct {
- Token *oauth.Client
- manager accountManager // *storage.Manager or fakeManager in tests
- // pmanager is a partitioned cache for OBO authentication. *storage.PartitionedManager or fakeManager in tests
- pmanager manager
-
- AuthParams authority.AuthParams // DO NOT EVER MAKE THIS A POINTER! See "Note" in New().
- cacheAccessor cache.ExportReplace
- cacheAccessorMu *sync.RWMutex
-}
-
-// Option is an optional argument to the New constructor.
-type Option func(c *Client) error
-
-// WithCacheAccessor allows you to set some type of cache for storing authentication tokens.
-func WithCacheAccessor(ca cache.ExportReplace) Option {
- return func(c *Client) error {
- if ca != nil {
- c.cacheAccessor = ca
- }
- return nil
- }
-}
-
-// WithClientCapabilities allows configuring one or more client capabilities such as "CP1"
-func WithClientCapabilities(capabilities []string) Option {
- return func(c *Client) error {
- var err error
- if len(capabilities) > 0 {
- cc, err := authority.NewClientCapabilities(capabilities)
- if err == nil {
- c.AuthParams.Capabilities = cc
- }
- }
- return err
- }
-}
-
-// WithKnownAuthorityHosts specifies hosts Client shouldn't validate or request metadata for because they're known to the user
-func WithKnownAuthorityHosts(hosts []string) Option {
- return func(c *Client) error {
- cp := make([]string, len(hosts))
- copy(cp, hosts)
- c.AuthParams.KnownAuthorityHosts = cp
- return nil
- }
-}
-
-// WithX5C specifies if x5c claim(public key of the certificate) should be sent to STS to enable Subject Name Issuer Authentication.
-func WithX5C(sendX5C bool) Option {
- return func(c *Client) error {
- c.AuthParams.SendX5C = sendX5C
- return nil
- }
-}
-
-func WithRegionDetection(region string) Option {
- return func(c *Client) error {
- c.AuthParams.AuthorityInfo.Region = region
- return nil
- }
-}
-
-func WithInstanceDiscovery(instanceDiscoveryEnabled bool) Option {
- return func(c *Client) error {
- c.AuthParams.AuthorityInfo.ValidateAuthority = instanceDiscoveryEnabled
- c.AuthParams.AuthorityInfo.InstanceDiscoveryDisabled = !instanceDiscoveryEnabled
- return nil
- }
-}
-
-// New is the constructor for Base.
-func New(clientID string, authorityURI string, token *oauth.Client, options ...Option) (Client, error) {
- //By default, validateAuthority is set to true and instanceDiscoveryDisabled is set to false
- authInfo, err := authority.NewInfoFromAuthorityURI(authorityURI, true, false)
- if err != nil {
- return Client{}, err
- }
- authParams := authority.NewAuthParams(clientID, authInfo)
- client := Client{ // Note: Hey, don't even THINK about making Base into *Base. See "design notes" in public.go and confidential.go
- Token: token,
- AuthParams: authParams,
- cacheAccessorMu: &sync.RWMutex{},
- manager: storage.New(token),
- pmanager: storage.NewPartitionedManager(token),
- }
- for _, o := range options {
- if err = o(&client); err != nil {
- break
- }
- }
- return client, err
-
-}
-
-// AuthCodeURL creates a URL used to acquire an authorization code.
-func (b Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, authParams authority.AuthParams) (string, error) {
- endpoints, err := b.Token.ResolveEndpoints(ctx, authParams.AuthorityInfo, "")
- if err != nil {
- return "", err
- }
-
- baseURL, err := url.Parse(endpoints.AuthorizationEndpoint)
- if err != nil {
- return "", err
- }
-
- claims, err := authParams.MergeCapabilitiesAndClaims()
- if err != nil {
- return "", err
- }
-
- v := url.Values{}
- v.Add("client_id", clientID)
- v.Add("response_type", "code")
- v.Add("redirect_uri", redirectURI)
- v.Add("scope", strings.Join(scopes, scopeSeparator))
- if authParams.State != "" {
- v.Add("state", authParams.State)
- }
- if claims != "" {
- v.Add("claims", claims)
- }
- if authParams.CodeChallenge != "" {
- v.Add("code_challenge", authParams.CodeChallenge)
- }
- if authParams.CodeChallengeMethod != "" {
- v.Add("code_challenge_method", authParams.CodeChallengeMethod)
- }
- if authParams.LoginHint != "" {
- v.Add("login_hint", authParams.LoginHint)
- }
- if authParams.Prompt != "" {
- v.Add("prompt", authParams.Prompt)
- }
- if authParams.DomainHint != "" {
- v.Add("domain_hint", authParams.DomainHint)
- }
- // There were left over from an implementation that didn't use any of these. We may
- // need to add them later, but as of now aren't needed.
- /*
- if p.ResponseMode != "" {
- urlParams.Add("response_mode", p.ResponseMode)
- }
- */
- baseURL.RawQuery = v.Encode()
- return baseURL.String(), nil
-}
-
-func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilentParameters) (AuthResult, error) {
- ar := AuthResult{}
- // when tenant == "", the caller didn't specify a tenant and WithTenant will choose the client's configured tenant
- tenant := silent.TenantID
- authParams, err := b.AuthParams.WithTenant(tenant)
- if err != nil {
- return ar, err
- }
- authParams.Scopes = silent.Scopes
- authParams.HomeAccountID = silent.Account.HomeAccountID
- authParams.AuthorizationType = silent.AuthorizationType
- authParams.Claims = silent.Claims
- authParams.UserAssertion = silent.UserAssertion
- if silent.AuthnScheme != nil {
- authParams.AuthnScheme = silent.AuthnScheme
- }
-
- m := b.pmanager
- if authParams.AuthorizationType != authority.ATOnBehalfOf {
- authParams.AuthorizationType = authority.ATRefreshToken
- m = b.manager
- }
- if b.cacheAccessor != nil {
- key := authParams.CacheKey(silent.IsAppCache)
- b.cacheAccessorMu.RLock()
- err = b.cacheAccessor.Replace(ctx, m, cache.ReplaceHints{PartitionKey: key})
- b.cacheAccessorMu.RUnlock()
- }
- if err != nil {
- return ar, err
- }
- storageTokenResponse, err := m.Read(ctx, authParams)
- if err != nil {
- return ar, err
- }
-
- // ignore cached access tokens when given claims
- if silent.Claims == "" {
- ar, err = AuthResultFromStorage(storageTokenResponse)
- if err == nil {
- ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken)
- return ar, err
- }
- }
-
- // redeem a cached refresh token, if available
- if reflect.ValueOf(storageTokenResponse.RefreshToken).IsZero() {
- return ar, errors.New("no token found")
- }
- var cc *accesstokens.Credential
- if silent.RequestType == accesstokens.ATConfidential {
- cc = silent.Credential
- }
- token, err := b.Token.Refresh(ctx, silent.RequestType, authParams, cc, storageTokenResponse.RefreshToken)
- if err != nil {
- return ar, err
- }
- return b.AuthResultFromToken(ctx, authParams, token, true)
-}
-
-func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams AcquireTokenAuthCodeParameters) (AuthResult, error) {
- authParams, err := b.AuthParams.WithTenant(authCodeParams.TenantID)
- if err != nil {
- return AuthResult{}, err
- }
- authParams.Claims = authCodeParams.Claims
- authParams.Scopes = authCodeParams.Scopes
- authParams.Redirecturi = authCodeParams.RedirectURI
- authParams.AuthorizationType = authority.ATAuthCode
-
- var cc *accesstokens.Credential
- if authCodeParams.AppType == accesstokens.ATConfidential {
- cc = authCodeParams.Credential
- authParams.IsConfidentialClient = true
- }
-
- req, err := accesstokens.NewCodeChallengeRequest(authParams, authCodeParams.AppType, cc, authCodeParams.Code, authCodeParams.Challenge)
- if err != nil {
- return AuthResult{}, err
- }
-
- token, err := b.Token.AuthCode(ctx, req)
- if err != nil {
- return AuthResult{}, err
- }
-
- return b.AuthResultFromToken(ctx, authParams, token, true)
-}
-
-// AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token.
-func (b Client) AcquireTokenOnBehalfOf(ctx context.Context, onBehalfOfParams AcquireTokenOnBehalfOfParameters) (AuthResult, error) {
- var ar AuthResult
- silentParameters := AcquireTokenSilentParameters{
- Scopes: onBehalfOfParams.Scopes,
- RequestType: accesstokens.ATConfidential,
- Credential: onBehalfOfParams.Credential,
- UserAssertion: onBehalfOfParams.UserAssertion,
- AuthorizationType: authority.ATOnBehalfOf,
- TenantID: onBehalfOfParams.TenantID,
- Claims: onBehalfOfParams.Claims,
- }
- ar, err := b.AcquireTokenSilent(ctx, silentParameters)
- if err == nil {
- return ar, err
- }
- authParams, err := b.AuthParams.WithTenant(onBehalfOfParams.TenantID)
- if err != nil {
- return AuthResult{}, err
- }
- authParams.AuthorizationType = authority.ATOnBehalfOf
- authParams.Claims = onBehalfOfParams.Claims
- authParams.Scopes = onBehalfOfParams.Scopes
- authParams.UserAssertion = onBehalfOfParams.UserAssertion
- token, err := b.Token.OnBehalfOf(ctx, authParams, onBehalfOfParams.Credential)
- if err == nil {
- ar, err = b.AuthResultFromToken(ctx, authParams, token, true)
- }
- return ar, err
-}
-
-func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse, cacheWrite bool) (AuthResult, error) {
- if !cacheWrite {
- return NewAuthResult(token, shared.Account{})
- }
- var m manager = b.manager
- if authParams.AuthorizationType == authority.ATOnBehalfOf {
- m = b.pmanager
- }
- key := token.CacheKey(authParams)
- if b.cacheAccessor != nil {
- b.cacheAccessorMu.Lock()
- defer b.cacheAccessorMu.Unlock()
- err := b.cacheAccessor.Replace(ctx, m, cache.ReplaceHints{PartitionKey: key})
- if err != nil {
- return AuthResult{}, err
- }
- }
- account, err := m.Write(authParams, token)
- if err != nil {
- return AuthResult{}, err
- }
- ar, err := NewAuthResult(token, account)
- if err == nil && b.cacheAccessor != nil {
- err = b.cacheAccessor.Export(ctx, b.manager, cache.ExportHints{PartitionKey: key})
- }
- if err != nil {
- return AuthResult{}, err
- }
-
- ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken)
- return ar, err
-}
-
-func (b Client) AllAccounts(ctx context.Context) ([]shared.Account, error) {
- if b.cacheAccessor != nil {
- b.cacheAccessorMu.RLock()
- defer b.cacheAccessorMu.RUnlock()
- key := b.AuthParams.CacheKey(false)
- err := b.cacheAccessor.Replace(ctx, b.manager, cache.ReplaceHints{PartitionKey: key})
- if err != nil {
- return nil, err
- }
- }
- return b.manager.AllAccounts(), nil
-}
-
-func (b Client) Account(ctx context.Context, homeAccountID string) (shared.Account, error) {
- if b.cacheAccessor != nil {
- b.cacheAccessorMu.RLock()
- defer b.cacheAccessorMu.RUnlock()
- authParams := b.AuthParams // This is a copy, as we don't have a pointer receiver and .AuthParams is not a pointer.
- authParams.AuthorizationType = authority.AccountByID
- authParams.HomeAccountID = homeAccountID
- key := b.AuthParams.CacheKey(false)
- err := b.cacheAccessor.Replace(ctx, b.manager, cache.ReplaceHints{PartitionKey: key})
- if err != nil {
- return shared.Account{}, err
- }
- }
- return b.manager.Account(homeAccountID), nil
-}
-
-// RemoveAccount removes all the ATs, RTs and IDTs from the cache associated with this account.
-func (b Client) RemoveAccount(ctx context.Context, account shared.Account) error {
- if b.cacheAccessor == nil {
- b.manager.RemoveAccount(account, b.AuthParams.ClientID)
- return nil
- }
- b.cacheAccessorMu.Lock()
- defer b.cacheAccessorMu.Unlock()
- key := b.AuthParams.CacheKey(false)
- err := b.cacheAccessor.Replace(ctx, b.manager, cache.ReplaceHints{PartitionKey: key})
- if err != nil {
- return err
- }
- b.manager.RemoveAccount(account, b.AuthParams.ClientID)
- return b.cacheAccessor.Export(ctx, b.manager, cache.ExportHints{PartitionKey: key})
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
deleted file mode 100644
index f9be9027..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
+++ /dev/null
@@ -1,213 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-package storage
-
-import (
- "errors"
- "fmt"
- "reflect"
- "strings"
- "time"
-
- internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
-)
-
-// Contract is the JSON structure that is written to any storage medium when serializing
-// the internal cache. This design is shared between MSAL versions in many languages.
-// This cannot be changed without design that includes other SDKs.
-type Contract struct {
- AccessTokens map[string]AccessToken `json:"AccessToken,omitempty"`
- RefreshTokens map[string]accesstokens.RefreshToken `json:"RefreshToken,omitempty"`
- IDTokens map[string]IDToken `json:"IdToken,omitempty"`
- Accounts map[string]shared.Account `json:"Account,omitempty"`
- AppMetaData map[string]AppMetaData `json:"AppMetadata,omitempty"`
-
- AdditionalFields map[string]interface{}
-}
-
-// Contract is the JSON structure that is written to any storage medium when serializing
-// the internal cache. This design is shared between MSAL versions in many languages.
-// This cannot be changed without design that includes other SDKs.
-type InMemoryContract struct {
- AccessTokensPartition map[string]map[string]AccessToken
- RefreshTokensPartition map[string]map[string]accesstokens.RefreshToken
- IDTokensPartition map[string]map[string]IDToken
- AccountsPartition map[string]map[string]shared.Account
- AppMetaData map[string]AppMetaData
-}
-
-// NewContract is the constructor for Contract.
-func NewInMemoryContract() *InMemoryContract {
- return &InMemoryContract{
- AccessTokensPartition: map[string]map[string]AccessToken{},
- RefreshTokensPartition: map[string]map[string]accesstokens.RefreshToken{},
- IDTokensPartition: map[string]map[string]IDToken{},
- AccountsPartition: map[string]map[string]shared.Account{},
- AppMetaData: map[string]AppMetaData{},
- }
-}
-
-// NewContract is the constructor for Contract.
-func NewContract() *Contract {
- return &Contract{
- AccessTokens: map[string]AccessToken{},
- RefreshTokens: map[string]accesstokens.RefreshToken{},
- IDTokens: map[string]IDToken{},
- Accounts: map[string]shared.Account{},
- AppMetaData: map[string]AppMetaData{},
- AdditionalFields: map[string]interface{}{},
- }
-}
-
-// AccessToken is the JSON representation of a MSAL access token for encoding to storage.
-type AccessToken struct {
- HomeAccountID string `json:"home_account_id,omitempty"`
- Environment string `json:"environment,omitempty"`
- Realm string `json:"realm,omitempty"`
- CredentialType string `json:"credential_type,omitempty"`
- ClientID string `json:"client_id,omitempty"`
- Secret string `json:"secret,omitempty"`
- Scopes string `json:"target,omitempty"`
- ExpiresOn internalTime.Unix `json:"expires_on,omitempty"`
- ExtendedExpiresOn internalTime.Unix `json:"extended_expires_on,omitempty"`
- CachedAt internalTime.Unix `json:"cached_at,omitempty"`
- UserAssertionHash string `json:"user_assertion_hash,omitempty"`
- TokenType string `json:"token_type,omitempty"`
- AuthnSchemeKeyID string `json:"keyid,omitempty"`
-
- AdditionalFields map[string]interface{}
-}
-
-// NewAccessToken is the constructor for AccessToken.
-func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, extendedExpiresOn time.Time, scopes, token, tokenType, authnSchemeKeyID string) AccessToken {
- return AccessToken{
- HomeAccountID: homeID,
- Environment: env,
- Realm: realm,
- CredentialType: "AccessToken",
- ClientID: clientID,
- Secret: token,
- Scopes: scopes,
- CachedAt: internalTime.Unix{T: cachedAt.UTC()},
- ExpiresOn: internalTime.Unix{T: expiresOn.UTC()},
- ExtendedExpiresOn: internalTime.Unix{T: extendedExpiresOn.UTC()},
- TokenType: tokenType,
- AuthnSchemeKeyID: authnSchemeKeyID,
- }
-}
-
-// Key outputs the key that can be used to uniquely look up this entry in a map.
-func (a AccessToken) Key() string {
- key := strings.Join(
- []string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes},
- shared.CacheKeySeparator,
- )
- // add token type to key for new access tokens types. skip for bearer token type to
- // preserve fwd and back compat between a common cache and msal clients
- if !strings.EqualFold(a.TokenType, authority.AccessTokenTypeBearer) {
- key = strings.Join([]string{key, a.TokenType}, shared.CacheKeySeparator)
- }
- return strings.ToLower(key)
-}
-
-// FakeValidate enables tests to fake access token validation
-var FakeValidate func(AccessToken) error
-
-// Validate validates that this AccessToken can be used.
-func (a AccessToken) Validate() error {
- if FakeValidate != nil {
- return FakeValidate(a)
- }
- if a.CachedAt.T.After(time.Now()) {
- return errors.New("access token isn't valid, it was cached at a future time")
- }
- if a.ExpiresOn.T.Before(time.Now().Add(5 * time.Minute)) {
- return fmt.Errorf("access token is expired")
- }
- if a.CachedAt.T.IsZero() {
- return fmt.Errorf("access token does not have CachedAt set")
- }
- return nil
-}
-
-// IDToken is the JSON representation of an MSAL id token for encoding to storage.
-type IDToken struct {
- HomeAccountID string `json:"home_account_id,omitempty"`
- Environment string `json:"environment,omitempty"`
- Realm string `json:"realm,omitempty"`
- CredentialType string `json:"credential_type,omitempty"`
- ClientID string `json:"client_id,omitempty"`
- Secret string `json:"secret,omitempty"`
- UserAssertionHash string `json:"user_assertion_hash,omitempty"`
- AdditionalFields map[string]interface{}
-}
-
-// IsZero determines if IDToken is the zero value.
-func (i IDToken) IsZero() bool {
- v := reflect.ValueOf(i)
- for i := 0; i < v.NumField(); i++ {
- field := v.Field(i)
- if !field.IsZero() {
- switch field.Kind() {
- case reflect.Map, reflect.Slice:
- if field.Len() == 0 {
- continue
- }
- }
- return false
- }
- }
- return true
-}
-
-// NewIDToken is the constructor for IDToken.
-func NewIDToken(homeID, env, realm, clientID, idToken string) IDToken {
- return IDToken{
- HomeAccountID: homeID,
- Environment: env,
- Realm: realm,
- CredentialType: "IDToken",
- ClientID: clientID,
- Secret: idToken,
- }
-}
-
-// Key outputs the key that can be used to uniquely look up this entry in a map.
-func (id IDToken) Key() string {
- key := strings.Join(
- []string{id.HomeAccountID, id.Environment, id.CredentialType, id.ClientID, id.Realm},
- shared.CacheKeySeparator,
- )
- return strings.ToLower(key)
-}
-
-// AppMetaData is the JSON representation of application metadata for encoding to storage.
-type AppMetaData struct {
- FamilyID string `json:"family_id,omitempty"`
- ClientID string `json:"client_id,omitempty"`
- Environment string `json:"environment,omitempty"`
-
- AdditionalFields map[string]interface{}
-}
-
-// NewAppMetaData is the constructor for AppMetaData.
-func NewAppMetaData(familyID, clientID, environment string) AppMetaData {
- return AppMetaData{
- FamilyID: familyID,
- ClientID: clientID,
- Environment: environment,
- }
-}
-
-// Key outputs the key that can be used to uniquely look up this entry in a map.
-func (a AppMetaData) Key() string {
- key := strings.Join(
- []string{"AppMetaData", a.Environment, a.ClientID},
- shared.CacheKeySeparator,
- )
- return strings.ToLower(key)
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go
deleted file mode 100644
index c0931833..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go
+++ /dev/null
@@ -1,442 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-package storage
-
-import (
- "context"
- "errors"
- "fmt"
- "strings"
- "sync"
- "time"
-
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
-)
-
-// PartitionedManager is a partitioned in-memory cache of access tokens, accounts and meta data.
-type PartitionedManager struct {
- contract *InMemoryContract
- contractMu sync.RWMutex
- requests aadInstanceDiscoveryer // *oauth.Token
-
- aadCacheMu sync.RWMutex
- aadCache map[string]authority.InstanceDiscoveryMetadata
-}
-
-// NewPartitionedManager is the constructor for PartitionedManager.
-func NewPartitionedManager(requests *oauth.Client) *PartitionedManager {
- m := &PartitionedManager{requests: requests, aadCache: make(map[string]authority.InstanceDiscoveryMetadata)}
- m.contract = NewInMemoryContract()
- return m
-}
-
-// Read reads a storage token from the cache if it exists.
-func (m *PartitionedManager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) {
- tr := TokenResponse{}
- realm := authParameters.AuthorityInfo.Tenant
- clientID := authParameters.ClientID
- scopes := authParameters.Scopes
- authnSchemeKeyID := authParameters.AuthnScheme.KeyID()
- tokenType := authParameters.AuthnScheme.AccessTokenType()
-
- // fetch metadata if instanceDiscovery is enabled
- aliases := []string{authParameters.AuthorityInfo.Host}
- if !authParameters.AuthorityInfo.InstanceDiscoveryDisabled {
- metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo)
- if err != nil {
- return TokenResponse{}, err
- }
- aliases = metadata.Aliases
- }
-
- userAssertionHash := authParameters.AssertionHash()
- partitionKeyFromRequest := userAssertionHash
-
- // errors returned by read* methods indicate a cache miss and are therefore non-fatal. We continue populating
- // TokenResponse fields so that e.g. lack of an ID token doesn't prevent the caller from receiving a refresh token.
- accessToken, err := m.readAccessToken(aliases, realm, clientID, userAssertionHash, scopes, partitionKeyFromRequest, tokenType, authnSchemeKeyID)
- if err == nil {
- tr.AccessToken = accessToken
- }
- idToken, err := m.readIDToken(aliases, realm, clientID, userAssertionHash, getPartitionKeyIDTokenRead(accessToken))
- if err == nil {
- tr.IDToken = idToken
- }
-
- if appMetadata, err := m.readAppMetaData(aliases, clientID); err == nil {
- // we need the family ID to identify the correct refresh token, if any
- familyID := appMetadata.FamilyID
- refreshToken, err := m.readRefreshToken(aliases, familyID, clientID, userAssertionHash, partitionKeyFromRequest)
- if err == nil {
- tr.RefreshToken = refreshToken
- }
- }
-
- account, err := m.readAccount(aliases, realm, userAssertionHash, idToken.HomeAccountID)
- if err == nil {
- tr.Account = account
- }
- return tr, nil
-}
-
-// Write writes a token response to the cache and returns the account information the token is stored with.
-func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) {
- authParameters.HomeAccountID = tokenResponse.HomeAccountID()
- homeAccountID := authParameters.HomeAccountID
- environment := authParameters.AuthorityInfo.Host
- realm := authParameters.AuthorityInfo.Tenant
- clientID := authParameters.ClientID
- target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator)
- userAssertionHash := authParameters.AssertionHash()
- cachedAt := time.Now()
- authnSchemeKeyID := authParameters.AuthnScheme.KeyID()
- var account shared.Account
-
- if len(tokenResponse.RefreshToken) > 0 {
- refreshToken := accesstokens.NewRefreshToken(homeAccountID, environment, clientID, tokenResponse.RefreshToken, tokenResponse.FamilyID)
- if authParameters.AuthorizationType == authority.ATOnBehalfOf {
- refreshToken.UserAssertionHash = userAssertionHash
- }
- if err := m.writeRefreshToken(refreshToken, getPartitionKeyRefreshToken(refreshToken)); err != nil {
- return account, err
- }
- }
-
- if len(tokenResponse.AccessToken) > 0 {
- accessToken := NewAccessToken(
- homeAccountID,
- environment,
- realm,
- clientID,
- cachedAt,
- tokenResponse.ExpiresOn.T,
- tokenResponse.ExtExpiresOn.T,
- target,
- tokenResponse.AccessToken,
- tokenResponse.TokenType,
- authnSchemeKeyID,
- )
- if authParameters.AuthorizationType == authority.ATOnBehalfOf {
- accessToken.UserAssertionHash = userAssertionHash // get Hash method on this
- }
-
- // Since we have a valid access token, cache it before moving on.
- if err := accessToken.Validate(); err == nil {
- if err := m.writeAccessToken(accessToken, getPartitionKeyAccessToken(accessToken)); err != nil {
- return account, err
- }
- } else {
- return shared.Account{}, err
- }
- }
-
- idTokenJwt := tokenResponse.IDToken
- if !idTokenJwt.IsZero() {
- idToken := NewIDToken(homeAccountID, environment, realm, clientID, idTokenJwt.RawToken)
- if authParameters.AuthorizationType == authority.ATOnBehalfOf {
- idToken.UserAssertionHash = userAssertionHash
- }
- if err := m.writeIDToken(idToken, getPartitionKeyIDToken(idToken)); err != nil {
- return shared.Account{}, err
- }
-
- localAccountID := idTokenJwt.LocalAccountID()
- authorityType := authParameters.AuthorityInfo.AuthorityType
-
- preferredUsername := idTokenJwt.UPN
- if idTokenJwt.PreferredUsername != "" {
- preferredUsername = idTokenJwt.PreferredUsername
- }
-
- account = shared.NewAccount(
- homeAccountID,
- environment,
- realm,
- localAccountID,
- authorityType,
- preferredUsername,
- )
- if authParameters.AuthorizationType == authority.ATOnBehalfOf {
- account.UserAssertionHash = userAssertionHash
- }
- if err := m.writeAccount(account, getPartitionKeyAccount(account)); err != nil {
- return shared.Account{}, err
- }
- }
-
- AppMetaData := NewAppMetaData(tokenResponse.FamilyID, clientID, environment)
-
- if err := m.writeAppMetaData(AppMetaData); err != nil {
- return shared.Account{}, err
- }
- return account, nil
-}
-
-func (m *PartitionedManager) getMetadataEntry(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
- md, err := m.aadMetadataFromCache(ctx, authorityInfo)
- if err != nil {
- // not in the cache, retrieve it
- md, err = m.aadMetadata(ctx, authorityInfo)
- }
- return md, err
-}
-
-func (m *PartitionedManager) aadMetadataFromCache(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
- m.aadCacheMu.RLock()
- defer m.aadCacheMu.RUnlock()
- metadata, ok := m.aadCache[authorityInfo.Host]
- if ok {
- return metadata, nil
- }
- return metadata, errors.New("not found")
-}
-
-func (m *PartitionedManager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
- discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo)
- if err != nil {
- return authority.InstanceDiscoveryMetadata{}, err
- }
-
- m.aadCacheMu.Lock()
- defer m.aadCacheMu.Unlock()
-
- for _, metadataEntry := range discoveryResponse.Metadata {
- for _, aliasedAuthority := range metadataEntry.Aliases {
- m.aadCache[aliasedAuthority] = metadataEntry
- }
- }
- if _, ok := m.aadCache[authorityInfo.Host]; !ok {
- m.aadCache[authorityInfo.Host] = authority.InstanceDiscoveryMetadata{
- PreferredNetwork: authorityInfo.Host,
- PreferredCache: authorityInfo.Host,
- }
- }
- return m.aadCache[authorityInfo.Host], nil
-}
-
-func (m *PartitionedManager) readAccessToken(envAliases []string, realm, clientID, userAssertionHash string, scopes []string, partitionKey, tokenType, authnSchemeKeyID string) (AccessToken, error) {
- m.contractMu.RLock()
- defer m.contractMu.RUnlock()
- if accessTokens, ok := m.contract.AccessTokensPartition[partitionKey]; ok {
- // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens.
- // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't
- // an issue, however if it does become a problem then we know where to look.
- for _, at := range accessTokens {
- if at.Realm == realm && at.ClientID == clientID && at.UserAssertionHash == userAssertionHash {
- if at.TokenType == tokenType && at.AuthnSchemeKeyID == authnSchemeKeyID {
- if checkAlias(at.Environment, envAliases) {
- if isMatchingScopes(scopes, at.Scopes) {
- return at, nil
- }
- }
- }
- }
- }
- }
- return AccessToken{}, fmt.Errorf("access token not found")
-}
-
-func (m *PartitionedManager) writeAccessToken(accessToken AccessToken, partitionKey string) error {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- key := accessToken.Key()
- if m.contract.AccessTokensPartition[partitionKey] == nil {
- m.contract.AccessTokensPartition[partitionKey] = make(map[string]AccessToken)
- }
- m.contract.AccessTokensPartition[partitionKey][key] = accessToken
- return nil
-}
-
-func matchFamilyRefreshTokenObo(rt accesstokens.RefreshToken, userAssertionHash string, envAliases []string) bool {
- return rt.UserAssertionHash == userAssertionHash && checkAlias(rt.Environment, envAliases) && rt.FamilyID != ""
-}
-
-func matchClientIDRefreshTokenObo(rt accesstokens.RefreshToken, userAssertionHash string, envAliases []string, clientID string) bool {
- return rt.UserAssertionHash == userAssertionHash && checkAlias(rt.Environment, envAliases) && rt.ClientID == clientID
-}
-
-func (m *PartitionedManager) readRefreshToken(envAliases []string, familyID, clientID, userAssertionHash, partitionKey string) (accesstokens.RefreshToken, error) {
- byFamily := func(rt accesstokens.RefreshToken) bool {
- return matchFamilyRefreshTokenObo(rt, userAssertionHash, envAliases)
- }
- byClient := func(rt accesstokens.RefreshToken) bool {
- return matchClientIDRefreshTokenObo(rt, userAssertionHash, envAliases, clientID)
- }
-
- var matchers []func(rt accesstokens.RefreshToken) bool
- if familyID == "" {
- matchers = []func(rt accesstokens.RefreshToken) bool{
- byClient, byFamily,
- }
- } else {
- matchers = []func(rt accesstokens.RefreshToken) bool{
- byFamily, byClient,
- }
- }
-
- // TODO(keegan): All the tests here pass, but Bogdan says this is
- // more complicated. I'm opening an issue for this to have him
- // review the tests and suggest tests that would break this so
- // we can re-write against good tests. His comments as follow:
- // The algorithm is a bit more complex than this, I assume there are some tests covering everything. I would keep the order as is.
- // The algorithm is:
- // If application is NOT part of the family, search by client_ID
- // If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response).
- // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95
- m.contractMu.RLock()
- defer m.contractMu.RUnlock()
- for _, matcher := range matchers {
- for _, rt := range m.contract.RefreshTokensPartition[partitionKey] {
- if matcher(rt) {
- return rt, nil
- }
- }
- }
-
- return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found")
-}
-
-func (m *PartitionedManager) writeRefreshToken(refreshToken accesstokens.RefreshToken, partitionKey string) error {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- key := refreshToken.Key()
- if m.contract.AccessTokensPartition[partitionKey] == nil {
- m.contract.RefreshTokensPartition[partitionKey] = make(map[string]accesstokens.RefreshToken)
- }
- m.contract.RefreshTokensPartition[partitionKey][key] = refreshToken
- return nil
-}
-
-func (m *PartitionedManager) readIDToken(envAliases []string, realm, clientID, userAssertionHash, partitionKey string) (IDToken, error) {
- m.contractMu.RLock()
- defer m.contractMu.RUnlock()
- for _, idt := range m.contract.IDTokensPartition[partitionKey] {
- if idt.Realm == realm && idt.ClientID == clientID && idt.UserAssertionHash == userAssertionHash {
- if checkAlias(idt.Environment, envAliases) {
- return idt, nil
- }
- }
- }
- return IDToken{}, fmt.Errorf("token not found")
-}
-
-func (m *PartitionedManager) writeIDToken(idToken IDToken, partitionKey string) error {
- key := idToken.Key()
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- if m.contract.IDTokensPartition[partitionKey] == nil {
- m.contract.IDTokensPartition[partitionKey] = make(map[string]IDToken)
- }
- m.contract.IDTokensPartition[partitionKey][key] = idToken
- return nil
-}
-
-func (m *PartitionedManager) readAccount(envAliases []string, realm, UserAssertionHash, partitionKey string) (shared.Account, error) {
- m.contractMu.RLock()
- defer m.contractMu.RUnlock()
-
- // You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key.
- // We only use a map because the storage contract shared between all language implementations says use a map.
- // We can't change that. The other is because the keys are made using a specific "env", but here we are allowing
- // a match in multiple envs (envAlias). That means we either need to hash each possible keyand do the lookup
- // or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored
- // is really low (say 2). Each hash is more expensive than the entire iteration.
- for _, acc := range m.contract.AccountsPartition[partitionKey] {
- if checkAlias(acc.Environment, envAliases) && acc.UserAssertionHash == UserAssertionHash && acc.Realm == realm {
- return acc, nil
- }
- }
- return shared.Account{}, fmt.Errorf("account not found")
-}
-
-func (m *PartitionedManager) writeAccount(account shared.Account, partitionKey string) error {
- key := account.Key()
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- if m.contract.AccountsPartition[partitionKey] == nil {
- m.contract.AccountsPartition[partitionKey] = make(map[string]shared.Account)
- }
- m.contract.AccountsPartition[partitionKey][key] = account
- return nil
-}
-
-func (m *PartitionedManager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) {
- m.contractMu.RLock()
- defer m.contractMu.RUnlock()
-
- for _, app := range m.contract.AppMetaData {
- if checkAlias(app.Environment, envAliases) && app.ClientID == clientID {
- return app, nil
- }
- }
- return AppMetaData{}, fmt.Errorf("not found")
-}
-
-func (m *PartitionedManager) writeAppMetaData(AppMetaData AppMetaData) error {
- key := AppMetaData.Key()
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- m.contract.AppMetaData[key] = AppMetaData
- return nil
-}
-
-// update updates the internal cache object. This is for use in tests, other uses are not
-// supported.
-func (m *PartitionedManager) update(cache *InMemoryContract) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- m.contract = cache
-}
-
-// Marshal implements cache.Marshaler.
-func (m *PartitionedManager) Marshal() ([]byte, error) {
- return json.Marshal(m.contract)
-}
-
-// Unmarshal implements cache.Unmarshaler.
-func (m *PartitionedManager) Unmarshal(b []byte) error {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
-
- contract := NewInMemoryContract()
-
- err := json.Unmarshal(b, contract)
- if err != nil {
- return err
- }
-
- m.contract = contract
-
- return nil
-}
-
-func getPartitionKeyAccessToken(item AccessToken) string {
- if item.UserAssertionHash != "" {
- return item.UserAssertionHash
- }
- return item.HomeAccountID
-}
-
-func getPartitionKeyRefreshToken(item accesstokens.RefreshToken) string {
- if item.UserAssertionHash != "" {
- return item.UserAssertionHash
- }
- return item.HomeAccountID
-}
-
-func getPartitionKeyIDToken(item IDToken) string {
- return item.HomeAccountID
-}
-
-func getPartitionKeyAccount(item shared.Account) string {
- return item.HomeAccountID
-}
-
-func getPartitionKeyIDTokenRead(item AccessToken) string {
- return item.HomeAccountID
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go
deleted file mode 100644
index 2221e60c..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go
+++ /dev/null
@@ -1,583 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-// Package storage holds all cached token information for MSAL. This storage can be
-// augmented with third-party extensions to provide persistent storage. In that case,
-// reads and writes in upper packages will call Marshal() to take the entire in-memory
-// representation and write it to storage and Unmarshal() to update the entire in-memory
-// storage with what was in the persistent storage. The persistent storage can only be
-// accessed in this way because multiple MSAL clients written in multiple languages can
-// access the same storage and must adhere to the same method that was defined
-// previously.
-package storage
-
-import (
- "context"
- "errors"
- "fmt"
- "strings"
- "sync"
- "time"
-
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
-)
-
-// aadInstanceDiscoveryer allows faking in tests.
-// It is implemented in production by ops/authority.Client
-type aadInstanceDiscoveryer interface {
- AADInstanceDiscovery(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryResponse, error)
-}
-
-// TokenResponse mimics a token response that was pulled from the cache.
-type TokenResponse struct {
- RefreshToken accesstokens.RefreshToken
- IDToken IDToken // *Credential
- AccessToken AccessToken
- Account shared.Account
-}
-
-// Manager is an in-memory cache of access tokens, accounts and meta data. This data is
-// updated on read/write calls. Unmarshal() replaces all data stored here with whatever
-// was given to it on each call.
-type Manager struct {
- contract *Contract
- contractMu sync.RWMutex
- requests aadInstanceDiscoveryer // *oauth.Token
-
- aadCacheMu sync.RWMutex
- aadCache map[string]authority.InstanceDiscoveryMetadata
-}
-
-// New is the constructor for Manager.
-func New(requests *oauth.Client) *Manager {
- m := &Manager{requests: requests, aadCache: make(map[string]authority.InstanceDiscoveryMetadata)}
- m.contract = NewContract()
- return m
-}
-
-func checkAlias(alias string, aliases []string) bool {
- for _, v := range aliases {
- if alias == v {
- return true
- }
- }
- return false
-}
-
-func isMatchingScopes(scopesOne []string, scopesTwo string) bool {
- newScopesTwo := strings.Split(scopesTwo, scopeSeparator)
- scopeCounter := 0
- for _, scope := range scopesOne {
- for _, otherScope := range newScopesTwo {
- if strings.EqualFold(scope, otherScope) {
- scopeCounter++
- continue
- }
- }
- }
- return scopeCounter == len(scopesOne)
-}
-
-// needsUpgrade returns true if the given key follows the v1.0 schema i.e.,
-// it contains an uppercase character (v1.1+ keys are all lowercase)
-func needsUpgrade(key string) bool {
- for _, r := range key {
- if 'A' <= r && r <= 'Z' {
- return true
- }
- }
- return false
-}
-
-// upgrade a v1.0 cache item by adding a v1.1+ item having the same value and deleting
-// the v1.0 item. Callers must hold an exclusive lock on m.
-func upgrade[T any](m map[string]T, k string) T {
- v1_1Key := strings.ToLower(k)
- v, ok := m[k]
- if !ok {
- // another goroutine did the upgrade while this one was waiting for the write lock
- return m[v1_1Key]
- }
- if v2, ok := m[v1_1Key]; ok {
- // cache has an equivalent v1.1+ item, which we prefer because we know it was added
- // by a newer version of the module and is therefore more likely to remain valid.
- // The v1.0 item may have expired because only v1.0 or earlier would update it.
- v = v2
- } else {
- // add an equivalent item according to the v1.1 schema
- m[v1_1Key] = v
- }
- delete(m, k)
- return v
-}
-
-// Read reads a storage token from the cache if it exists.
-func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) {
- tr := TokenResponse{}
- homeAccountID := authParameters.HomeAccountID
- realm := authParameters.AuthorityInfo.Tenant
- clientID := authParameters.ClientID
- scopes := authParameters.Scopes
- authnSchemeKeyID := authParameters.AuthnScheme.KeyID()
- tokenType := authParameters.AuthnScheme.AccessTokenType()
-
- // fetch metadata if instanceDiscovery is enabled
- aliases := []string{authParameters.AuthorityInfo.Host}
- if !authParameters.AuthorityInfo.InstanceDiscoveryDisabled {
- metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo)
- if err != nil {
- return TokenResponse{}, err
- }
- aliases = metadata.Aliases
- }
-
- accessToken := m.readAccessToken(homeAccountID, aliases, realm, clientID, scopes, tokenType, authnSchemeKeyID)
- tr.AccessToken = accessToken
-
- if homeAccountID == "" {
- // caller didn't specify a user, so there's no reason to search for an ID or refresh token
- return tr, nil
- }
- // errors returned by read* methods indicate a cache miss and are therefore non-fatal. We continue populating
- // TokenResponse fields so that e.g. lack of an ID token doesn't prevent the caller from receiving a refresh token.
- idToken, err := m.readIDToken(homeAccountID, aliases, realm, clientID)
- if err == nil {
- tr.IDToken = idToken
- }
-
- if appMetadata, err := m.readAppMetaData(aliases, clientID); err == nil {
- // we need the family ID to identify the correct refresh token, if any
- familyID := appMetadata.FamilyID
- refreshToken, err := m.readRefreshToken(homeAccountID, aliases, familyID, clientID)
- if err == nil {
- tr.RefreshToken = refreshToken
- }
- }
-
- account, err := m.readAccount(homeAccountID, aliases, realm)
- if err == nil {
- tr.Account = account
- }
- return tr, nil
-}
-
-const scopeSeparator = " "
-
-// Write writes a token response to the cache and returns the account information the token is stored with.
-func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) {
- homeAccountID := tokenResponse.HomeAccountID()
- environment := authParameters.AuthorityInfo.Host
- realm := authParameters.AuthorityInfo.Tenant
- clientID := authParameters.ClientID
- target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator)
- cachedAt := time.Now()
- authnSchemeKeyID := authParameters.AuthnScheme.KeyID()
-
- var account shared.Account
-
- if len(tokenResponse.RefreshToken) > 0 {
- refreshToken := accesstokens.NewRefreshToken(homeAccountID, environment, clientID, tokenResponse.RefreshToken, tokenResponse.FamilyID)
- if err := m.writeRefreshToken(refreshToken); err != nil {
- return account, err
- }
- }
-
- if len(tokenResponse.AccessToken) > 0 {
- accessToken := NewAccessToken(
- homeAccountID,
- environment,
- realm,
- clientID,
- cachedAt,
- tokenResponse.ExpiresOn.T,
- tokenResponse.ExtExpiresOn.T,
- target,
- tokenResponse.AccessToken,
- tokenResponse.TokenType,
- authnSchemeKeyID,
- )
-
- // Since we have a valid access token, cache it before moving on.
- if err := accessToken.Validate(); err == nil {
- if err := m.writeAccessToken(accessToken); err != nil {
- return account, err
- }
- }
- }
-
- idTokenJwt := tokenResponse.IDToken
- if !idTokenJwt.IsZero() {
- idToken := NewIDToken(homeAccountID, environment, realm, clientID, idTokenJwt.RawToken)
- if err := m.writeIDToken(idToken); err != nil {
- return shared.Account{}, err
- }
-
- localAccountID := idTokenJwt.LocalAccountID()
- authorityType := authParameters.AuthorityInfo.AuthorityType
-
- preferredUsername := idTokenJwt.UPN
- if idTokenJwt.PreferredUsername != "" {
- preferredUsername = idTokenJwt.PreferredUsername
- }
-
- account = shared.NewAccount(
- homeAccountID,
- environment,
- realm,
- localAccountID,
- authorityType,
- preferredUsername,
- )
- if err := m.writeAccount(account); err != nil {
- return shared.Account{}, err
- }
- }
-
- AppMetaData := NewAppMetaData(tokenResponse.FamilyID, clientID, environment)
-
- if err := m.writeAppMetaData(AppMetaData); err != nil {
- return shared.Account{}, err
- }
- return account, nil
-}
-
-func (m *Manager) getMetadataEntry(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
- md, err := m.aadMetadataFromCache(ctx, authorityInfo)
- if err != nil {
- // not in the cache, retrieve it
- md, err = m.aadMetadata(ctx, authorityInfo)
- }
- return md, err
-}
-
-func (m *Manager) aadMetadataFromCache(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
- m.aadCacheMu.RLock()
- defer m.aadCacheMu.RUnlock()
- metadata, ok := m.aadCache[authorityInfo.Host]
- if ok {
- return metadata, nil
- }
- return metadata, errors.New("not found")
-}
-
-func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
- m.aadCacheMu.Lock()
- defer m.aadCacheMu.Unlock()
- discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo)
- if err != nil {
- return authority.InstanceDiscoveryMetadata{}, err
- }
-
- for _, metadataEntry := range discoveryResponse.Metadata {
- for _, aliasedAuthority := range metadataEntry.Aliases {
- m.aadCache[aliasedAuthority] = metadataEntry
- }
- }
- if _, ok := m.aadCache[authorityInfo.Host]; !ok {
- m.aadCache[authorityInfo.Host] = authority.InstanceDiscoveryMetadata{
- PreferredNetwork: authorityInfo.Host,
- PreferredCache: authorityInfo.Host,
- }
- }
- return m.aadCache[authorityInfo.Host], nil
-}
-
-func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string, tokenType, authnSchemeKeyID string) AccessToken {
- m.contractMu.RLock()
- // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens.
- // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't
- // an issue, however if it does become a problem then we know where to look.
- for k, at := range m.contract.AccessTokens {
- if at.HomeAccountID == homeID && at.Realm == realm && at.ClientID == clientID {
- if (strings.EqualFold(at.TokenType, tokenType) && at.AuthnSchemeKeyID == authnSchemeKeyID) || (at.TokenType == "" && (tokenType == "" || tokenType == "Bearer")) {
- if checkAlias(at.Environment, envAliases) && isMatchingScopes(scopes, at.Scopes) {
- m.contractMu.RUnlock()
- if needsUpgrade(k) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- at = upgrade(m.contract.AccessTokens, k)
- }
- return at
- }
- }
- }
- }
- m.contractMu.RUnlock()
- return AccessToken{}
-}
-
-func (m *Manager) writeAccessToken(accessToken AccessToken) error {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- key := accessToken.Key()
- m.contract.AccessTokens[key] = accessToken
- return nil
-}
-
-func (m *Manager) readRefreshToken(homeID string, envAliases []string, familyID, clientID string) (accesstokens.RefreshToken, error) {
- byFamily := func(rt accesstokens.RefreshToken) bool {
- return matchFamilyRefreshToken(rt, homeID, envAliases)
- }
- byClient := func(rt accesstokens.RefreshToken) bool {
- return matchClientIDRefreshToken(rt, homeID, envAliases, clientID)
- }
-
- var matchers []func(rt accesstokens.RefreshToken) bool
- if familyID == "" {
- matchers = []func(rt accesstokens.RefreshToken) bool{
- byClient, byFamily,
- }
- } else {
- matchers = []func(rt accesstokens.RefreshToken) bool{
- byFamily, byClient,
- }
- }
-
- // TODO(keegan): All the tests here pass, but Bogdan says this is
- // more complicated. I'm opening an issue for this to have him
- // review the tests and suggest tests that would break this so
- // we can re-write against good tests. His comments as follow:
- // The algorithm is a bit more complex than this, I assume there are some tests covering everything. I would keep the order as is.
- // The algorithm is:
- // If application is NOT part of the family, search by client_ID
- // If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response).
- // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95
- m.contractMu.RLock()
- for _, matcher := range matchers {
- for k, rt := range m.contract.RefreshTokens {
- if matcher(rt) {
- m.contractMu.RUnlock()
- if needsUpgrade(k) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- rt = upgrade(m.contract.RefreshTokens, k)
- }
- return rt, nil
- }
- }
- }
-
- m.contractMu.RUnlock()
- return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found")
-}
-
-func matchFamilyRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string) bool {
- return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.FamilyID != ""
-}
-
-func matchClientIDRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string, clientID string) bool {
- return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.ClientID == clientID
-}
-
-func (m *Manager) writeRefreshToken(refreshToken accesstokens.RefreshToken) error {
- key := refreshToken.Key()
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- m.contract.RefreshTokens[key] = refreshToken
- return nil
-}
-
-func (m *Manager) readIDToken(homeID string, envAliases []string, realm, clientID string) (IDToken, error) {
- m.contractMu.RLock()
- for k, idt := range m.contract.IDTokens {
- if idt.HomeAccountID == homeID && idt.Realm == realm && idt.ClientID == clientID {
- if checkAlias(idt.Environment, envAliases) {
- m.contractMu.RUnlock()
- if needsUpgrade(k) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- idt = upgrade(m.contract.IDTokens, k)
- }
- return idt, nil
- }
- }
- }
- m.contractMu.RUnlock()
- return IDToken{}, fmt.Errorf("token not found")
-}
-
-func (m *Manager) writeIDToken(idToken IDToken) error {
- key := idToken.Key()
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- m.contract.IDTokens[key] = idToken
- return nil
-}
-
-func (m *Manager) AllAccounts() []shared.Account {
- m.contractMu.RLock()
- defer m.contractMu.RUnlock()
-
- var accounts []shared.Account
- for _, v := range m.contract.Accounts {
- accounts = append(accounts, v)
- }
-
- return accounts
-}
-
-func (m *Manager) Account(homeAccountID string) shared.Account {
- m.contractMu.RLock()
- defer m.contractMu.RUnlock()
-
- for _, v := range m.contract.Accounts {
- if v.HomeAccountID == homeAccountID {
- return v
- }
- }
-
- return shared.Account{}
-}
-
-func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm string) (shared.Account, error) {
- m.contractMu.RLock()
-
- // You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key.
- // We only use a map because the storage contract shared between all language implementations says use a map.
- // We can't change that. The other is because the keys are made using a specific "env", but here we are allowing
- // a match in multiple envs (envAlias). That means we either need to hash each possible keyand do the lookup
- // or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored
- // is really low (say 2). Each hash is more expensive than the entire iteration.
- for k, acc := range m.contract.Accounts {
- if acc.HomeAccountID == homeAccountID && checkAlias(acc.Environment, envAliases) && acc.Realm == realm {
- m.contractMu.RUnlock()
- if needsUpgrade(k) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- acc = upgrade(m.contract.Accounts, k)
- }
- return acc, nil
- }
- }
- m.contractMu.RUnlock()
- return shared.Account{}, fmt.Errorf("account not found")
-}
-
-func (m *Manager) writeAccount(account shared.Account) error {
- key := account.Key()
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- m.contract.Accounts[key] = account
- return nil
-}
-
-func (m *Manager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) {
- m.contractMu.RLock()
- for k, app := range m.contract.AppMetaData {
- if checkAlias(app.Environment, envAliases) && app.ClientID == clientID {
- m.contractMu.RUnlock()
- if needsUpgrade(k) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- app = upgrade(m.contract.AppMetaData, k)
- }
- return app, nil
- }
- }
- m.contractMu.RUnlock()
- return AppMetaData{}, fmt.Errorf("not found")
-}
-
-func (m *Manager) writeAppMetaData(AppMetaData AppMetaData) error {
- key := AppMetaData.Key()
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- m.contract.AppMetaData[key] = AppMetaData
- return nil
-}
-
-// RemoveAccount removes all the associated ATs, RTs and IDTs from the cache associated with this account.
-func (m *Manager) RemoveAccount(account shared.Account, clientID string) {
- m.removeRefreshTokens(account.HomeAccountID, account.Environment, clientID)
- m.removeAccessTokens(account.HomeAccountID, account.Environment)
- m.removeIDTokens(account.HomeAccountID, account.Environment)
- m.removeAccounts(account.HomeAccountID, account.Environment)
-}
-
-func (m *Manager) removeRefreshTokens(homeID string, env string, clientID string) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- for key, rt := range m.contract.RefreshTokens {
- // Check for RTs associated with the account.
- if rt.HomeAccountID == homeID && rt.Environment == env {
- // Do RT's app ownership check as a precaution, in case family apps
- // and 3rd-party apps share same token cache, although they should not.
- if rt.ClientID == clientID || rt.FamilyID != "" {
- delete(m.contract.RefreshTokens, key)
- }
- }
- }
-}
-
-func (m *Manager) removeAccessTokens(homeID string, env string) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- for key, at := range m.contract.AccessTokens {
- // Remove AT's associated with the account
- if at.HomeAccountID == homeID && at.Environment == env {
- // # To avoid the complexity of locating sibling family app's AT, we skip AT's app ownership check.
- // It means ATs for other apps will also be removed, it is OK because:
- // non-family apps are not supposed to share token cache to begin with;
- // Even if it happens, we keep other app's RT already, so SSO still works.
- delete(m.contract.AccessTokens, key)
- }
- }
-}
-
-func (m *Manager) removeIDTokens(homeID string, env string) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- for key, idt := range m.contract.IDTokens {
- // Remove ID tokens associated with the account.
- if idt.HomeAccountID == homeID && idt.Environment == env {
- delete(m.contract.IDTokens, key)
- }
- }
-}
-
-func (m *Manager) removeAccounts(homeID string, env string) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- for key, acc := range m.contract.Accounts {
- // Remove the specified account.
- if acc.HomeAccountID == homeID && acc.Environment == env {
- delete(m.contract.Accounts, key)
- }
- }
-}
-
-// update updates the internal cache object. This is for use in tests, other uses are not
-// supported.
-func (m *Manager) update(cache *Contract) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- m.contract = cache
-}
-
-// Marshal implements cache.Marshaler.
-func (m *Manager) Marshal() ([]byte, error) {
- m.contractMu.RLock()
- defer m.contractMu.RUnlock()
- return json.Marshal(m.contract)
-}
-
-// Unmarshal implements cache.Unmarshaler.
-func (m *Manager) Unmarshal(b []byte) error {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
-
- contract := NewContract()
-
- err := json.Unmarshal(b, contract)
- if err != nil {
- return err
- }
-
- m.contract = contract
-
- return nil
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go
deleted file mode 100644
index 7b673e3f..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-// package exported contains internal types that are re-exported from a public package
-package exported
-
-// AssertionRequestOptions has information required to generate a client assertion
-type AssertionRequestOptions struct {
- // ClientID identifies the application for which an assertion is requested. Used as the assertion's "iss" and "sub" claims.
- ClientID string
-
- // TokenEndpoint is the intended token endpoint. Used as the assertion's "aud" claim.
- TokenEndpoint string
-}
-
-// TokenProviderParameters is the authentication parameters passed to token providers
-type TokenProviderParameters struct {
- // Claims contains any additional claims requested for the token
- Claims string
- // CorrelationID of the authentication request
- CorrelationID string
- // Scopes requested for the token
- Scopes []string
- // TenantID identifies the tenant in which to authenticate
- TenantID string
-}
-
-// TokenProviderResult is the authentication result returned by custom token providers
-type TokenProviderResult struct {
- // AccessToken is the requested token
- AccessToken string
- // ExpiresInSeconds is the lifetime of the token in seconds
- ExpiresInSeconds int
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md
deleted file mode 100644
index 09edb01b..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/design.md
+++ /dev/null
@@ -1,140 +0,0 @@
-# JSON Package Design
-Author: John Doak(jdoak@microsoft.com)
-
-## Why?
-
-This project needs a special type of marshal/unmarshal not directly supported
-by the encoding/json package.
-
-The need revolves around a few key wants/needs:
-- unmarshal and marshal structs representing JSON messages
-- fields in the messgage not in the struct must be maintained when unmarshalled
-- those same fields must be marshalled back when encoded again
-
-The initial version used map[string]interface{} to put in the keys that
-were known and then any other keys were put into a field called AdditionalFields.
-
-This has a few negatives:
-- Dual marshaling/unmarshalling is required
-- Adding a struct field requires manually adding a key by name to be encoded/decoded from the map (which is a loosely coupled construct), which can lead to bugs that aren't detected or have bad side effects
-- Tests can become quickly disconnected if those keys aren't put
-in tests as well. So you think you have support working, but you
-don't. Existing tests were found that didn't test the marshalling output.
-- There is no enforcement that if AdditionalFields is required on one struct, it should be on all containers
-that don't have custom marshal/unmarshal.
-
-This package aims to support our needs by providing custom Marshal()/Unmarshal() functions.
-
-This prevents all the negatives in the initial solution listed above. However, it does add its own negative:
-- Custom encoding/decoding via reflection is messy (as can be seen in encoding/json itself)
-
-Go proverb: Reflection is never clear
-Suggested reading: https://blog.golang.org/laws-of-reflection
-
-## Important design decisions
-
-- We don't want to understand all JSON decoding rules
-- We don't want to deal with all the quoting, commas, etc on decode
-- Need support for json.Marshaler/Unmarshaler, so we can support types like time.Time
-- If struct does not implement json.Unmarshaler, it must have AdditionalFields defined
-- We only support root level objects that are \*struct or struct
-
-To faciliate these goals, we will utilize the json.Encoder and json.Decoder.
-They provide streaming processing (efficient) and return errors on bad JSON.
-
-Support for json.Marshaler/Unmarshaler allows for us to use non-basic types
-that must be specially encoded/decoded (like time.Time objects).
-
-We don't support types that can't customer unmarshal or have AdditionalFields
-in order to prevent future devs from forgetting that important field and
-generating bad return values.
-
-Support for root level objects of \*struct or struct simply acknowledges the
-fact that this is designed only for the purposes listed in the Introduction.
-Outside that (like encoding a lone number) should be done with the
-regular json package (as it will not have additional fields).
-
-We don't support a few things on json supported reference types and structs:
-- \*map: no need for pointers to maps
-- \*slice: no need for pointers to slices
-- any further pointers on struct after \*struct
-
-There should never be a need for this in Go.
-
-## Design
-
-## State Machines
-
-This uses state machine designs that based upon the Rob Pike talk on
-lexers and parsers: https://www.youtube.com/watch?v=HxaD_trXwRE
-
-This is the most common pattern for state machines in Go and
-the model to follow closesly when dealing with streaming
-processing of textual data.
-
-Our state machines are based on the type:
-```go
-type stateFn func() (stateFn, error)
-```
-
-The state machine itself is simply a struct that has methods that
-satisfy stateFn.
-
-Our state machines have a few standard calls
-- run(): runs the state machine
-- start(): always the first stateFn to be called
-
-All state machines have the following logic:
-* run() is called
-* start() is called and returns the next stateFn or error
-* stateFn is called
- - If returned stateFn(next state) is non-nil, call it
- - If error is non-nil, run() returns the error
- - If stateFn == nil and err == nil, run() return err == nil
-
-## Supporting types
-
-Marshalling/Unmarshalling must support(within top level struct):
-- struct
-- \*struct
-- []struct
-- []\*struct
-- []map[string]structContainer
-- [][]structContainer
-
-**Term note:** structContainer == type that has a struct or \*struct inside it
-
-We specifically do not support []interface or map[string]interface
-where the interface value would hold some value with a struct in it.
-
-Those will still marshal/unmarshal, but without support for
-AdditionalFields.
-
-## Marshalling
-
-The marshalling design will be based around a statemachine design.
-
-The basic logic is as follows:
-
-* If struct has custom marshaller, call it and return
-* If struct has field "AdditionalFields", it must be a map[string]interface{}
-* If struct does not have "AdditionalFields", give an error
-* Get struct tag detailing json names to go names, create mapping
-* For each public field name
- - Write field name out
- - If field value is a struct, recursively call our state machine
- - Otherwise, use the json.Encoder to write out the value
-
-## Unmarshalling
-
-The unmarshalling desin is also based around a statemachine design. The
-basic logic is as follows:
-
-* If struct has custom marhaller, call it
-* If struct has field "AdditionalFields", it must be a map[string]interface{}
-* Get struct tag detailing json names to go names, create mapping
-* For each key found
- - If key exists,
- - If value is basic type, extract value into struct field using Decoder
- - If value is struct type, recursively call statemachine
- - If key doesn't exist, add it to AdditionalFields if it exists using Decoder
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go
deleted file mode 100644
index 2238521f..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go
+++ /dev/null
@@ -1,184 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-// Package json provide functions for marshalling an unmarshalling types to JSON. These functions are meant to
-// be utilized inside of structs that implement json.Unmarshaler and json.Marshaler interfaces.
-// This package provides the additional functionality of writing fields that are not in the struct when marshalling
-// to a field called AdditionalFields if that field exists and is a map[string]interface{}.
-// When marshalling, if the struct has all the same prerequisites, it will uses the keys in AdditionalFields as
-// extra fields. This package uses encoding/json underneath.
-package json
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "reflect"
- "strings"
-)
-
-const addField = "AdditionalFields"
-const (
- marshalJSON = "MarshalJSON"
- unmarshalJSON = "UnmarshalJSON"
-)
-
-var (
- leftBrace = []byte("{")[0]
- rightBrace = []byte("}")[0]
- comma = []byte(",")[0]
- leftParen = []byte("[")[0]
- rightParen = []byte("]")[0]
-)
-
-var mapStrInterType = reflect.TypeOf(map[string]interface{}{})
-
-// stateFn defines a state machine function. This will be used in all state
-// machines in this package.
-type stateFn func() (stateFn, error)
-
-// Marshal is used to marshal a type into its JSON representation. It
-// wraps the stdlib calls in order to marshal a struct or *struct so
-// that a field called "AdditionalFields" of type map[string]interface{}
-// with "-" used inside struct tag `json:"-"` can be marshalled as if
-// they were fields within the struct.
-func Marshal(i interface{}) ([]byte, error) {
- buff := bytes.Buffer{}
- enc := json.NewEncoder(&buff)
- enc.SetEscapeHTML(false)
- enc.SetIndent("", "")
-
- v := reflect.ValueOf(i)
- if v.Kind() != reflect.Ptr && v.CanAddr() {
- v = v.Addr()
- }
- err := marshalStruct(v, &buff, enc)
- if err != nil {
- return nil, err
- }
- return buff.Bytes(), nil
-}
-
-// Unmarshal unmarshals a []byte representing JSON into i, which must be a *struct. In addition, if the struct has
-// a field called AdditionalFields of type map[string]interface{}, JSON data representing fields not in the struct
-// will be written as key/value pairs to AdditionalFields.
-func Unmarshal(b []byte, i interface{}) error {
- if len(b) == 0 {
- return nil
- }
-
- jdec := json.NewDecoder(bytes.NewBuffer(b))
- jdec.UseNumber()
- return unmarshalStruct(jdec, i)
-}
-
-// MarshalRaw marshals i into a json.RawMessage. If I cannot be marshalled,
-// this will panic. This is exposed to help test AdditionalField values
-// which are stored as json.RawMessage.
-func MarshalRaw(i interface{}) json.RawMessage {
- b, err := json.Marshal(i)
- if err != nil {
- panic(err)
- }
- return json.RawMessage(b)
-}
-
-// isDelim simply tests to see if a json.Token is a delimeter.
-func isDelim(got json.Token) bool {
- switch got.(type) {
- case json.Delim:
- return true
- }
- return false
-}
-
-// delimIs tests got to see if it is want.
-func delimIs(got json.Token, want rune) bool {
- switch v := got.(type) {
- case json.Delim:
- if v == json.Delim(want) {
- return true
- }
- }
- return false
-}
-
-// hasMarshalJSON will determine if the value or a pointer to this value has
-// the MarshalJSON method.
-func hasMarshalJSON(v reflect.Value) bool {
- if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid {
- _, ok := v.Interface().(json.Marshaler)
- return ok
- }
-
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- } else {
- if !v.CanAddr() {
- return false
- }
- v = v.Addr()
- }
-
- if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid {
- _, ok := v.Interface().(json.Marshaler)
- return ok
- }
- return false
-}
-
-// callMarshalJSON will call MarshalJSON() method on the value or a pointer to this value.
-// This will panic if the method is not defined.
-func callMarshalJSON(v reflect.Value) ([]byte, error) {
- if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid {
- marsh := v.Interface().(json.Marshaler)
- return marsh.MarshalJSON()
- }
-
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- } else {
- if v.CanAddr() {
- v = v.Addr()
- }
- }
-
- if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid {
- marsh := v.Interface().(json.Marshaler)
- return marsh.MarshalJSON()
- }
-
- panic(fmt.Sprintf("callMarshalJSON called on type %T that does not have MarshalJSON defined", v.Interface()))
-}
-
-// hasUnmarshalJSON will determine if the value or a pointer to this value has
-// the UnmarshalJSON method.
-func hasUnmarshalJSON(v reflect.Value) bool {
- // You can't unmarshal on a non-pointer type.
- if v.Kind() != reflect.Ptr {
- if !v.CanAddr() {
- return false
- }
- v = v.Addr()
- }
-
- if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid {
- _, ok := v.Interface().(json.Unmarshaler)
- return ok
- }
-
- return false
-}
-
-// hasOmitEmpty indicates if the field has instructed us to not output
-// the field if omitempty is set on the tag. tag is the string
-// returned by reflect.StructField.Tag().Get().
-func hasOmitEmpty(tag string) bool {
- sl := strings.Split(tag, ",")
- for _, str := range sl {
- if str == "omitempty" {
- return true
- }
- }
- return false
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go
deleted file mode 100644
index cef442f2..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/mapslice.go
+++ /dev/null
@@ -1,333 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-package json
-
-import (
- "encoding/json"
- "fmt"
- "reflect"
-)
-
-// unmarshalMap unmarshal's a map.
-func unmarshalMap(dec *json.Decoder, m reflect.Value) error {
- if m.Kind() != reflect.Ptr || m.Elem().Kind() != reflect.Map {
- panic("unmarshalMap called on non-*map value")
- }
- mapValueType := m.Elem().Type().Elem()
- walk := mapWalk{dec: dec, m: m, valueType: mapValueType}
- if err := walk.run(); err != nil {
- return err
- }
- return nil
-}
-
-type mapWalk struct {
- dec *json.Decoder
- key string
- m reflect.Value
- valueType reflect.Type
-}
-
-// run runs our decoder state machine.
-func (m *mapWalk) run() error {
- var state = m.start
- var err error
- for {
- state, err = state()
- if err != nil {
- return err
- }
- if state == nil {
- return nil
- }
- }
-}
-
-func (m *mapWalk) start() (stateFn, error) {
- // maps can have custom unmarshaler's.
- if hasUnmarshalJSON(m.m) {
- err := m.dec.Decode(m.m.Interface())
- if err != nil {
- return nil, err
- }
- return nil, nil
- }
-
- // We only want to use this if the map value is:
- // *struct/struct/map/slice
- // otherwise use standard decode
- t, _ := m.valueBaseType()
- switch t.Kind() {
- case reflect.Struct, reflect.Map, reflect.Slice:
- delim, err := m.dec.Token()
- if err != nil {
- return nil, err
- }
- // This indicates the value was set to JSON null.
- if delim == nil {
- return nil, nil
- }
- if !delimIs(delim, '{') {
- return nil, fmt.Errorf("Unmarshal expected opening {, received %v", delim)
- }
- return m.next, nil
- case reflect.Ptr:
- return nil, fmt.Errorf("do not support maps with values of '**type' or '*reference")
- }
-
- // This is a basic map type, so just use Decode().
- if err := m.dec.Decode(m.m.Interface()); err != nil {
- return nil, err
- }
-
- return nil, nil
-}
-
-func (m *mapWalk) next() (stateFn, error) {
- if m.dec.More() {
- key, err := m.dec.Token()
- if err != nil {
- return nil, err
- }
- m.key = key.(string)
- return m.storeValue, nil
- }
- // No more entries, so remove final }.
- _, err := m.dec.Token()
- if err != nil {
- return nil, err
- }
- return nil, nil
-}
-
-func (m *mapWalk) storeValue() (stateFn, error) {
- v := m.valueType
- for {
- switch v.Kind() {
- case reflect.Ptr:
- v = v.Elem()
- continue
- case reflect.Struct:
- return m.storeStruct, nil
- case reflect.Map:
- return m.storeMap, nil
- case reflect.Slice:
- return m.storeSlice, nil
- }
- return nil, fmt.Errorf("bug: mapWalk.storeValue() called on unsupported type: %v", v.Kind())
- }
-}
-
-func (m *mapWalk) storeStruct() (stateFn, error) {
- v := newValue(m.valueType)
- if err := unmarshalStruct(m.dec, v.Interface()); err != nil {
- return nil, err
- }
-
- if m.valueType.Kind() == reflect.Ptr {
- m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v)
- return m.next, nil
- }
- m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v.Elem())
-
- return m.next, nil
-}
-
-func (m *mapWalk) storeMap() (stateFn, error) {
- v := reflect.MakeMap(m.valueType)
- ptr := newValue(v.Type())
- ptr.Elem().Set(v)
- if err := unmarshalMap(m.dec, ptr); err != nil {
- return nil, err
- }
-
- m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v)
-
- return m.next, nil
-}
-
-func (m *mapWalk) storeSlice() (stateFn, error) {
- v := newValue(m.valueType)
- if err := unmarshalSlice(m.dec, v); err != nil {
- return nil, err
- }
-
- m.m.Elem().SetMapIndex(reflect.ValueOf(m.key), v.Elem())
-
- return m.next, nil
-}
-
-// valueType returns the underlying Type. So a *struct would yield
-// struct, etc...
-func (m *mapWalk) valueBaseType() (reflect.Type, bool) {
- ptr := false
- v := m.valueType
- if v.Kind() == reflect.Ptr {
- ptr = true
- v = v.Elem()
- }
- return v, ptr
-}
-
-// unmarshalSlice unmarshal's the next value, which must be a slice, into
-// ptrSlice, which must be a pointer to a slice. newValue() can be use to
-// create the slice.
-func unmarshalSlice(dec *json.Decoder, ptrSlice reflect.Value) error {
- if ptrSlice.Kind() != reflect.Ptr || ptrSlice.Elem().Kind() != reflect.Slice {
- panic("unmarshalSlice called on non-*[]slice value")
- }
- sliceValueType := ptrSlice.Elem().Type().Elem()
- walk := sliceWalk{
- dec: dec,
- s: ptrSlice,
- valueType: sliceValueType,
- }
- if err := walk.run(); err != nil {
- return err
- }
-
- return nil
-}
-
-type sliceWalk struct {
- dec *json.Decoder
- s reflect.Value // *[]slice
- valueType reflect.Type
-}
-
-// run runs our decoder state machine.
-func (s *sliceWalk) run() error {
- var state = s.start
- var err error
- for {
- state, err = state()
- if err != nil {
- return err
- }
- if state == nil {
- return nil
- }
- }
-}
-
-func (s *sliceWalk) start() (stateFn, error) {
- // slices can have custom unmarshaler's.
- if hasUnmarshalJSON(s.s) {
- err := s.dec.Decode(s.s.Interface())
- if err != nil {
- return nil, err
- }
- return nil, nil
- }
-
- // We only want to use this if the slice value is:
- // []*struct/[]struct/[]map/[]slice
- // otherwise use standard decode
- t := s.valueBaseType()
-
- switch t.Kind() {
- case reflect.Ptr:
- return nil, fmt.Errorf("cannot unmarshal into a ** or *")
- case reflect.Struct, reflect.Map, reflect.Slice:
- delim, err := s.dec.Token()
- if err != nil {
- return nil, err
- }
- // This indicates the value was set to nil.
- if delim == nil {
- return nil, nil
- }
- if !delimIs(delim, '[') {
- return nil, fmt.Errorf("Unmarshal expected opening [, received %v", delim)
- }
- return s.next, nil
- }
-
- if err := s.dec.Decode(s.s.Interface()); err != nil {
- return nil, err
- }
- return nil, nil
-}
-
-func (s *sliceWalk) next() (stateFn, error) {
- if s.dec.More() {
- return s.storeValue, nil
- }
- // Nothing left in the slice, remove closing ]
- _, err := s.dec.Token()
- return nil, err
-}
-
-func (s *sliceWalk) storeValue() (stateFn, error) {
- t := s.valueBaseType()
- switch t.Kind() {
- case reflect.Ptr:
- return nil, fmt.Errorf("do not support 'pointer to pointer' or 'pointer to reference' types")
- case reflect.Struct:
- return s.storeStruct, nil
- case reflect.Map:
- return s.storeMap, nil
- case reflect.Slice:
- return s.storeSlice, nil
- }
- return nil, fmt.Errorf("bug: sliceWalk.storeValue() called on unsupported type: %v", t.Kind())
-}
-
-func (s *sliceWalk) storeStruct() (stateFn, error) {
- v := newValue(s.valueType)
- if err := unmarshalStruct(s.dec, v.Interface()); err != nil {
- return nil, err
- }
-
- if s.valueType.Kind() == reflect.Ptr {
- s.s.Elem().Set(reflect.Append(s.s.Elem(), v))
- return s.next, nil
- }
-
- s.s.Elem().Set(reflect.Append(s.s.Elem(), v.Elem()))
- return s.next, nil
-}
-
-func (s *sliceWalk) storeMap() (stateFn, error) {
- v := reflect.MakeMap(s.valueType)
- ptr := newValue(v.Type())
- ptr.Elem().Set(v)
-
- if err := unmarshalMap(s.dec, ptr); err != nil {
- return nil, err
- }
-
- s.s.Elem().Set(reflect.Append(s.s.Elem(), v))
-
- return s.next, nil
-}
-
-func (s *sliceWalk) storeSlice() (stateFn, error) {
- v := newValue(s.valueType)
- if err := unmarshalSlice(s.dec, v); err != nil {
- return nil, err
- }
-
- s.s.Elem().Set(reflect.Append(s.s.Elem(), v.Elem()))
-
- return s.next, nil
-}
-
-// valueType returns the underlying Type. So a *struct would yield
-// struct, etc...
-func (s *sliceWalk) valueBaseType() reflect.Type {
- v := s.valueType
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- }
- return v
-}
-
-// newValue() returns a new *type that represents type passed.
-func newValue(valueType reflect.Type) reflect.Value {
- if valueType.Kind() == reflect.Ptr {
- return reflect.New(valueType.Elem())
- }
- return reflect.New(valueType)
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go
deleted file mode 100644
index df5dc6e1..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/marshal.go
+++ /dev/null
@@ -1,346 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-package json
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "reflect"
- "unicode"
-)
-
-// marshalStruct takes in i, which must be a *struct or struct and marshals its content
-// as JSON into buff (sometimes with writes to buff directly, sometimes via enc).
-// This call is recursive for all fields of *struct or struct type.
-func marshalStruct(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error {
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- }
- // We only care about custom Marshalling a struct.
- if v.Kind() != reflect.Struct {
- return fmt.Errorf("bug: marshal() received a non *struct or struct, received type %T", v.Interface())
- }
-
- if hasMarshalJSON(v) {
- b, err := callMarshalJSON(v)
- if err != nil {
- return err
- }
- buff.Write(b)
- return nil
- }
-
- t := v.Type()
-
- // If it has an AdditionalFields field make sure its the right type.
- f := v.FieldByName(addField)
- if f.Kind() != reflect.Invalid {
- if f.Kind() != reflect.Map {
- return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", v.Interface())
- }
- if !f.Type().AssignableTo(mapStrInterType) {
- return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", v.Interface())
- }
- }
-
- translator, err := findFields(v)
- if err != nil {
- return err
- }
-
- buff.WriteByte(leftBrace)
- for x := 0; x < v.NumField(); x++ {
- field := v.Field(x)
-
- // We don't access private fields.
- if unicode.IsLower(rune(t.Field(x).Name[0])) {
- continue
- }
-
- if t.Field(x).Name == addField {
- if v.Field(x).Len() > 0 {
- if err := writeAddFields(field.Interface(), buff, enc); err != nil {
- return err
- }
- buff.WriteByte(comma)
- }
- continue
- }
-
- // If they have omitempty set, we don't write out the field if
- // it is the zero value.
- if hasOmitEmpty(t.Field(x).Tag.Get("json")) {
- if v.Field(x).IsZero() {
- continue
- }
- }
-
- // Write out the field name part.
- jsonName := translator.jsonName(t.Field(x).Name)
- buff.WriteString(fmt.Sprintf("%q:", jsonName))
-
- if field.Kind() == reflect.Ptr {
- field = field.Elem()
- }
-
- if err := marshalStructField(field, buff, enc); err != nil {
- return err
- }
- }
-
- buff.Truncate(buff.Len() - 1) // Remove final comma
- buff.WriteByte(rightBrace)
-
- return nil
-}
-
-func marshalStructField(field reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error {
- // Determine if we need a trailing comma.
- defer buff.WriteByte(comma)
-
- switch field.Kind() {
- // If it was a *struct or struct, we need to recursively all marshal().
- case reflect.Struct:
- if field.CanAddr() {
- field = field.Addr()
- }
- return marshalStruct(field, buff, enc)
- case reflect.Map:
- return marshalMap(field, buff, enc)
- case reflect.Slice:
- return marshalSlice(field, buff, enc)
- }
-
- // It is just a basic type, so encode it.
- if err := enc.Encode(field.Interface()); err != nil {
- return err
- }
- buff.Truncate(buff.Len() - 1) // Remove Encode() added \n
-
- return nil
-}
-
-func marshalMap(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error {
- if v.Kind() != reflect.Map {
- return fmt.Errorf("bug: marshalMap() called on %T", v.Interface())
- }
- if v.Len() == 0 {
- buff.WriteByte(leftBrace)
- buff.WriteByte(rightBrace)
- return nil
- }
- encoder := mapEncode{m: v, buff: buff, enc: enc}
- return encoder.run()
-}
-
-type mapEncode struct {
- m reflect.Value
- buff *bytes.Buffer
- enc *json.Encoder
-
- valueBaseType reflect.Type
-}
-
-// run runs our encoder state machine.
-func (m *mapEncode) run() error {
- var state = m.start
- var err error
- for {
- state, err = state()
- if err != nil {
- return err
- }
- if state == nil {
- return nil
- }
- }
-}
-
-func (m *mapEncode) start() (stateFn, error) {
- if hasMarshalJSON(m.m) {
- b, err := callMarshalJSON(m.m)
- if err != nil {
- return nil, err
- }
- m.buff.Write(b)
- return nil, nil
- }
-
- valueBaseType := m.m.Type().Elem()
- if valueBaseType.Kind() == reflect.Ptr {
- valueBaseType = valueBaseType.Elem()
- }
- m.valueBaseType = valueBaseType
-
- switch valueBaseType.Kind() {
- case reflect.Ptr:
- return nil, fmt.Errorf("Marshal does not support ** or *")
- case reflect.Struct, reflect.Map, reflect.Slice:
- return m.encode, nil
- }
-
- // If the map value doesn't have a struct/map/slice, just Encode() it.
- if err := m.enc.Encode(m.m.Interface()); err != nil {
- return nil, err
- }
- m.buff.Truncate(m.buff.Len() - 1) // Remove Encode() added \n
- return nil, nil
-}
-
-func (m *mapEncode) encode() (stateFn, error) {
- m.buff.WriteByte(leftBrace)
-
- iter := m.m.MapRange()
- for iter.Next() {
- // Write the key.
- k := iter.Key()
- m.buff.WriteString(fmt.Sprintf("%q:", k.String()))
-
- v := iter.Value()
- switch m.valueBaseType.Kind() {
- case reflect.Struct:
- if v.CanAddr() {
- v = v.Addr()
- }
- if err := marshalStruct(v, m.buff, m.enc); err != nil {
- return nil, err
- }
- case reflect.Map:
- if err := marshalMap(v, m.buff, m.enc); err != nil {
- return nil, err
- }
- case reflect.Slice:
- if err := marshalSlice(v, m.buff, m.enc); err != nil {
- return nil, err
- }
- default:
- panic(fmt.Sprintf("critical bug: mapEncode.encode() called with value base type: %v", m.valueBaseType.Kind()))
- }
- m.buff.WriteByte(comma)
- }
- m.buff.Truncate(m.buff.Len() - 1) // Remove final comma
- m.buff.WriteByte(rightBrace)
-
- return nil, nil
-}
-
-func marshalSlice(v reflect.Value, buff *bytes.Buffer, enc *json.Encoder) error {
- if v.Kind() != reflect.Slice {
- return fmt.Errorf("bug: marshalSlice() called on %T", v.Interface())
- }
- if v.Len() == 0 {
- buff.WriteByte(leftParen)
- buff.WriteByte(rightParen)
- return nil
- }
- encoder := sliceEncode{s: v, buff: buff, enc: enc}
- return encoder.run()
-}
-
-type sliceEncode struct {
- s reflect.Value
- buff *bytes.Buffer
- enc *json.Encoder
-
- valueBaseType reflect.Type
-}
-
-// run runs our encoder state machine.
-func (s *sliceEncode) run() error {
- var state = s.start
- var err error
- for {
- state, err = state()
- if err != nil {
- return err
- }
- if state == nil {
- return nil
- }
- }
-}
-
-func (s *sliceEncode) start() (stateFn, error) {
- if hasMarshalJSON(s.s) {
- b, err := callMarshalJSON(s.s)
- if err != nil {
- return nil, err
- }
- s.buff.Write(b)
- return nil, nil
- }
-
- valueBaseType := s.s.Type().Elem()
- if valueBaseType.Kind() == reflect.Ptr {
- valueBaseType = valueBaseType.Elem()
- }
- s.valueBaseType = valueBaseType
-
- switch valueBaseType.Kind() {
- case reflect.Ptr:
- return nil, fmt.Errorf("Marshal does not support ** or *")
- case reflect.Struct, reflect.Map, reflect.Slice:
- return s.encode, nil
- }
-
- // If the map value doesn't have a struct/map/slice, just Encode() it.
- if err := s.enc.Encode(s.s.Interface()); err != nil {
- return nil, err
- }
- s.buff.Truncate(s.buff.Len() - 1) // Remove Encode added \n
-
- return nil, nil
-}
-
-func (s *sliceEncode) encode() (stateFn, error) {
- s.buff.WriteByte(leftParen)
- for i := 0; i < s.s.Len(); i++ {
- v := s.s.Index(i)
- switch s.valueBaseType.Kind() {
- case reflect.Struct:
- if v.CanAddr() {
- v = v.Addr()
- }
- if err := marshalStruct(v, s.buff, s.enc); err != nil {
- return nil, err
- }
- case reflect.Map:
- if err := marshalMap(v, s.buff, s.enc); err != nil {
- return nil, err
- }
- case reflect.Slice:
- if err := marshalSlice(v, s.buff, s.enc); err != nil {
- return nil, err
- }
- default:
- panic(fmt.Sprintf("critical bug: mapEncode.encode() called with value base type: %v", s.valueBaseType.Kind()))
- }
- s.buff.WriteByte(comma)
- }
- s.buff.Truncate(s.buff.Len() - 1) // Remove final comma
- s.buff.WriteByte(rightParen)
- return nil, nil
-}
-
-// writeAddFields writes the AdditionalFields struct field out to JSON as field
-// values. i must be a map[string]interface{} or this will panic.
-func writeAddFields(i interface{}, buff *bytes.Buffer, enc *json.Encoder) error {
- m := i.(map[string]interface{})
-
- x := 0
- for k, v := range m {
- buff.WriteString(fmt.Sprintf("%q:", k))
- if err := enc.Encode(v); err != nil {
- return err
- }
- buff.Truncate(buff.Len() - 1) // Remove Encode() added \n
-
- if x+1 != len(m) {
- buff.WriteByte(comma)
- }
- x++
- }
- return nil
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go
deleted file mode 100644
index 07751544..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/struct.go
+++ /dev/null
@@ -1,290 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-package json
-
-import (
- "encoding/json"
- "fmt"
- "reflect"
- "strings"
-)
-
-func unmarshalStruct(jdec *json.Decoder, i interface{}) error {
- v := reflect.ValueOf(i)
- if v.Kind() != reflect.Ptr {
- return fmt.Errorf("Unmarshal() received type %T, which is not a *struct", i)
- }
- v = v.Elem()
- if v.Kind() != reflect.Struct {
- return fmt.Errorf("Unmarshal() received type %T, which is not a *struct", i)
- }
-
- if hasUnmarshalJSON(v) {
- // Indicates that this type has a custom Unmarshaler.
- return jdec.Decode(v.Addr().Interface())
- }
-
- f := v.FieldByName(addField)
- if f.Kind() == reflect.Invalid {
- return fmt.Errorf("Unmarshal(%T) only supports structs that have the field AdditionalFields or implements json.Unmarshaler", i)
- }
-
- if f.Kind() != reflect.Map || !f.Type().AssignableTo(mapStrInterType) {
- return fmt.Errorf("type %T has field 'AdditionalFields' that is not a map[string]interface{}", i)
- }
-
- dec := newDecoder(jdec, v)
- return dec.run()
-}
-
-type decoder struct {
- dec *json.Decoder
- value reflect.Value // This will be a reflect.Struct
- translator translateFields
- key string
-}
-
-func newDecoder(dec *json.Decoder, value reflect.Value) *decoder {
- return &decoder{value: value, dec: dec}
-}
-
-// run runs our decoder state machine.
-func (d *decoder) run() error {
- var state = d.start
- var err error
- for {
- state, err = state()
- if err != nil {
- return err
- }
- if state == nil {
- return nil
- }
- }
-}
-
-// start looks for our opening delimeter '{' and then transitions to looping through our fields.
-func (d *decoder) start() (stateFn, error) {
- var err error
- d.translator, err = findFields(d.value)
- if err != nil {
- return nil, err
- }
-
- delim, err := d.dec.Token()
- if err != nil {
- return nil, err
- }
- if !delimIs(delim, '{') {
- return nil, fmt.Errorf("Unmarshal expected opening {, received %v", delim)
- }
-
- return d.next, nil
-}
-
-// next gets the next struct field name from the raw json or stops the machine if we get our closing }.
-func (d *decoder) next() (stateFn, error) {
- if !d.dec.More() {
- // Remove the closing }.
- if _, err := d.dec.Token(); err != nil {
- return nil, err
- }
- return nil, nil
- }
-
- key, err := d.dec.Token()
- if err != nil {
- return nil, err
- }
-
- d.key = key.(string)
- return d.storeValue, nil
-}
-
-// storeValue takes the next value and stores it our struct. If the field can't be found
-// in the struct, it pushes the operation to storeAdditional().
-func (d *decoder) storeValue() (stateFn, error) {
- goName := d.translator.goName(d.key)
- if goName == "" {
- goName = d.key
- }
-
- // We don't have the field in the struct, so it goes in AdditionalFields.
- f := d.value.FieldByName(goName)
- if f.Kind() == reflect.Invalid {
- return d.storeAdditional, nil
- }
-
- // Indicates that this type has a custom Unmarshaler.
- if hasUnmarshalJSON(f) {
- err := d.dec.Decode(f.Addr().Interface())
- if err != nil {
- return nil, err
- }
- return d.next, nil
- }
-
- t, isPtr, err := fieldBaseType(d.value, goName)
- if err != nil {
- return nil, fmt.Errorf("type(%s) had field(%s) %w", d.value.Type().Name(), goName, err)
- }
-
- switch t.Kind() {
- // We need to recursively call ourselves on any *struct or struct.
- case reflect.Struct:
- if isPtr {
- if f.IsNil() {
- f.Set(reflect.New(t))
- }
- } else {
- f = f.Addr()
- }
- if err := unmarshalStruct(d.dec, f.Interface()); err != nil {
- return nil, err
- }
- return d.next, nil
- case reflect.Map:
- v := reflect.MakeMap(f.Type())
- ptr := newValue(f.Type())
- ptr.Elem().Set(v)
- if err := unmarshalMap(d.dec, ptr); err != nil {
- return nil, err
- }
- f.Set(ptr.Elem())
- return d.next, nil
- case reflect.Slice:
- v := reflect.MakeSlice(f.Type(), 0, 0)
- ptr := newValue(f.Type())
- ptr.Elem().Set(v)
- if err := unmarshalSlice(d.dec, ptr); err != nil {
- return nil, err
- }
- f.Set(ptr.Elem())
- return d.next, nil
- }
-
- if !isPtr {
- f = f.Addr()
- }
-
- // For values that are pointers, we need them to be non-nil in order
- // to decode into them.
- if f.IsNil() {
- f.Set(reflect.New(t))
- }
-
- if err := d.dec.Decode(f.Interface()); err != nil {
- return nil, err
- }
-
- return d.next, nil
-}
-
-// storeAdditional pushes the key/value into our .AdditionalFields map.
-func (d *decoder) storeAdditional() (stateFn, error) {
- rw := json.RawMessage{}
- if err := d.dec.Decode(&rw); err != nil {
- return nil, err
- }
- field := d.value.FieldByName(addField)
- if field.IsNil() {
- field.Set(reflect.MakeMap(field.Type()))
- }
- field.SetMapIndex(reflect.ValueOf(d.key), reflect.ValueOf(rw))
- return d.next, nil
-}
-
-func fieldBaseType(v reflect.Value, fieldName string) (t reflect.Type, isPtr bool, err error) {
- sf, ok := v.Type().FieldByName(fieldName)
- if !ok {
- return nil, false, fmt.Errorf("bug: fieldBaseType() lookup of field(%s) on type(%s): do not have field", fieldName, v.Type().Name())
- }
- t = sf.Type
- if t.Kind() == reflect.Ptr {
- t = t.Elem()
- isPtr = true
- }
- if t.Kind() == reflect.Ptr {
- return nil, isPtr, fmt.Errorf("received pointer to pointer type, not supported")
- }
- return t, isPtr, nil
-}
-
-type translateField struct {
- jsonName string
- goName string
-}
-
-// translateFields is a list of translateFields with a handy lookup method.
-type translateFields []translateField
-
-// goName loops through a list of fields looking for one contaning the jsonName and
-// returning the goName. If not found, returns the empty string.
-// Note: not a map because at this size slices are faster even in tight loops.
-func (t translateFields) goName(jsonName string) string {
- for _, entry := range t {
- if entry.jsonName == jsonName {
- return entry.goName
- }
- }
- return ""
-}
-
-// jsonName loops through a list of fields looking for one contaning the goName and
-// returning the jsonName. If not found, returns the empty string.
-// Note: not a map because at this size slices are faster even in tight loops.
-func (t translateFields) jsonName(goName string) string {
- for _, entry := range t {
- if entry.goName == goName {
- return entry.jsonName
- }
- }
- return ""
-}
-
-var umarshalerType = reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()
-
-// findFields parses a struct and writes the field tags for lookup. It will return an error
-// if any field has a type of *struct or struct that does not implement json.Marshaler.
-func findFields(v reflect.Value) (translateFields, error) {
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- }
- if v.Kind() != reflect.Struct {
- return nil, fmt.Errorf("findFields received a %s type, expected *struct or struct", v.Type().Name())
- }
- tfs := make([]translateField, 0, v.NumField())
- for i := 0; i < v.NumField(); i++ {
- tf := translateField{
- goName: v.Type().Field(i).Name,
- jsonName: parseTag(v.Type().Field(i).Tag.Get("json")),
- }
- switch tf.jsonName {
- case "", "-":
- tf.jsonName = tf.goName
- }
- tfs = append(tfs, tf)
-
- f := v.Field(i)
- if f.Kind() == reflect.Ptr {
- f = f.Elem()
- }
- if f.Kind() == reflect.Struct {
- if f.Type().Implements(umarshalerType) {
- return nil, fmt.Errorf("struct type %q which has field %q which "+
- "doesn't implement json.Unmarshaler", v.Type().Name(), v.Type().Field(i).Name)
- }
- }
- }
- return tfs, nil
-}
-
-// parseTag just returns the first entry in the tag. tag is the string
-// returned by reflect.StructField.Tag().Get().
-func parseTag(tag string) string {
- if idx := strings.Index(tag, ","); idx != -1 {
- return tag[:idx]
- }
- return tag
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go
deleted file mode 100644
index a1c99621..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time/time.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-// Package time provides for custom types to translate time from JSON and other formats
-// into time.Time objects.
-package time
-
-import (
- "fmt"
- "strconv"
- "strings"
- "time"
-)
-
-// Unix provides a type that can marshal and unmarshal a string representation
-// of the unix epoch into a time.Time object.
-type Unix struct {
- T time.Time
-}
-
-// MarshalJSON implements encoding/json.MarshalJSON().
-func (u Unix) MarshalJSON() ([]byte, error) {
- if u.T.IsZero() {
- return []byte(""), nil
- }
- return []byte(fmt.Sprintf("%q", strconv.FormatInt(u.T.Unix(), 10))), nil
-}
-
-// UnmarshalJSON implements encoding/json.UnmarshalJSON().
-func (u *Unix) UnmarshalJSON(b []byte) error {
- i, err := strconv.Atoi(strings.Trim(string(b), `"`))
- if err != nil {
- return fmt.Errorf("unix time(%s) could not be converted from string to int: %w", string(b), err)
- }
- u.T = time.Unix(int64(i), 0)
- return nil
-}
-
-// DurationTime provides a type that can marshal and unmarshal a string representation
-// of a duration from now into a time.Time object.
-// Note: I'm not sure this is the best way to do this. What happens is we get a field
-// called "expires_in" that represents the seconds from now that this expires. We
-// turn that into a time we call .ExpiresOn. But maybe we should be recording
-// when the token was received at .TokenRecieved and .ExpiresIn should remain as a duration.
-// Then we could have a method called ExpiresOn(). Honestly, the whole thing is
-// bad because the server doesn't return a concrete time. I think this is
-// cleaner, but its not great either.
-type DurationTime struct {
- T time.Time
-}
-
-// MarshalJSON implements encoding/json.MarshalJSON().
-func (d DurationTime) MarshalJSON() ([]byte, error) {
- if d.T.IsZero() {
- return []byte(""), nil
- }
-
- dt := time.Until(d.T)
- return []byte(fmt.Sprintf("%d", int64(dt*time.Second))), nil
-}
-
-// UnmarshalJSON implements encoding/json.UnmarshalJSON().
-func (d *DurationTime) UnmarshalJSON(b []byte) error {
- i, err := strconv.Atoi(strings.Trim(string(b), `"`))
- if err != nil {
- return fmt.Errorf("unix time(%s) could not be converted from string to int: %w", string(b), err)
- }
- d.T = time.Now().Add(time.Duration(i) * time.Second)
- return nil
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
deleted file mode 100644
index 04236ff3..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-// Package local contains a local HTTP server used with interactive authentication.
-package local
-
-import (
- "context"
- "fmt"
- "net"
- "net/http"
- "strconv"
- "strings"
- "time"
-)
-
-var okPage = []byte(`
-
-
-
-
- Authentication Complete
-
-
-
Authentication complete. You can return to the application. Feel free to close this browser tab.
-
-
-`)
-
-const failPage = `
-
-
-
-
- Authentication Failed
-
-
- Authentication failed. You can return to the application. Feel free to close this browser tab.
- Error details: error %s error_description: %s
-
-
-`
-
-// Result is the result from the redirect.
-type Result struct {
- // Code is the code sent by the authority server.
- Code string
- // Err is set if there was an error.
- Err error
-}
-
-// Server is an HTTP server.
-type Server struct {
- // Addr is the address the server is listening on.
- Addr string
- resultCh chan Result
- s *http.Server
- reqState string
-}
-
-// New creates a local HTTP server and starts it.
-func New(reqState string, port int) (*Server, error) {
- var l net.Listener
- var err error
- var portStr string
- if port > 0 {
- // use port provided by caller
- l, err = net.Listen("tcp", fmt.Sprintf("localhost:%d", port))
- portStr = strconv.FormatInt(int64(port), 10)
- } else {
- // find a free port
- for i := 0; i < 10; i++ {
- l, err = net.Listen("tcp", "localhost:0")
- if err != nil {
- continue
- }
- addr := l.Addr().String()
- portStr = addr[strings.LastIndex(addr, ":")+1:]
- break
- }
- }
- if err != nil {
- return nil, err
- }
-
- serv := &Server{
- Addr: fmt.Sprintf("http://localhost:%s", portStr),
- s: &http.Server{Addr: "localhost:0", ReadHeaderTimeout: time.Second},
- reqState: reqState,
- resultCh: make(chan Result, 1),
- }
- serv.s.Handler = http.HandlerFunc(serv.handler)
-
- if err := serv.start(l); err != nil {
- return nil, err
- }
-
- return serv, nil
-}
-
-func (s *Server) start(l net.Listener) error {
- go func() {
- err := s.s.Serve(l)
- if err != nil {
- select {
- case s.resultCh <- Result{Err: err}:
- default:
- }
- }
- }()
-
- return nil
-}
-
-// Result gets the result of the redirect operation. Once a single result is returned, the server
-// is shutdown. ctx deadline will be honored.
-func (s *Server) Result(ctx context.Context) Result {
- select {
- case <-ctx.Done():
- return Result{Err: ctx.Err()}
- case r := <-s.resultCh:
- return r
- }
-}
-
-// Shutdown shuts down the server.
-func (s *Server) Shutdown() {
- // Note: You might get clever and think you can do this in handler() as a defer, you can't.
- _ = s.s.Shutdown(context.Background())
-}
-
-func (s *Server) putResult(r Result) {
- select {
- case s.resultCh <- r:
- default:
- }
-}
-
-func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
- q := r.URL.Query()
-
- headerErr := q.Get("error")
- if headerErr != "" {
- desc := q.Get("error_description")
- // Note: It is a little weird we handle some errors by not going to the failPage. If they all should,
- // change this to s.error() and make s.error() write the failPage instead of an error code.
- _, _ = w.Write([]byte(fmt.Sprintf(failPage, headerErr, desc)))
- s.putResult(Result{Err: fmt.Errorf(desc)})
- return
- }
-
- respState := q.Get("state")
- switch respState {
- case s.reqState:
- case "":
- s.error(w, http.StatusInternalServerError, "server didn't send OAuth state")
- return
- default:
- s.error(w, http.StatusInternalServerError, "mismatched OAuth state, req(%s), resp(%s)", s.reqState, respState)
- return
- }
-
- code := q.Get("code")
- if code == "" {
- s.error(w, http.StatusInternalServerError, "authorization code missing in query string")
- return
- }
-
- _, _ = w.Write(okPage)
- s.putResult(Result{Code: code})
-}
-
-func (s *Server) error(w http.ResponseWriter, code int, str string, i ...interface{}) {
- err := fmt.Errorf(str, i...)
- http.Error(w, err.Error(), code)
- s.putResult(Result{Err: err})
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go
deleted file mode 100644
index ef8d908a..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go
+++ /dev/null
@@ -1,354 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-package oauth
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "io"
- "time"
-
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported"
- internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs"
- "github.com/google/uuid"
-)
-
-// ResolveEndpointer contains the methods for resolving authority endpoints.
-type ResolveEndpointer interface {
- ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error)
-}
-
-// AccessTokens contains the methods for fetching tokens from different sources.
-type AccessTokens interface {
- DeviceCodeResult(ctx context.Context, authParameters authority.AuthParams) (accesstokens.DeviceCodeResult, error)
- FromUsernamePassword(ctx context.Context, authParameters authority.AuthParams) (accesstokens.TokenResponse, error)
- FromAuthCode(ctx context.Context, req accesstokens.AuthCodeRequest) (accesstokens.TokenResponse, error)
- FromRefreshToken(ctx context.Context, appType accesstokens.AppType, authParams authority.AuthParams, cc *accesstokens.Credential, refreshToken string) (accesstokens.TokenResponse, error)
- FromClientSecret(ctx context.Context, authParameters authority.AuthParams, clientSecret string) (accesstokens.TokenResponse, error)
- FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (accesstokens.TokenResponse, error)
- FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (accesstokens.TokenResponse, error)
- FromUserAssertionClientCertificate(ctx context.Context, authParameters authority.AuthParams, userAssertion string, assertion string) (accesstokens.TokenResponse, error)
- FromDeviceCodeResult(ctx context.Context, authParameters authority.AuthParams, deviceCodeResult accesstokens.DeviceCodeResult) (accesstokens.TokenResponse, error)
- FromSamlGrant(ctx context.Context, authParameters authority.AuthParams, samlGrant wstrust.SamlTokenInfo) (accesstokens.TokenResponse, error)
-}
-
-// FetchAuthority will be implemented by authority.Authority.
-type FetchAuthority interface {
- UserRealm(context.Context, authority.AuthParams) (authority.UserRealm, error)
- AADInstanceDiscovery(context.Context, authority.Info) (authority.InstanceDiscoveryResponse, error)
-}
-
-// FetchWSTrust contains the methods for interacting with WSTrust endpoints.
-type FetchWSTrust interface {
- Mex(ctx context.Context, federationMetadataURL string) (defs.MexDocument, error)
- SAMLTokenInfo(ctx context.Context, authParameters authority.AuthParams, cloudAudienceURN string, endpoint defs.Endpoint) (wstrust.SamlTokenInfo, error)
-}
-
-// Client provides tokens for various types of token requests.
-type Client struct {
- Resolver ResolveEndpointer
- AccessTokens AccessTokens
- Authority FetchAuthority
- WSTrust FetchWSTrust
-}
-
-// New is the constructor for Token.
-func New(httpClient ops.HTTPClient) *Client {
- r := ops.New(httpClient)
- return &Client{
- Resolver: newAuthorityEndpoint(r),
- AccessTokens: r.AccessTokens(),
- Authority: r.Authority(),
- WSTrust: r.WSTrust(),
- }
-}
-
-// ResolveEndpoints gets the authorization and token endpoints and creates an AuthorityEndpoints instance.
-func (t *Client) ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) {
- return t.Resolver.ResolveEndpoints(ctx, authorityInfo, userPrincipalName)
-}
-
-// AADInstanceDiscovery attempts to discover a tenant endpoint (used in OIDC auth with an authorization endpoint).
-// This is done by AAD which allows for aliasing of tenants (windows.sts.net is the same as login.windows.com).
-func (t *Client) AADInstanceDiscovery(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryResponse, error) {
- return t.Authority.AADInstanceDiscovery(ctx, authorityInfo)
-}
-
-// AuthCode returns a token based on an authorization code.
-func (t *Client) AuthCode(ctx context.Context, req accesstokens.AuthCodeRequest) (accesstokens.TokenResponse, error) {
- if err := scopeError(req.AuthParams); err != nil {
- return accesstokens.TokenResponse{}, err
- }
- if err := t.resolveEndpoint(ctx, &req.AuthParams, ""); err != nil {
- return accesstokens.TokenResponse{}, err
- }
-
- tResp, err := t.AccessTokens.FromAuthCode(ctx, req)
- if err != nil {
- return accesstokens.TokenResponse{}, fmt.Errorf("could not retrieve token from auth code: %w", err)
- }
- return tResp, nil
-}
-
-// Credential acquires a token from the authority using a client credentials grant.
-func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams, cred *accesstokens.Credential) (accesstokens.TokenResponse, error) {
- if cred.TokenProvider != nil {
- now := time.Now()
- scopes := make([]string, len(authParams.Scopes))
- copy(scopes, authParams.Scopes)
- params := exported.TokenProviderParameters{
- Claims: authParams.Claims,
- CorrelationID: uuid.New().String(),
- Scopes: scopes,
- TenantID: authParams.AuthorityInfo.Tenant,
- }
- tr, err := cred.TokenProvider(ctx, params)
- if err != nil {
- if len(scopes) == 0 {
- err = fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which may cause the following error: %w", err)
- return accesstokens.TokenResponse{}, err
- }
- return accesstokens.TokenResponse{}, err
- }
- return accesstokens.TokenResponse{
- TokenType: authParams.AuthnScheme.AccessTokenType(),
- AccessToken: tr.AccessToken,
- ExpiresOn: internalTime.DurationTime{
- T: now.Add(time.Duration(tr.ExpiresInSeconds) * time.Second),
- },
- GrantedScopes: accesstokens.Scopes{Slice: authParams.Scopes},
- }, nil
- }
-
- if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil {
- return accesstokens.TokenResponse{}, err
- }
-
- if cred.Secret != "" {
- return t.AccessTokens.FromClientSecret(ctx, authParams, cred.Secret)
- }
- jwt, err := cred.JWT(ctx, authParams)
- if err != nil {
- return accesstokens.TokenResponse{}, err
- }
- return t.AccessTokens.FromAssertion(ctx, authParams, jwt)
-}
-
-// Credential acquires a token from the authority using a client credentials grant.
-func (t *Client) OnBehalfOf(ctx context.Context, authParams authority.AuthParams, cred *accesstokens.Credential) (accesstokens.TokenResponse, error) {
- if err := scopeError(authParams); err != nil {
- return accesstokens.TokenResponse{}, err
- }
- if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil {
- return accesstokens.TokenResponse{}, err
- }
-
- if cred.Secret != "" {
- return t.AccessTokens.FromUserAssertionClientSecret(ctx, authParams, authParams.UserAssertion, cred.Secret)
- }
- jwt, err := cred.JWT(ctx, authParams)
- if err != nil {
- return accesstokens.TokenResponse{}, err
- }
- tr, err := t.AccessTokens.FromUserAssertionClientCertificate(ctx, authParams, authParams.UserAssertion, jwt)
- if err != nil {
- return accesstokens.TokenResponse{}, err
- }
- return tr, nil
-}
-
-func (t *Client) Refresh(ctx context.Context, reqType accesstokens.AppType, authParams authority.AuthParams, cc *accesstokens.Credential, refreshToken accesstokens.RefreshToken) (accesstokens.TokenResponse, error) {
- if err := scopeError(authParams); err != nil {
- return accesstokens.TokenResponse{}, err
- }
- if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil {
- return accesstokens.TokenResponse{}, err
- }
-
- tr, err := t.AccessTokens.FromRefreshToken(ctx, reqType, authParams, cc, refreshToken.Secret)
- if err != nil {
- return accesstokens.TokenResponse{}, err
- }
- return tr, nil
-}
-
-// UsernamePassword retrieves a token where a username and password is used. However, if this is
-// a user realm of "Federated", this uses SAML tokens. If "Managed", uses normal username/password.
-func (t *Client) UsernamePassword(ctx context.Context, authParams authority.AuthParams) (accesstokens.TokenResponse, error) {
- if err := scopeError(authParams); err != nil {
- return accesstokens.TokenResponse{}, err
- }
-
- if authParams.AuthorityInfo.AuthorityType == authority.ADFS {
- if err := t.resolveEndpoint(ctx, &authParams, authParams.Username); err != nil {
- return accesstokens.TokenResponse{}, err
- }
- return t.AccessTokens.FromUsernamePassword(ctx, authParams)
- }
- if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil {
- return accesstokens.TokenResponse{}, err
- }
-
- userRealm, err := t.Authority.UserRealm(ctx, authParams)
- if err != nil {
- return accesstokens.TokenResponse{}, fmt.Errorf("problem getting user realm from authority: %w", err)
- }
-
- switch userRealm.AccountType {
- case authority.Federated:
- mexDoc, err := t.WSTrust.Mex(ctx, userRealm.FederationMetadataURL)
- if err != nil {
- err = fmt.Errorf("problem getting mex doc from federated url(%s): %w", userRealm.FederationMetadataURL, err)
- return accesstokens.TokenResponse{}, err
- }
-
- saml, err := t.WSTrust.SAMLTokenInfo(ctx, authParams, userRealm.CloudAudienceURN, mexDoc.UsernamePasswordEndpoint)
- if err != nil {
- err = fmt.Errorf("problem getting SAML token info: %w", err)
- return accesstokens.TokenResponse{}, err
- }
- tr, err := t.AccessTokens.FromSamlGrant(ctx, authParams, saml)
- if err != nil {
- return accesstokens.TokenResponse{}, err
- }
- return tr, nil
- case authority.Managed:
- if len(authParams.Scopes) == 0 {
- err = fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which may cause the following error: %w", err)
- return accesstokens.TokenResponse{}, err
- }
- return t.AccessTokens.FromUsernamePassword(ctx, authParams)
- }
- return accesstokens.TokenResponse{}, errors.New("unknown account type")
-}
-
-// DeviceCode is the result of a call to Token.DeviceCode().
-type DeviceCode struct {
- // Result is the device code result from the first call in the device code flow. This allows
- // the caller to retrieve the displayed code that is used to authorize on the second device.
- Result accesstokens.DeviceCodeResult
- authParams authority.AuthParams
-
- accessTokens AccessTokens
-}
-
-// Token returns a token AFTER the user uses the user code on the second device. This will block
-// until either: (1) the code is input by the user and the service releases a token, (2) the token
-// expires, (3) the Context passed to .DeviceCode() is cancelled or expires, (4) some other service
-// error occurs.
-func (d DeviceCode) Token(ctx context.Context) (accesstokens.TokenResponse, error) {
- if d.accessTokens == nil {
- return accesstokens.TokenResponse{}, fmt.Errorf("DeviceCode was either created outside its package or the creating method had an error. DeviceCode is not valid")
- }
-
- var cancel context.CancelFunc
- if deadline, ok := ctx.Deadline(); !ok || d.Result.ExpiresOn.Before(deadline) {
- ctx, cancel = context.WithDeadline(ctx, d.Result.ExpiresOn)
- } else {
- ctx, cancel = context.WithCancel(ctx)
- }
- defer cancel()
-
- var interval = 50 * time.Millisecond
- timer := time.NewTimer(interval)
- defer timer.Stop()
-
- for {
- timer.Reset(interval)
- select {
- case <-ctx.Done():
- return accesstokens.TokenResponse{}, ctx.Err()
- case <-timer.C:
- interval += interval * 2
- if interval > 5*time.Second {
- interval = 5 * time.Second
- }
- }
-
- token, err := d.accessTokens.FromDeviceCodeResult(ctx, d.authParams, d.Result)
- if err != nil && isWaitDeviceCodeErr(err) {
- continue
- }
- return token, err // This handles if it was a non-wait error or success
- }
-}
-
-type deviceCodeError struct {
- Error string `json:"error"`
-}
-
-func isWaitDeviceCodeErr(err error) bool {
- var c errors.CallErr
- if !errors.As(err, &c) {
- return false
- }
- if c.Resp.StatusCode != 400 {
- return false
- }
- var dCErr deviceCodeError
- defer c.Resp.Body.Close()
- body, err := io.ReadAll(c.Resp.Body)
- if err != nil {
- return false
- }
- err = json.Unmarshal(body, &dCErr)
- if err != nil {
- return false
- }
- if dCErr.Error == "authorization_pending" || dCErr.Error == "slow_down" {
- return true
- }
- return false
-}
-
-// DeviceCode returns a DeviceCode object that can be used to get the code that must be entered on the second
-// device and optionally the token once the code has been entered on the second device.
-func (t *Client) DeviceCode(ctx context.Context, authParams authority.AuthParams) (DeviceCode, error) {
- if err := scopeError(authParams); err != nil {
- return DeviceCode{}, err
- }
-
- if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil {
- return DeviceCode{}, err
- }
-
- dcr, err := t.AccessTokens.DeviceCodeResult(ctx, authParams)
- if err != nil {
- return DeviceCode{}, err
- }
-
- return DeviceCode{Result: dcr, authParams: authParams, accessTokens: t.AccessTokens}, nil
-}
-
-func (t *Client) resolveEndpoint(ctx context.Context, authParams *authority.AuthParams, userPrincipalName string) error {
- endpoints, err := t.Resolver.ResolveEndpoints(ctx, authParams.AuthorityInfo, userPrincipalName)
- if err != nil {
- return fmt.Errorf("unable to resolve an endpoint: %s", err)
- }
- authParams.Endpoints = endpoints
- return nil
-}
-
-// scopeError takes an authority.AuthParams and returns an error
-// if len(AuthParams.Scope) == 0.
-func scopeError(a authority.AuthParams) error {
- // TODO(someone): we could look deeper at the message to determine if
- // it's a scope error, but this is a good start.
- /*
- {error":"invalid_scope","error_description":"AADSTS1002012: The provided value for scope
- openid offline_access profile is not valid. Client credential flows must have a scope value
- with /.default suffixed to the resource identifier (application ID URI)...}
- */
- if len(a.Scopes) == 0 {
- return fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which is invalid")
- }
- return nil
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go
deleted file mode 100644
index a7b7b074..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go
+++ /dev/null
@@ -1,457 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-/*
-Package accesstokens exposes a REST client for querying backend systems to get various types of
-access tokens (oauth) for use in authentication.
-
-These calls are of type "application/x-www-form-urlencoded". This means we use url.Values to
-represent arguments and then encode them into the POST body message. We receive JSON in
-return for the requests. The request definition is defined in https://tools.ietf.org/html/rfc7521#section-4.2 .
-*/
-package accesstokens
-
-import (
- "context"
- "crypto"
-
- /* #nosec */
- "crypto/sha1"
- "crypto/x509"
- "encoding/base64"
- "encoding/json"
- "fmt"
- "net/url"
- "strconv"
- "strings"
- "time"
-
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust"
- "github.com/golang-jwt/jwt/v5"
- "github.com/google/uuid"
-)
-
-const (
- grantType = "grant_type"
- deviceCode = "device_code"
- clientID = "client_id"
- clientInfo = "client_info"
- clientInfoVal = "1"
- username = "username"
- password = "password"
-)
-
-//go:generate stringer -type=AppType
-
-// AppType is whether the authorization code flow is for a public or confidential client.
-type AppType int8
-
-const (
- // ATUnknown is the zero value when the type hasn't been set.
- ATUnknown AppType = iota
- // ATPublic indicates this if for the Public.Client.
- ATPublic
- // ATConfidential indicates this if for the Confidential.Client.
- ATConfidential
-)
-
-type urlFormCaller interface {
- URLFormCall(ctx context.Context, endpoint string, qv url.Values, resp interface{}) error
-}
-
-// DeviceCodeResponse represents the HTTP response received from the device code endpoint
-type DeviceCodeResponse struct {
- authority.OAuthResponseBase
-
- UserCode string `json:"user_code"`
- DeviceCode string `json:"device_code"`
- VerificationURL string `json:"verification_url"`
- ExpiresIn int `json:"expires_in"`
- Interval int `json:"interval"`
- Message string `json:"message"`
-
- AdditionalFields map[string]interface{}
-}
-
-// Convert converts the DeviceCodeResponse to a DeviceCodeResult
-func (dcr DeviceCodeResponse) Convert(clientID string, scopes []string) DeviceCodeResult {
- expiresOn := time.Now().UTC().Add(time.Duration(dcr.ExpiresIn) * time.Second)
- return NewDeviceCodeResult(dcr.UserCode, dcr.DeviceCode, dcr.VerificationURL, expiresOn, dcr.Interval, dcr.Message, clientID, scopes)
-}
-
-// Credential represents the credential used in confidential client flows. This can be either
-// a Secret or Cert/Key.
-type Credential struct {
- // Secret contains the credential secret if we are doing auth by secret.
- Secret string
-
- // Cert is the public certificate, if we're authenticating by certificate.
- Cert *x509.Certificate
- // Key is the private key for signing, if we're authenticating by certificate.
- Key crypto.PrivateKey
- // X5c is the JWT assertion's x5c header value, required for SN/I authentication.
- X5c []string
-
- // AssertionCallback is a function provided by the application, if we're authenticating by assertion.
- AssertionCallback func(context.Context, exported.AssertionRequestOptions) (string, error)
-
- // TokenProvider is a function provided by the application that implements custom authentication
- // logic for a confidential client
- TokenProvider func(context.Context, exported.TokenProviderParameters) (exported.TokenProviderResult, error)
-}
-
-// JWT gets the jwt assertion when the credential is not using a secret.
-func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) (string, error) {
- if c.AssertionCallback != nil {
- options := exported.AssertionRequestOptions{
- ClientID: authParams.ClientID,
- TokenEndpoint: authParams.Endpoints.TokenEndpoint,
- }
- return c.AssertionCallback(ctx, options)
- }
-
- token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{
- "aud": authParams.Endpoints.TokenEndpoint,
- "exp": json.Number(strconv.FormatInt(time.Now().Add(10*time.Minute).Unix(), 10)),
- "iss": authParams.ClientID,
- "jti": uuid.New().String(),
- "nbf": json.Number(strconv.FormatInt(time.Now().Unix(), 10)),
- "sub": authParams.ClientID,
- })
- token.Header = map[string]interface{}{
- "alg": "RS256",
- "typ": "JWT",
- "x5t": base64.StdEncoding.EncodeToString(thumbprint(c.Cert)),
- }
-
- if authParams.SendX5C {
- token.Header["x5c"] = c.X5c
- }
-
- assertion, err := token.SignedString(c.Key)
- if err != nil {
- return "", fmt.Errorf("unable to sign a JWT token using private key: %w", err)
- }
- return assertion, nil
-}
-
-// thumbprint runs the asn1.Der bytes through sha1 for use in the x5t parameter of JWT.
-// https://tools.ietf.org/html/rfc7517#section-4.8
-func thumbprint(cert *x509.Certificate) []byte {
- /* #nosec */
- a := sha1.Sum(cert.Raw)
- return a[:]
-}
-
-// Client represents the REST calls to get tokens from token generator backends.
-type Client struct {
- // Comm provides the HTTP transport client.
- Comm urlFormCaller
-
- testing bool
-}
-
-// FromUsernamePassword uses a username and password to get an access token.
-func (c Client) FromUsernamePassword(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) {
- qv := url.Values{}
- if err := addClaims(qv, authParameters); err != nil {
- return TokenResponse{}, err
- }
- qv.Set(grantType, grant.Password)
- qv.Set(username, authParameters.Username)
- qv.Set(password, authParameters.Password)
- qv.Set(clientID, authParameters.ClientID)
- qv.Set(clientInfo, clientInfoVal)
- addScopeQueryParam(qv, authParameters)
-
- return c.doTokenResp(ctx, authParameters, qv)
-}
-
-// AuthCodeRequest stores the values required to request a token from the authority using an authorization code
-type AuthCodeRequest struct {
- AuthParams authority.AuthParams
- Code string
- CodeChallenge string
- Credential *Credential
- AppType AppType
-}
-
-// NewCodeChallengeRequest returns an AuthCodeRequest that uses a code challenge..
-func NewCodeChallengeRequest(params authority.AuthParams, appType AppType, cc *Credential, code, challenge string) (AuthCodeRequest, error) {
- if appType == ATUnknown {
- return AuthCodeRequest{}, fmt.Errorf("bug: NewCodeChallengeRequest() called with AppType == ATUnknown")
- }
- return AuthCodeRequest{
- AuthParams: params,
- AppType: appType,
- Code: code,
- CodeChallenge: challenge,
- Credential: cc,
- }, nil
-}
-
-// FromAuthCode uses an authorization code to retrieve an access token.
-func (c Client) FromAuthCode(ctx context.Context, req AuthCodeRequest) (TokenResponse, error) {
- var qv url.Values
-
- switch req.AppType {
- case ATUnknown:
- return TokenResponse{}, fmt.Errorf("bug: Token.AuthCode() received request with AppType == ATUnknown")
- case ATConfidential:
- var err error
- if req.Credential == nil {
- return TokenResponse{}, fmt.Errorf("AuthCodeRequest had nil Credential for Confidential app")
- }
- qv, err = prepURLVals(ctx, req.Credential, req.AuthParams)
- if err != nil {
- return TokenResponse{}, err
- }
- case ATPublic:
- qv = url.Values{}
- default:
- return TokenResponse{}, fmt.Errorf("bug: Token.AuthCode() received request with AppType == %v, which we do not recongnize", req.AppType)
- }
-
- qv.Set(grantType, grant.AuthCode)
- qv.Set("code", req.Code)
- qv.Set("code_verifier", req.CodeChallenge)
- qv.Set("redirect_uri", req.AuthParams.Redirecturi)
- qv.Set(clientID, req.AuthParams.ClientID)
- qv.Set(clientInfo, clientInfoVal)
- addScopeQueryParam(qv, req.AuthParams)
- if err := addClaims(qv, req.AuthParams); err != nil {
- return TokenResponse{}, err
- }
-
- return c.doTokenResp(ctx, req.AuthParams, qv)
-}
-
-// FromRefreshToken uses a refresh token (for refreshing credentials) to get a new access token.
-func (c Client) FromRefreshToken(ctx context.Context, appType AppType, authParams authority.AuthParams, cc *Credential, refreshToken string) (TokenResponse, error) {
- qv := url.Values{}
- if appType == ATConfidential {
- var err error
- qv, err = prepURLVals(ctx, cc, authParams)
- if err != nil {
- return TokenResponse{}, err
- }
- }
- if err := addClaims(qv, authParams); err != nil {
- return TokenResponse{}, err
- }
- qv.Set(grantType, grant.RefreshToken)
- qv.Set(clientID, authParams.ClientID)
- qv.Set(clientInfo, clientInfoVal)
- qv.Set("refresh_token", refreshToken)
- addScopeQueryParam(qv, authParams)
-
- return c.doTokenResp(ctx, authParams, qv)
-}
-
-// FromClientSecret uses a client's secret (aka password) to get a new token.
-func (c Client) FromClientSecret(ctx context.Context, authParameters authority.AuthParams, clientSecret string) (TokenResponse, error) {
- qv := url.Values{}
- if err := addClaims(qv, authParameters); err != nil {
- return TokenResponse{}, err
- }
- qv.Set(grantType, grant.ClientCredential)
- qv.Set("client_secret", clientSecret)
- qv.Set(clientID, authParameters.ClientID)
- addScopeQueryParam(qv, authParameters)
-
- token, err := c.doTokenResp(ctx, authParameters, qv)
- if err != nil {
- return token, fmt.Errorf("FromClientSecret(): %w", err)
- }
- return token, nil
-}
-
-func (c Client) FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (TokenResponse, error) {
- qv := url.Values{}
- if err := addClaims(qv, authParameters); err != nil {
- return TokenResponse{}, err
- }
- qv.Set(grantType, grant.ClientCredential)
- qv.Set("client_assertion_type", grant.ClientAssertion)
- qv.Set("client_assertion", assertion)
- qv.Set(clientID, authParameters.ClientID)
- qv.Set(clientInfo, clientInfoVal)
- addScopeQueryParam(qv, authParameters)
-
- token, err := c.doTokenResp(ctx, authParameters, qv)
- if err != nil {
- return token, fmt.Errorf("FromAssertion(): %w", err)
- }
- return token, nil
-}
-
-func (c Client) FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (TokenResponse, error) {
- qv := url.Values{}
- if err := addClaims(qv, authParameters); err != nil {
- return TokenResponse{}, err
- }
- qv.Set(grantType, grant.JWT)
- qv.Set(clientID, authParameters.ClientID)
- qv.Set("client_secret", clientSecret)
- qv.Set("assertion", userAssertion)
- qv.Set(clientInfo, clientInfoVal)
- qv.Set("requested_token_use", "on_behalf_of")
- addScopeQueryParam(qv, authParameters)
-
- return c.doTokenResp(ctx, authParameters, qv)
-}
-
-func (c Client) FromUserAssertionClientCertificate(ctx context.Context, authParameters authority.AuthParams, userAssertion string, assertion string) (TokenResponse, error) {
- qv := url.Values{}
- if err := addClaims(qv, authParameters); err != nil {
- return TokenResponse{}, err
- }
- qv.Set(grantType, grant.JWT)
- qv.Set("client_assertion_type", grant.ClientAssertion)
- qv.Set("client_assertion", assertion)
- qv.Set(clientID, authParameters.ClientID)
- qv.Set("assertion", userAssertion)
- qv.Set(clientInfo, clientInfoVal)
- qv.Set("requested_token_use", "on_behalf_of")
- addScopeQueryParam(qv, authParameters)
-
- return c.doTokenResp(ctx, authParameters, qv)
-}
-
-func (c Client) DeviceCodeResult(ctx context.Context, authParameters authority.AuthParams) (DeviceCodeResult, error) {
- qv := url.Values{}
- if err := addClaims(qv, authParameters); err != nil {
- return DeviceCodeResult{}, err
- }
- qv.Set(clientID, authParameters.ClientID)
- addScopeQueryParam(qv, authParameters)
-
- endpoint := strings.Replace(authParameters.Endpoints.TokenEndpoint, "token", "devicecode", -1)
-
- resp := DeviceCodeResponse{}
- err := c.Comm.URLFormCall(ctx, endpoint, qv, &resp)
- if err != nil {
- return DeviceCodeResult{}, err
- }
-
- return resp.Convert(authParameters.ClientID, authParameters.Scopes), nil
-}
-
-func (c Client) FromDeviceCodeResult(ctx context.Context, authParameters authority.AuthParams, deviceCodeResult DeviceCodeResult) (TokenResponse, error) {
- qv := url.Values{}
- if err := addClaims(qv, authParameters); err != nil {
- return TokenResponse{}, err
- }
- qv.Set(grantType, grant.DeviceCode)
- qv.Set(deviceCode, deviceCodeResult.DeviceCode)
- qv.Set(clientID, authParameters.ClientID)
- qv.Set(clientInfo, clientInfoVal)
- addScopeQueryParam(qv, authParameters)
-
- return c.doTokenResp(ctx, authParameters, qv)
-}
-
-func (c Client) FromSamlGrant(ctx context.Context, authParameters authority.AuthParams, samlGrant wstrust.SamlTokenInfo) (TokenResponse, error) {
- qv := url.Values{}
- if err := addClaims(qv, authParameters); err != nil {
- return TokenResponse{}, err
- }
- qv.Set(username, authParameters.Username)
- qv.Set(password, authParameters.Password)
- qv.Set(clientID, authParameters.ClientID)
- qv.Set(clientInfo, clientInfoVal)
- qv.Set("assertion", base64.StdEncoding.WithPadding(base64.StdPadding).EncodeToString([]byte(samlGrant.Assertion)))
- addScopeQueryParam(qv, authParameters)
-
- switch samlGrant.AssertionType {
- case grant.SAMLV1:
- qv.Set(grantType, grant.SAMLV1)
- case grant.SAMLV2:
- qv.Set(grantType, grant.SAMLV2)
- default:
- return TokenResponse{}, fmt.Errorf("GetAccessTokenFromSamlGrant returned unknown SAML assertion type: %q", samlGrant.AssertionType)
- }
-
- return c.doTokenResp(ctx, authParameters, qv)
-}
-
-func (c Client) doTokenResp(ctx context.Context, authParams authority.AuthParams, qv url.Values) (TokenResponse, error) {
- resp := TokenResponse{}
- if authParams.AuthnScheme != nil {
- trParams := authParams.AuthnScheme.TokenRequestParams()
- for k, v := range trParams {
- qv.Set(k, v)
- }
- }
- err := c.Comm.URLFormCall(ctx, authParams.Endpoints.TokenEndpoint, qv, &resp)
- if err != nil {
- return resp, err
- }
- resp.ComputeScope(authParams)
- if c.testing {
- return resp, nil
- }
- return resp, resp.Validate()
-}
-
-// prepURLVals returns an url.Values that sets various key/values if we are doing secrets
-// or JWT assertions.
-func prepURLVals(ctx context.Context, cc *Credential, authParams authority.AuthParams) (url.Values, error) {
- params := url.Values{}
- if cc.Secret != "" {
- params.Set("client_secret", cc.Secret)
- return params, nil
- }
-
- jwt, err := cc.JWT(ctx, authParams)
- if err != nil {
- return nil, err
- }
- params.Set("client_assertion", jwt)
- params.Set("client_assertion_type", grant.ClientAssertion)
- return params, nil
-}
-
-// openid required to get an id token
-// offline_access required to get a refresh token
-// profile required to get the client_info field back
-var detectDefaultScopes = map[string]bool{
- "openid": true,
- "offline_access": true,
- "profile": true,
-}
-
-var defaultScopes = []string{"openid", "offline_access", "profile"}
-
-func AppendDefaultScopes(authParameters authority.AuthParams) []string {
- scopes := make([]string, 0, len(authParameters.Scopes)+len(defaultScopes))
- for _, scope := range authParameters.Scopes {
- s := strings.TrimSpace(scope)
- if s == "" {
- continue
- }
- if detectDefaultScopes[scope] {
- continue
- }
- scopes = append(scopes, scope)
- }
- scopes = append(scopes, defaultScopes...)
- return scopes
-}
-
-// addClaims adds client capabilities and claims from AuthParams to the given url.Values
-func addClaims(v url.Values, ap authority.AuthParams) error {
- claims, err := ap.MergeCapabilitiesAndClaims()
- if err == nil && claims != "" {
- v.Set("claims", claims)
- }
- return err
-}
-
-func addScopeQueryParam(queryParams url.Values, authParameters authority.AuthParams) {
- scopes := AppendDefaultScopes(authParameters)
- queryParams.Set("scope", strings.Join(scopes, " "))
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go
deleted file mode 100644
index 3bec4a67..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/apptype_string.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Code generated by "stringer -type=AppType"; DO NOT EDIT.
-
-package accesstokens
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[ATUnknown-0]
- _ = x[ATPublic-1]
- _ = x[ATConfidential-2]
-}
-
-const _AppType_name = "ATUnknownATPublicATConfidential"
-
-var _AppType_index = [...]uint8{0, 9, 17, 31}
-
-func (i AppType) String() string {
- if i < 0 || i >= AppType(len(_AppType_index)-1) {
- return "AppType(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _AppType_name[_AppType_index[i]:_AppType_index[i+1]]
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go
deleted file mode 100644
index 3107b45c..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go
+++ /dev/null
@@ -1,339 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-package accesstokens
-
-import (
- "bytes"
- "encoding/base64"
- "encoding/json"
- "errors"
- "fmt"
- "reflect"
- "strings"
- "time"
-
- internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
-)
-
-// IDToken consists of all the information used to validate a user.
-// https://docs.microsoft.com/azure/active-directory/develop/id-tokens .
-type IDToken struct {
- PreferredUsername string `json:"preferred_username,omitempty"`
- GivenName string `json:"given_name,omitempty"`
- FamilyName string `json:"family_name,omitempty"`
- MiddleName string `json:"middle_name,omitempty"`
- Name string `json:"name,omitempty"`
- Oid string `json:"oid,omitempty"`
- TenantID string `json:"tid,omitempty"`
- Subject string `json:"sub,omitempty"`
- UPN string `json:"upn,omitempty"`
- Email string `json:"email,omitempty"`
- AlternativeID string `json:"alternative_id,omitempty"`
- Issuer string `json:"iss,omitempty"`
- Audience string `json:"aud,omitempty"`
- ExpirationTime int64 `json:"exp,omitempty"`
- IssuedAt int64 `json:"iat,omitempty"`
- NotBefore int64 `json:"nbf,omitempty"`
- RawToken string
-
- AdditionalFields map[string]interface{}
-}
-
-var null = []byte("null")
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (i *IDToken) UnmarshalJSON(b []byte) error {
- if bytes.Equal(null, b) {
- return nil
- }
-
- // Because we have a custom unmarshaler, you
- // cannot directly call json.Unmarshal here. If you do, it will call this function
- // recursively until reach our recursion limit. We have to create a new type
- // that doesn't have this method in order to use json.Unmarshal.
- type idToken2 IDToken
-
- jwt := strings.Trim(string(b), `"`)
- jwtArr := strings.Split(jwt, ".")
- if len(jwtArr) < 2 {
- return errors.New("IDToken returned from server is invalid")
- }
-
- jwtPart := jwtArr[1]
- jwtDecoded, err := decodeJWT(jwtPart)
- if err != nil {
- return fmt.Errorf("unable to unmarshal IDToken, problem decoding JWT: %w", err)
- }
-
- token := idToken2{}
- err = json.Unmarshal(jwtDecoded, &token)
- if err != nil {
- return fmt.Errorf("unable to unmarshal IDToken: %w", err)
- }
- token.RawToken = jwt
-
- *i = IDToken(token)
- return nil
-}
-
-// IsZero indicates if the IDToken is the zero value.
-func (i IDToken) IsZero() bool {
- v := reflect.ValueOf(i)
- for i := 0; i < v.NumField(); i++ {
- field := v.Field(i)
- if !field.IsZero() {
- switch field.Kind() {
- case reflect.Map, reflect.Slice:
- if field.Len() == 0 {
- continue
- }
- }
- return false
- }
- }
- return true
-}
-
-// LocalAccountID extracts an account's local account ID from an ID token.
-func (i IDToken) LocalAccountID() string {
- if i.Oid != "" {
- return i.Oid
- }
- return i.Subject
-}
-
-// jwtDecoder is provided to allow tests to provide their own.
-var jwtDecoder = decodeJWT
-
-// ClientInfo is used to create a Home Account ID for an account.
-type ClientInfo struct {
- UID string `json:"uid"`
- UTID string `json:"utid"`
-
- AdditionalFields map[string]interface{}
-}
-
-// UnmarshalJSON implements json.Unmarshaler.s
-func (c *ClientInfo) UnmarshalJSON(b []byte) error {
- s := strings.Trim(string(b), `"`)
- // Client info may be empty in some flows, e.g. certificate exchange.
- if len(s) == 0 {
- return nil
- }
-
- // Because we have a custom unmarshaler, you
- // cannot directly call json.Unmarshal here. If you do, it will call this function
- // recursively until reach our recursion limit. We have to create a new type
- // that doesn't have this method in order to use json.Unmarshal.
- type clientInfo2 ClientInfo
-
- raw, err := jwtDecoder(s)
- if err != nil {
- return fmt.Errorf("TokenResponse client_info field had JWT decode error: %w", err)
- }
-
- var c2 clientInfo2
-
- err = json.Unmarshal(raw, &c2)
- if err != nil {
- return fmt.Errorf("was unable to unmarshal decoded JWT in TokenRespone to ClientInfo: %w", err)
- }
-
- *c = ClientInfo(c2)
- return nil
-}
-
-// Scopes represents scopes in a TokenResponse.
-type Scopes struct {
- Slice []string
-}
-
-// UnmarshalJSON implements json.Unmarshal.
-func (s *Scopes) UnmarshalJSON(b []byte) error {
- str := strings.Trim(string(b), `"`)
- if len(str) == 0 {
- return nil
- }
- sl := strings.Split(str, " ")
- s.Slice = sl
- return nil
-}
-
-// TokenResponse is the information that is returned from a token endpoint during a token acquisition flow.
-type TokenResponse struct {
- authority.OAuthResponseBase
-
- AccessToken string `json:"access_token"`
- RefreshToken string `json:"refresh_token"`
- TokenType string `json:"token_type"`
-
- FamilyID string `json:"foci"`
- IDToken IDToken `json:"id_token"`
- ClientInfo ClientInfo `json:"client_info"`
- ExpiresOn internalTime.DurationTime `json:"expires_in"`
- ExtExpiresOn internalTime.DurationTime `json:"ext_expires_in"`
- GrantedScopes Scopes `json:"scope"`
- DeclinedScopes []string // This is derived
-
- AdditionalFields map[string]interface{}
-
- scopesComputed bool
-}
-
-// ComputeScope computes the final scopes based on what was granted by the server and
-// what our AuthParams were from the authority server. Per OAuth spec, if no scopes are returned, the response should be treated as if all scopes were granted
-// This behavior can be observed in client assertion flows, but can happen at any time, this check ensures we treat
-// those special responses properly Link to spec: https://tools.ietf.org/html/rfc6749#section-3.3
-func (tr *TokenResponse) ComputeScope(authParams authority.AuthParams) {
- if len(tr.GrantedScopes.Slice) == 0 {
- tr.GrantedScopes = Scopes{Slice: authParams.Scopes}
- } else {
- tr.DeclinedScopes = findDeclinedScopes(authParams.Scopes, tr.GrantedScopes.Slice)
- }
- tr.scopesComputed = true
-}
-
-// HomeAccountID uniquely identifies the authenticated account, if any. It's "" when the token is an app token.
-func (tr *TokenResponse) HomeAccountID() string {
- id := tr.IDToken.Subject
- if uid := tr.ClientInfo.UID; uid != "" {
- utid := tr.ClientInfo.UTID
- if utid == "" {
- utid = uid
- }
- id = fmt.Sprintf("%s.%s", uid, utid)
- }
- return id
-}
-
-// Validate validates the TokenResponse has basic valid values. It must be called
-// after ComputeScopes() is called.
-func (tr *TokenResponse) Validate() error {
- if tr.Error != "" {
- return fmt.Errorf("%s: %s", tr.Error, tr.ErrorDescription)
- }
-
- if tr.AccessToken == "" {
- return errors.New("response is missing access_token")
- }
-
- if !tr.scopesComputed {
- return fmt.Errorf("TokenResponse hasn't had ScopesComputed() called")
- }
- return nil
-}
-
-func (tr *TokenResponse) CacheKey(authParams authority.AuthParams) string {
- if authParams.AuthorizationType == authority.ATOnBehalfOf {
- return authParams.AssertionHash()
- }
- if authParams.AuthorizationType == authority.ATClientCredentials {
- return authParams.AppKey()
- }
- if authParams.IsConfidentialClient || authParams.AuthorizationType == authority.ATRefreshToken {
- return tr.HomeAccountID()
- }
- return ""
-}
-
-func findDeclinedScopes(requestedScopes []string, grantedScopes []string) []string {
- declined := []string{}
- grantedMap := map[string]bool{}
- for _, s := range grantedScopes {
- grantedMap[strings.ToLower(s)] = true
- }
- // Comparing the requested scopes with the granted scopes to see if there are any scopes that have been declined.
- for _, r := range requestedScopes {
- if !grantedMap[strings.ToLower(r)] {
- declined = append(declined, r)
- }
- }
- return declined
-}
-
-// decodeJWT decodes a JWT and converts it to a byte array representing a JSON object
-// JWT has headers and payload base64url encoded without padding
-// https://tools.ietf.org/html/rfc7519#section-3 and
-// https://tools.ietf.org/html/rfc7515#section-2
-func decodeJWT(data string) ([]byte, error) {
- // https://tools.ietf.org/html/rfc7515#appendix-C
- return base64.RawURLEncoding.DecodeString(data)
-}
-
-// RefreshToken is the JSON representation of a MSAL refresh token for encoding to storage.
-type RefreshToken struct {
- HomeAccountID string `json:"home_account_id,omitempty"`
- Environment string `json:"environment,omitempty"`
- CredentialType string `json:"credential_type,omitempty"`
- ClientID string `json:"client_id,omitempty"`
- FamilyID string `json:"family_id,omitempty"`
- Secret string `json:"secret,omitempty"`
- Realm string `json:"realm,omitempty"`
- Target string `json:"target,omitempty"`
- UserAssertionHash string `json:"user_assertion_hash,omitempty"`
-
- AdditionalFields map[string]interface{}
-}
-
-// NewRefreshToken is the constructor for RefreshToken.
-func NewRefreshToken(homeID, env, clientID, refreshToken, familyID string) RefreshToken {
- return RefreshToken{
- HomeAccountID: homeID,
- Environment: env,
- CredentialType: "RefreshToken",
- ClientID: clientID,
- FamilyID: familyID,
- Secret: refreshToken,
- }
-}
-
-// Key outputs the key that can be used to uniquely look up this entry in a map.
-func (rt RefreshToken) Key() string {
- var fourth = rt.FamilyID
- if fourth == "" {
- fourth = rt.ClientID
- }
-
- key := strings.Join(
- []string{rt.HomeAccountID, rt.Environment, rt.CredentialType, fourth},
- shared.CacheKeySeparator,
- )
- return strings.ToLower(key)
-}
-
-func (rt RefreshToken) GetSecret() string {
- return rt.Secret
-}
-
-// DeviceCodeResult stores the response from the STS device code endpoint.
-type DeviceCodeResult struct {
- // UserCode is the code the user needs to provide when authentication at the verification URI.
- UserCode string
- // DeviceCode is the code used in the access token request.
- DeviceCode string
- // VerificationURL is the the URL where user can authenticate.
- VerificationURL string
- // ExpiresOn is the expiration time of device code in seconds.
- ExpiresOn time.Time
- // Interval is the interval at which the STS should be polled at.
- Interval int
- // Message is the message which should be displayed to the user.
- Message string
- // ClientID is the UUID issued by the authorization server for your application.
- ClientID string
- // Scopes is the OpenID scopes used to request access a protected API.
- Scopes []string
-}
-
-// NewDeviceCodeResult creates a DeviceCodeResult instance.
-func NewDeviceCodeResult(userCode, deviceCode, verificationURL string, expiresOn time.Time, interval int, message, clientID string, scopes []string) DeviceCodeResult {
- return DeviceCodeResult{userCode, deviceCode, verificationURL, expiresOn, interval, message, clientID, scopes}
-}
-
-func (dcr DeviceCodeResult) String() string {
- return fmt.Sprintf("UserCode: (%v)\nDeviceCode: (%v)\nURL: (%v)\nMessage: (%v)\n", dcr.UserCode, dcr.DeviceCode, dcr.VerificationURL, dcr.Message)
-
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go
deleted file mode 100644
index 9d60734f..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go
+++ /dev/null
@@ -1,589 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-package authority
-
-import (
- "context"
- "crypto/sha256"
- "encoding/base64"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "os"
- "path"
- "strings"
- "time"
-
- "github.com/google/uuid"
-)
-
-const (
- authorizationEndpoint = "https://%v/%v/oauth2/v2.0/authorize"
- instanceDiscoveryEndpoint = "https://%v/common/discovery/instance"
- tenantDiscoveryEndpointWithRegion = "https://%s.%s/%s/v2.0/.well-known/openid-configuration"
- regionName = "REGION_NAME"
- defaultAPIVersion = "2021-10-01"
- imdsEndpoint = "http://169.254.169.254/metadata/instance/compute/location?format=text&api-version=" + defaultAPIVersion
- autoDetectRegion = "TryAutoDetect"
- AccessTokenTypeBearer = "Bearer"
-)
-
-// These are various hosts that host AAD Instance discovery endpoints.
-const (
- defaultHost = "login.microsoftonline.com"
- loginMicrosoft = "login.microsoft.com"
- loginWindows = "login.windows.net"
- loginSTSWindows = "sts.windows.net"
- loginMicrosoftOnline = defaultHost
-)
-
-// jsonCaller is an interface that allows us to mock the JSONCall method.
-type jsonCaller interface {
- JSONCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, body, resp interface{}) error
-}
-
-var aadTrustedHostList = map[string]bool{
- "login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list
- "login.chinacloudapi.cn": true, // Microsoft Azure China
- "login.microsoftonline.de": true, // Microsoft Azure Blackforest
- "login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy
- "login.microsoftonline.us": true, // Microsoft Azure US Government
- "login.microsoftonline.com": true, // Microsoft Azure Worldwide
- "login.cloudgovapi.us": true, // Microsoft Azure US Government
-}
-
-// TrustedHost checks if an AAD host is trusted/valid.
-func TrustedHost(host string) bool {
- if _, ok := aadTrustedHostList[host]; ok {
- return true
- }
- return false
-}
-
-// OAuthResponseBase is the base JSON return message for an OAuth call.
-// This is embedded in other calls to get the base fields from every response.
-type OAuthResponseBase struct {
- Error string `json:"error"`
- SubError string `json:"suberror"`
- ErrorDescription string `json:"error_description"`
- ErrorCodes []int `json:"error_codes"`
- CorrelationID string `json:"correlation_id"`
- Claims string `json:"claims"`
-}
-
-// TenantDiscoveryResponse is the tenant endpoints from the OpenID configuration endpoint.
-type TenantDiscoveryResponse struct {
- OAuthResponseBase
-
- AuthorizationEndpoint string `json:"authorization_endpoint"`
- TokenEndpoint string `json:"token_endpoint"`
- Issuer string `json:"issuer"`
-
- AdditionalFields map[string]interface{}
-}
-
-// Validate validates that the response had the correct values required.
-func (r *TenantDiscoveryResponse) Validate() error {
- switch "" {
- case r.AuthorizationEndpoint:
- return errors.New("TenantDiscoveryResponse: authorize endpoint was not found in the openid configuration")
- case r.TokenEndpoint:
- return errors.New("TenantDiscoveryResponse: token endpoint was not found in the openid configuration")
- case r.Issuer:
- return errors.New("TenantDiscoveryResponse: issuer was not found in the openid configuration")
- }
- return nil
-}
-
-type InstanceDiscoveryMetadata struct {
- PreferredNetwork string `json:"preferred_network"`
- PreferredCache string `json:"preferred_cache"`
- Aliases []string `json:"aliases"`
-
- AdditionalFields map[string]interface{}
-}
-
-type InstanceDiscoveryResponse struct {
- TenantDiscoveryEndpoint string `json:"tenant_discovery_endpoint"`
- Metadata []InstanceDiscoveryMetadata `json:"metadata"`
-
- AdditionalFields map[string]interface{}
-}
-
-//go:generate stringer -type=AuthorizeType
-
-// AuthorizeType represents the type of token flow.
-type AuthorizeType int
-
-// These are all the types of token flows.
-const (
- ATUnknown AuthorizeType = iota
- ATUsernamePassword
- ATWindowsIntegrated
- ATAuthCode
- ATInteractive
- ATClientCredentials
- ATDeviceCode
- ATRefreshToken
- AccountByID
- ATOnBehalfOf
-)
-
-// These are all authority types
-const (
- AAD = "MSSTS"
- ADFS = "ADFS"
-)
-
-// AuthenticationScheme is an extensibility mechanism designed to be used only by Azure Arc for proof of possession access tokens.
-type AuthenticationScheme interface {
- // Extra parameters that are added to the request to the /token endpoint.
- TokenRequestParams() map[string]string
- // Key ID of the public / private key pair used by the encryption algorithm, if any.
- // Tokens obtained by authentication schemes that use this are bound to the KeyId, i.e.
- // if a different kid is presented, the access token cannot be used.
- KeyID() string
- // Creates the access token that goes into an Authorization HTTP header.
- FormatAccessToken(accessToken string) (string, error)
- //Expected to match the token_type parameter returned by ESTS. Used to disambiguate
- // between ATs of different types (e.g. Bearer and PoP) when loading from cache etc.
- AccessTokenType() string
-}
-
-// default authn scheme realizing AuthenticationScheme for "Bearer" tokens
-type BearerAuthenticationScheme struct{}
-
-var bearerAuthnScheme BearerAuthenticationScheme
-
-func (ba *BearerAuthenticationScheme) TokenRequestParams() map[string]string {
- return nil
-}
-func (ba *BearerAuthenticationScheme) KeyID() string {
- return ""
-}
-func (ba *BearerAuthenticationScheme) FormatAccessToken(accessToken string) (string, error) {
- return accessToken, nil
-}
-func (ba *BearerAuthenticationScheme) AccessTokenType() string {
- return AccessTokenTypeBearer
-}
-
-// AuthParams represents the parameters used for authorization for token acquisition.
-type AuthParams struct {
- AuthorityInfo Info
- CorrelationID string
- Endpoints Endpoints
- ClientID string
- // Redirecturi is used for auth flows that specify a redirect URI (e.g. local server for interactive auth flow).
- Redirecturi string
- HomeAccountID string
- // Username is the user-name portion for username/password auth flow.
- Username string
- // Password is the password portion for username/password auth flow.
- Password string
- // Scopes is the list of scopes the user consents to.
- Scopes []string
- // AuthorizationType specifies the auth flow being used.
- AuthorizationType AuthorizeType
- // State is a random value used to prevent cross-site request forgery attacks.
- State string
- // CodeChallenge is derived from a code verifier and is sent in the auth request.
- CodeChallenge string
- // CodeChallengeMethod describes the method used to create the CodeChallenge.
- CodeChallengeMethod string
- // Prompt specifies the user prompt type during interactive auth.
- Prompt string
- // IsConfidentialClient specifies if it is a confidential client.
- IsConfidentialClient bool
- // SendX5C specifies if x5c claim(public key of the certificate) should be sent to STS.
- SendX5C bool
- // UserAssertion is the access token used to acquire token on behalf of user
- UserAssertion string
- // Capabilities the client will include with each token request, for example "CP1".
- // Call [NewClientCapabilities] to construct a value for this field.
- Capabilities ClientCapabilities
- // Claims required for an access token to satisfy a conditional access policy
- Claims string
- // KnownAuthorityHosts don't require metadata discovery because they're known to the user
- KnownAuthorityHosts []string
- // LoginHint is a username with which to pre-populate account selection during interactive auth
- LoginHint string
- // DomainHint is a directive that can be used to accelerate the user to their federated IdP sign-in page
- DomainHint string
- // AuthnScheme is an optional scheme for formatting access tokens
- AuthnScheme AuthenticationScheme
-}
-
-// NewAuthParams creates an authorization parameters object.
-func NewAuthParams(clientID string, authorityInfo Info) AuthParams {
- return AuthParams{
- ClientID: clientID,
- AuthorityInfo: authorityInfo,
- CorrelationID: uuid.New().String(),
- AuthnScheme: &bearerAuthnScheme,
- }
-}
-
-// WithTenant returns a copy of the AuthParams having the specified tenant ID. If the given
-// ID is empty, the copy is identical to the original. This function returns an error in
-// several cases:
-// - ID isn't specific (for example, it's "common")
-// - ID is non-empty and the authority doesn't support tenants (for example, it's an ADFS authority)
-// - the client is configured to authenticate only Microsoft accounts via the "consumers" endpoint
-// - the resulting authority URL is invalid
-func (p AuthParams) WithTenant(ID string) (AuthParams, error) {
- switch ID {
- case "", p.AuthorityInfo.Tenant:
- // keep the default tenant because the caller didn't override it
- return p, nil
- case "common", "consumers", "organizations":
- if p.AuthorityInfo.AuthorityType == AAD {
- return p, fmt.Errorf(`tenant ID must be a specific tenant, not "%s"`, ID)
- }
- // else we'll return a better error below
- }
- if p.AuthorityInfo.AuthorityType != AAD {
- return p, errors.New("the authority doesn't support tenants")
- }
- if p.AuthorityInfo.Tenant == "consumers" {
- return p, errors.New(`client is configured to authenticate only personal Microsoft accounts, via the "consumers" endpoint`)
- }
- authority := "https://" + path.Join(p.AuthorityInfo.Host, ID)
- info, err := NewInfoFromAuthorityURI(authority, p.AuthorityInfo.ValidateAuthority, p.AuthorityInfo.InstanceDiscoveryDisabled)
- if err == nil {
- info.Region = p.AuthorityInfo.Region
- p.AuthorityInfo = info
- }
- return p, err
-}
-
-// MergeCapabilitiesAndClaims combines client capabilities and challenge claims into a value suitable for an authentication request's "claims" parameter.
-func (p AuthParams) MergeCapabilitiesAndClaims() (string, error) {
- claims := p.Claims
- if len(p.Capabilities.asMap) > 0 {
- if claims == "" {
- // without claims the result is simply the capabilities
- return p.Capabilities.asJSON, nil
- }
- // Otherwise, merge claims and capabilties into a single JSON object.
- // We handle the claims challenge as a map because we don't know its structure.
- var challenge map[string]any
- if err := json.Unmarshal([]byte(claims), &challenge); err != nil {
- return "", fmt.Errorf(`claims must be JSON. Are they base64 encoded? json.Unmarshal returned "%v"`, err)
- }
- if err := merge(p.Capabilities.asMap, challenge); err != nil {
- return "", err
- }
- b, err := json.Marshal(challenge)
- if err != nil {
- return "", err
- }
- claims = string(b)
- }
- return claims, nil
-}
-
-// merges a into b without overwriting b's values. Returns an error when a and b share a key for which either has a non-object value.
-func merge(a, b map[string]any) error {
- for k, av := range a {
- if bv, ok := b[k]; !ok {
- // b doesn't contain this key => simply set it to a's value
- b[k] = av
- } else {
- // b does contain this key => recursively merge a[k] into b[k], provided both are maps. If a[k] or b[k] isn't
- // a map, return an error because merging would overwrite some value in b. Errors shouldn't occur in practice
- // because the challenge will be from AAD, which knows the capabilities format.
- if A, ok := av.(map[string]any); ok {
- if B, ok := bv.(map[string]any); ok {
- return merge(A, B)
- } else {
- // b[k] isn't a map
- return errors.New("challenge claims conflict with client capabilities")
- }
- } else {
- // a[k] isn't a map
- return errors.New("challenge claims conflict with client capabilities")
- }
- }
- }
- return nil
-}
-
-// ClientCapabilities stores capabilities in the formats used by AuthParams.MergeCapabilitiesAndClaims.
-// [NewClientCapabilities] precomputes these representations because capabilities are static for the
-// lifetime of a client and are included with every authentication request i.e., these computations
-// always have the same result and would otherwise have to be repeated for every request.
-type ClientCapabilities struct {
- // asJSON is for the common case: adding the capabilities to an auth request with no challenge claims
- asJSON string
- // asMap is for merging the capabilities with challenge claims
- asMap map[string]any
-}
-
-func NewClientCapabilities(capabilities []string) (ClientCapabilities, error) {
- c := ClientCapabilities{}
- var err error
- if len(capabilities) > 0 {
- cpbs := make([]string, len(capabilities))
- for i := 0; i < len(cpbs); i++ {
- cpbs[i] = fmt.Sprintf(`"%s"`, capabilities[i])
- }
- c.asJSON = fmt.Sprintf(`{"access_token":{"xms_cc":{"values":[%s]}}}`, strings.Join(cpbs, ","))
- // note our JSON is valid but we can't stop users breaking it with garbage like "}"
- err = json.Unmarshal([]byte(c.asJSON), &c.asMap)
- }
- return c, err
-}
-
-// Info consists of information about the authority.
-type Info struct {
- Host string
- CanonicalAuthorityURI string
- AuthorityType string
- UserRealmURIPrefix string
- ValidateAuthority bool
- Tenant string
- Region string
- InstanceDiscoveryDisabled bool
-}
-
-func firstPathSegment(u *url.URL) (string, error) {
- pathParts := strings.Split(u.EscapedPath(), "/")
- if len(pathParts) >= 2 {
- return pathParts[1], nil
- }
-
- return "", errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`)
-}
-
-// NewInfoFromAuthorityURI creates an AuthorityInfo instance from the authority URL provided.
-func NewInfoFromAuthorityURI(authority string, validateAuthority bool, instanceDiscoveryDisabled bool) (Info, error) {
- u, err := url.Parse(strings.ToLower(authority))
- if err != nil || u.Scheme != "https" {
- return Info{}, errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`)
- }
-
- tenant, err := firstPathSegment(u)
- if err != nil {
- return Info{}, err
- }
- authorityType := AAD
- if tenant == "adfs" {
- authorityType = ADFS
- }
-
- // u.Host includes the port, if any, which is required for private cloud deployments
- return Info{
- Host: u.Host,
- CanonicalAuthorityURI: fmt.Sprintf("https://%v/%v/", u.Host, tenant),
- AuthorityType: authorityType,
- UserRealmURIPrefix: fmt.Sprintf("https://%v/common/userrealm/", u.Hostname()),
- ValidateAuthority: validateAuthority,
- Tenant: tenant,
- InstanceDiscoveryDisabled: instanceDiscoveryDisabled,
- }, nil
-}
-
-// Endpoints consists of the endpoints from the tenant discovery response.
-type Endpoints struct {
- AuthorizationEndpoint string
- TokenEndpoint string
- selfSignedJwtAudience string
- authorityHost string
-}
-
-// NewEndpoints creates an Endpoints object.
-func NewEndpoints(authorizationEndpoint string, tokenEndpoint string, selfSignedJwtAudience string, authorityHost string) Endpoints {
- return Endpoints{authorizationEndpoint, tokenEndpoint, selfSignedJwtAudience, authorityHost}
-}
-
-// UserRealmAccountType refers to the type of user realm.
-type UserRealmAccountType string
-
-// These are the different types of user realms.
-const (
- Unknown UserRealmAccountType = ""
- Federated UserRealmAccountType = "Federated"
- Managed UserRealmAccountType = "Managed"
-)
-
-// UserRealm is used for the username password request to determine user type
-type UserRealm struct {
- AccountType UserRealmAccountType `json:"account_type"`
- DomainName string `json:"domain_name"`
- CloudInstanceName string `json:"cloud_instance_name"`
- CloudAudienceURN string `json:"cloud_audience_urn"`
-
- // required if accountType is Federated
- FederationProtocol string `json:"federation_protocol"`
- FederationMetadataURL string `json:"federation_metadata_url"`
-
- AdditionalFields map[string]interface{}
-}
-
-func (u UserRealm) validate() error {
- switch "" {
- case string(u.AccountType):
- return errors.New("the account type (Federated or Managed) is missing")
- case u.DomainName:
- return errors.New("domain name of user realm is missing")
- case u.CloudInstanceName:
- return errors.New("cloud instance name of user realm is missing")
- case u.CloudAudienceURN:
- return errors.New("cloud Instance URN is missing")
- }
-
- if u.AccountType == Federated {
- switch "" {
- case u.FederationProtocol:
- return errors.New("federation protocol of user realm is missing")
- case u.FederationMetadataURL:
- return errors.New("federation metadata URL of user realm is missing")
- }
- }
- return nil
-}
-
-// Client represents the REST calls to authority backends.
-type Client struct {
- // Comm provides the HTTP transport client.
- Comm jsonCaller // *comm.Client
-}
-
-func (c Client) UserRealm(ctx context.Context, authParams AuthParams) (UserRealm, error) {
- endpoint := fmt.Sprintf("https://%s/common/UserRealm/%s", authParams.Endpoints.authorityHost, url.PathEscape(authParams.Username))
- qv := url.Values{
- "api-version": []string{"1.0"},
- }
-
- resp := UserRealm{}
- err := c.Comm.JSONCall(
- ctx,
- endpoint,
- http.Header{"client-request-id": []string{authParams.CorrelationID}},
- qv,
- nil,
- &resp,
- )
- if err != nil {
- return resp, err
- }
-
- return resp, resp.validate()
-}
-
-func (c Client) GetTenantDiscoveryResponse(ctx context.Context, openIDConfigurationEndpoint string) (TenantDiscoveryResponse, error) {
- resp := TenantDiscoveryResponse{}
- err := c.Comm.JSONCall(
- ctx,
- openIDConfigurationEndpoint,
- http.Header{},
- nil,
- nil,
- &resp,
- )
-
- return resp, err
-}
-
-// AADInstanceDiscovery attempts to discover a tenant endpoint (used in OIDC auth with an authorization endpoint).
-// This is done by AAD which allows for aliasing of tenants (windows.sts.net is the same as login.windows.com).
-func (c Client) AADInstanceDiscovery(ctx context.Context, authorityInfo Info) (InstanceDiscoveryResponse, error) {
- region := ""
- var err error
- resp := InstanceDiscoveryResponse{}
- if authorityInfo.Region != "" && authorityInfo.Region != autoDetectRegion {
- region = authorityInfo.Region
- } else if authorityInfo.Region == autoDetectRegion {
- region = detectRegion(ctx)
- }
- if region != "" {
- environment := authorityInfo.Host
- switch environment {
- case loginMicrosoft, loginWindows, loginSTSWindows, defaultHost:
- environment = loginMicrosoft
- }
-
- resp.TenantDiscoveryEndpoint = fmt.Sprintf(tenantDiscoveryEndpointWithRegion, region, environment, authorityInfo.Tenant)
- metadata := InstanceDiscoveryMetadata{
- PreferredNetwork: fmt.Sprintf("%v.%v", region, authorityInfo.Host),
- PreferredCache: authorityInfo.Host,
- Aliases: []string{fmt.Sprintf("%v.%v", region, authorityInfo.Host), authorityInfo.Host},
- }
- resp.Metadata = []InstanceDiscoveryMetadata{metadata}
- } else {
- qv := url.Values{}
- qv.Set("api-version", "1.1")
- qv.Set("authorization_endpoint", fmt.Sprintf(authorizationEndpoint, authorityInfo.Host, authorityInfo.Tenant))
-
- discoveryHost := defaultHost
- if TrustedHost(authorityInfo.Host) {
- discoveryHost = authorityInfo.Host
- }
-
- endpoint := fmt.Sprintf(instanceDiscoveryEndpoint, discoveryHost)
- err = c.Comm.JSONCall(ctx, endpoint, http.Header{}, qv, nil, &resp)
- }
- return resp, err
-}
-
-func detectRegion(ctx context.Context) string {
- region := os.Getenv(regionName)
- if region != "" {
- region = strings.ReplaceAll(region, " ", "")
- return strings.ToLower(region)
- }
- // HTTP call to IMDS endpoint to get region
- // Refer : https://identitydivision.visualstudio.com/DevEx/_git/AuthLibrariesApiReview?path=%2FPinAuthToRegion%2FAAD%20SDK%20Proposal%20to%20Pin%20Auth%20to%20region.md&_a=preview&version=GBdev
- // Set a 2 second timeout for this http client which only does calls to IMDS endpoint
- client := http.Client{
- Timeout: time.Duration(2 * time.Second),
- }
- req, _ := http.NewRequest("GET", imdsEndpoint, nil)
- req.Header.Set("Metadata", "true")
- resp, err := client.Do(req)
- // If the request times out or there is an error, it is retried once
- if err != nil || resp.StatusCode != 200 {
- resp, err = client.Do(req)
- if err != nil || resp.StatusCode != 200 {
- return ""
- }
- }
- defer resp.Body.Close()
- response, err := io.ReadAll(resp.Body)
- if err != nil {
- return ""
- }
- return string(response)
-}
-
-func (a *AuthParams) CacheKey(isAppCache bool) string {
- if a.AuthorizationType == ATOnBehalfOf {
- return a.AssertionHash()
- }
- if a.AuthorizationType == ATClientCredentials || isAppCache {
- return a.AppKey()
- }
- if a.AuthorizationType == ATRefreshToken || a.AuthorizationType == AccountByID {
- return a.HomeAccountID
- }
- return ""
-}
-func (a *AuthParams) AssertionHash() string {
- hasher := sha256.New()
- // Per documentation this never returns an error : https://pkg.go.dev/hash#pkg-types
- _, _ = hasher.Write([]byte(a.UserAssertion))
- sha := base64.URLEncoding.EncodeToString(hasher.Sum(nil))
- return sha
-}
-
-func (a *AuthParams) AppKey() string {
- if a.AuthorityInfo.Tenant != "" {
- return fmt.Sprintf("%s_%s_AppTokenCache", a.ClientID, a.AuthorityInfo.Tenant)
- }
- return fmt.Sprintf("%s__AppTokenCache", a.ClientID)
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go
deleted file mode 100644
index 10039773..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authorizetype_string.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Code generated by "stringer -type=AuthorizeType"; DO NOT EDIT.
-
-package authority
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[ATUnknown-0]
- _ = x[ATUsernamePassword-1]
- _ = x[ATWindowsIntegrated-2]
- _ = x[ATAuthCode-3]
- _ = x[ATInteractive-4]
- _ = x[ATClientCredentials-5]
- _ = x[ATDeviceCode-6]
- _ = x[ATRefreshToken-7]
-}
-
-const _AuthorizeType_name = "ATUnknownATUsernamePasswordATWindowsIntegratedATAuthCodeATInteractiveATClientCredentialsATDeviceCodeATRefreshToken"
-
-var _AuthorizeType_index = [...]uint8{0, 9, 27, 46, 56, 69, 88, 100, 114}
-
-func (i AuthorizeType) String() string {
- if i < 0 || i >= AuthorizeType(len(_AuthorizeType_index)-1) {
- return "AuthorizeType(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _AuthorizeType_name[_AuthorizeType_index[i]:_AuthorizeType_index[i+1]]
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go
deleted file mode 100644
index 7d9ec7cd..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go
+++ /dev/null
@@ -1,320 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-// Package comm provides helpers for communicating with HTTP backends.
-package comm
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "encoding/xml"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "reflect"
- "runtime"
- "strings"
- "time"
-
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
- customJSON "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version"
- "github.com/google/uuid"
-)
-
-// HTTPClient represents an HTTP client.
-// It's usually an *http.Client from the standard library.
-type HTTPClient interface {
- // Do sends an HTTP request and returns an HTTP response.
- Do(req *http.Request) (*http.Response, error)
-
- // CloseIdleConnections closes any idle connections in a "keep-alive" state.
- CloseIdleConnections()
-}
-
-// Client provides a wrapper to our *http.Client that handles compression and serialization needs.
-type Client struct {
- client HTTPClient
-}
-
-// New returns a new Client object.
-func New(httpClient HTTPClient) *Client {
- if httpClient == nil {
- panic("http.Client cannot == nil")
- }
-
- return &Client{client: httpClient}
-}
-
-// JSONCall connects to the REST endpoint passing the HTTP query values, headers and JSON conversion
-// of body in the HTTP body. It automatically handles compression and decompression with gzip. The response is JSON
-// unmarshalled into resp. resp must be a pointer to a struct. If the body struct contains a field called
-// "AdditionalFields" we use a custom marshal/unmarshal engine.
-func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, body, resp interface{}) error {
- if qv == nil {
- qv = url.Values{}
- }
-
- v := reflect.ValueOf(resp)
- if err := c.checkResp(v); err != nil {
- return err
- }
-
- // Choose a JSON marshal/unmarshal depending on if we have AdditionalFields attribute.
- var marshal = json.Marshal
- var unmarshal = json.Unmarshal
- if _, ok := v.Elem().Type().FieldByName("AdditionalFields"); ok {
- marshal = customJSON.Marshal
- unmarshal = customJSON.Unmarshal
- }
-
- u, err := url.Parse(endpoint)
- if err != nil {
- return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err)
- }
- u.RawQuery = qv.Encode()
-
- addStdHeaders(headers)
-
- req := &http.Request{Method: http.MethodGet, URL: u, Header: headers}
-
- if body != nil {
- // Note: In case your wondering why we are not gzip encoding....
- // I'm not sure if these various services support gzip on send.
- headers.Add("Content-Type", "application/json; charset=utf-8")
- data, err := marshal(body)
- if err != nil {
- return fmt.Errorf("bug: conn.Call(): could not marshal the body object: %w", err)
- }
- req.Body = io.NopCloser(bytes.NewBuffer(data))
- req.Method = http.MethodPost
- }
-
- data, err := c.do(ctx, req)
- if err != nil {
- return err
- }
-
- if resp != nil {
- if err := unmarshal(data, resp); err != nil {
- return fmt.Errorf("json decode error: %w\njson message bytes were: %s", err, string(data))
- }
- }
- return nil
-}
-
-// XMLCall connects to an endpoint and decodes the XML response into resp. This is used when
-// sending application/xml . If sending XML via SOAP, use SOAPCall().
-func (c *Client) XMLCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, resp interface{}) error {
- if err := c.checkResp(reflect.ValueOf(resp)); err != nil {
- return err
- }
-
- if qv == nil {
- qv = url.Values{}
- }
-
- u, err := url.Parse(endpoint)
- if err != nil {
- return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err)
- }
- u.RawQuery = qv.Encode()
-
- headers.Set("Content-Type", "application/xml; charset=utf-8") // This was not set in he original Mex(), but...
- addStdHeaders(headers)
-
- return c.xmlCall(ctx, u, headers, "", resp)
-}
-
-// SOAPCall returns the SOAP message given an endpoint, action, body of the request and the response object to marshal into.
-func (c *Client) SOAPCall(ctx context.Context, endpoint, action string, headers http.Header, qv url.Values, body string, resp interface{}) error {
- if body == "" {
- return fmt.Errorf("cannot make a SOAP call with body set to empty string")
- }
-
- if err := c.checkResp(reflect.ValueOf(resp)); err != nil {
- return err
- }
-
- if qv == nil {
- qv = url.Values{}
- }
-
- u, err := url.Parse(endpoint)
- if err != nil {
- return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err)
- }
- u.RawQuery = qv.Encode()
-
- headers.Set("Content-Type", "application/soap+xml; charset=utf-8")
- headers.Set("SOAPAction", action)
- addStdHeaders(headers)
-
- return c.xmlCall(ctx, u, headers, body, resp)
-}
-
-// xmlCall sends an XML in body and decodes into resp. This simply does the transport and relies on
-// an upper level call to set things such as SOAP parameters and Content-Type, if required.
-func (c *Client) xmlCall(ctx context.Context, u *url.URL, headers http.Header, body string, resp interface{}) error {
- req := &http.Request{Method: http.MethodGet, URL: u, Header: headers}
-
- if len(body) > 0 {
- req.Method = http.MethodPost
- req.Body = io.NopCloser(strings.NewReader(body))
- }
-
- data, err := c.do(ctx, req)
- if err != nil {
- return err
- }
-
- return xml.Unmarshal(data, resp)
-}
-
-// URLFormCall is used to make a call where we need to send application/x-www-form-urlencoded data
-// to the backend and receive JSON back. qv will be encoded into the request body.
-func (c *Client) URLFormCall(ctx context.Context, endpoint string, qv url.Values, resp interface{}) error {
- if len(qv) == 0 {
- return fmt.Errorf("URLFormCall() requires qv to have non-zero length")
- }
-
- if err := c.checkResp(reflect.ValueOf(resp)); err != nil {
- return err
- }
-
- u, err := url.Parse(endpoint)
- if err != nil {
- return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err)
- }
-
- headers := http.Header{}
- headers.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
- addStdHeaders(headers)
-
- enc := qv.Encode()
-
- req := &http.Request{
- Method: http.MethodPost,
- URL: u,
- Header: headers,
- ContentLength: int64(len(enc)),
- Body: io.NopCloser(strings.NewReader(enc)),
- GetBody: func() (io.ReadCloser, error) {
- return io.NopCloser(strings.NewReader(enc)), nil
- },
- }
-
- data, err := c.do(ctx, req)
- if err != nil {
- return err
- }
-
- v := reflect.ValueOf(resp)
- if err := c.checkResp(v); err != nil {
- return err
- }
-
- var unmarshal = json.Unmarshal
- if _, ok := v.Elem().Type().FieldByName("AdditionalFields"); ok {
- unmarshal = customJSON.Unmarshal
- }
- if resp != nil {
- if err := unmarshal(data, resp); err != nil {
- return fmt.Errorf("json decode error: %w\nraw message was: %s", err, string(data))
- }
- }
- return nil
-}
-
-// do makes the HTTP call to the server and returns the contents of the body.
-func (c *Client) do(ctx context.Context, req *http.Request) ([]byte, error) {
- if _, ok := ctx.Deadline(); !ok {
- var cancel context.CancelFunc
- ctx, cancel = context.WithTimeout(ctx, 30*time.Second)
- defer cancel()
- }
- req = req.WithContext(ctx)
-
- reply, err := c.client.Do(req)
- if err != nil {
- return nil, fmt.Errorf("server response error:\n %w", err)
- }
- defer reply.Body.Close()
-
- data, err := c.readBody(reply)
- if err != nil {
- return nil, fmt.Errorf("could not read the body of an HTTP Response: %w", err)
- }
- reply.Body = io.NopCloser(bytes.NewBuffer(data))
-
- // NOTE: This doesn't happen immediately after the call so that we can get an error message
- // from the server and include it in our error.
- switch reply.StatusCode {
- case 200, 201:
- default:
- sd := strings.TrimSpace(string(data))
- if sd != "" {
- // We probably have the error in the body.
- return nil, errors.CallErr{
- Req: req,
- Resp: reply,
- Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s", req.URL.String(), req.Method, reply.StatusCode, sd),
- }
- }
- return nil, errors.CallErr{
- Req: req,
- Resp: reply,
- Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d", req.URL.String(), req.Method, reply.StatusCode),
- }
- }
-
- return data, nil
-}
-
-// checkResp checks a response object o make sure it is a pointer to a struct.
-func (c *Client) checkResp(v reflect.Value) error {
- if v.Kind() != reflect.Ptr {
- return fmt.Errorf("bug: resp argument must a *struct, was %T", v.Interface())
- }
- v = v.Elem()
- if v.Kind() != reflect.Struct {
- return fmt.Errorf("bug: resp argument must be a *struct, was %T", v.Interface())
- }
- return nil
-}
-
-// readBody reads the body out of an *http.Response. It supports gzip encoded responses.
-func (c *Client) readBody(resp *http.Response) ([]byte, error) {
- var reader io.Reader = resp.Body
- switch resp.Header.Get("Content-Encoding") {
- case "":
- // Do nothing
- case "gzip":
- reader = gzipDecompress(resp.Body)
- default:
- return nil, fmt.Errorf("bug: comm.Client.JSONCall(): content was send with unsupported content-encoding %s", resp.Header.Get("Content-Encoding"))
- }
- return io.ReadAll(reader)
-}
-
-var testID string
-
-// addStdHeaders adds the standard headers we use on all calls.
-func addStdHeaders(headers http.Header) http.Header {
- headers.Set("Accept-Encoding", "gzip")
- // So that I can have a static id for tests.
- if testID != "" {
- headers.Set("client-request-id", testID)
- headers.Set("Return-Client-Request-Id", "false")
- } else {
- headers.Set("client-request-id", uuid.New().String())
- headers.Set("Return-Client-Request-Id", "false")
- }
- headers.Set("x-client-sku", "MSAL.Go")
- headers.Set("x-client-os", runtime.GOOS)
- headers.Set("x-client-cpu", runtime.GOARCH)
- headers.Set("x-client-ver", version.Version)
- return headers
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go
deleted file mode 100644
index 4d3dbfcf..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/compress.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-package comm
-
-import (
- "compress/gzip"
- "io"
-)
-
-func gzipDecompress(r io.Reader) io.Reader {
- gzipReader, _ := gzip.NewReader(r)
-
- pipeOut, pipeIn := io.Pipe()
- go func() {
- // decompression bomb would have to come from Azure services.
- // If we want to limit, we should do that in comm.do().
- _, err := io.Copy(pipeIn, gzipReader) //nolint
- if err != nil {
- // don't need the error.
- pipeIn.CloseWithError(err) //nolint
- gzipReader.Close()
- return
- }
- if err := gzipReader.Close(); err != nil {
- // don't need the error.
- pipeIn.CloseWithError(err) //nolint
- return
- }
- pipeIn.Close()
- }()
- return pipeOut
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go
deleted file mode 100644
index b628f61a..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant/grant.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-// Package grant holds types of grants issued by authorization services.
-package grant
-
-const (
- Password = "password"
- JWT = "urn:ietf:params:oauth:grant-type:jwt-bearer"
- SAMLV1 = "urn:ietf:params:oauth:grant-type:saml1_1-bearer"
- SAMLV2 = "urn:ietf:params:oauth:grant-type:saml2-bearer"
- DeviceCode = "device_code"
- AuthCode = "authorization_code"
- RefreshToken = "refresh_token"
- ClientCredential = "client_credentials"
- ClientAssertion = "urn:ietf:params:oauth:client-assertion-type:jwt-bearer"
-)
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go
deleted file mode 100644
index 1f9c543f..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/ops.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-/*
-Package ops provides operations to various backend services using REST clients.
-
-The REST type provides several clients that can be used to communicate to backends.
-Usage is simple:
-
- rest := ops.New()
-
- // Creates an authority client and calls the UserRealm() method.
- userRealm, err := rest.Authority().UserRealm(ctx, authParameters)
- if err != nil {
- // Do something
- }
-*/
-package ops
-
-import (
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust"
-)
-
-// HTTPClient represents an HTTP client.
-// It's usually an *http.Client from the standard library.
-type HTTPClient = comm.HTTPClient
-
-// REST provides REST clients for communicating with various backends used by MSAL.
-type REST struct {
- client *comm.Client
-}
-
-// New is the constructor for REST.
-func New(httpClient HTTPClient) *REST {
- return &REST{client: comm.New(httpClient)}
-}
-
-// Authority returns a client for querying information about various authorities.
-func (r *REST) Authority() authority.Client {
- return authority.Client{Comm: r.client}
-}
-
-// AccessTokens returns a client that can be used to get various access tokens for
-// authorization purposes.
-func (r *REST) AccessTokens() accesstokens.Client {
- return accesstokens.Client{Comm: r.client}
-}
-
-// WSTrust provides access to various metadata in a WSTrust service. This data can
-// be used to gain tokens based on SAML data using the client provided by AccessTokens().
-func (r *REST) WSTrust() wstrust.Client {
- return wstrust.Client{Comm: r.client}
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go
deleted file mode 100644
index a2bb6278..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/endpointtype_string.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Code generated by "stringer -type=endpointType"; DO NOT EDIT.
-
-package defs
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[etUnknown-0]
- _ = x[etUsernamePassword-1]
- _ = x[etWindowsTransport-2]
-}
-
-const _endpointType_name = "etUnknownetUsernamePasswordetWindowsTransport"
-
-var _endpointType_index = [...]uint8{0, 9, 27, 45}
-
-func (i endpointType) String() string {
- if i < 0 || i >= endpointType(len(_endpointType_index)-1) {
- return "endpointType(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _endpointType_name[_endpointType_index[i]:_endpointType_index[i+1]]
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go
deleted file mode 100644
index 64972700..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/mex_document_definitions.go
+++ /dev/null
@@ -1,394 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-package defs
-
-import "encoding/xml"
-
-type Definitions struct {
- XMLName xml.Name `xml:"definitions"`
- Text string `xml:",chardata"`
- Name string `xml:"name,attr"`
- TargetNamespace string `xml:"targetNamespace,attr"`
- WSDL string `xml:"wsdl,attr"`
- XSD string `xml:"xsd,attr"`
- T string `xml:"t,attr"`
- SOAPENC string `xml:"soapenc,attr"`
- SOAP string `xml:"soap,attr"`
- TNS string `xml:"tns,attr"`
- MSC string `xml:"msc,attr"`
- WSAM string `xml:"wsam,attr"`
- SOAP12 string `xml:"soap12,attr"`
- WSA10 string `xml:"wsa10,attr"`
- WSA string `xml:"wsa,attr"`
- WSAW string `xml:"wsaw,attr"`
- WSX string `xml:"wsx,attr"`
- WSAP string `xml:"wsap,attr"`
- WSU string `xml:"wsu,attr"`
- Trust string `xml:"trust,attr"`
- WSP string `xml:"wsp,attr"`
- Policy []Policy `xml:"Policy"`
- Types Types `xml:"types"`
- Message []Message `xml:"message"`
- PortType []PortType `xml:"portType"`
- Binding []Binding `xml:"binding"`
- Service Service `xml:"service"`
-}
-
-type Policy struct {
- Text string `xml:",chardata"`
- ID string `xml:"Id,attr"`
- ExactlyOne ExactlyOne `xml:"ExactlyOne"`
-}
-
-type ExactlyOne struct {
- Text string `xml:",chardata"`
- All All `xml:"All"`
-}
-
-type All struct {
- Text string `xml:",chardata"`
- NegotiateAuthentication NegotiateAuthentication `xml:"NegotiateAuthentication"`
- TransportBinding TransportBinding `xml:"TransportBinding"`
- UsingAddressing Text `xml:"UsingAddressing"`
- EndorsingSupportingTokens EndorsingSupportingTokens `xml:"EndorsingSupportingTokens"`
- WSS11 WSS11 `xml:"Wss11"`
- Trust10 Trust10 `xml:"Trust10"`
- SignedSupportingTokens SignedSupportingTokens `xml:"SignedSupportingTokens"`
- Trust13 WSTrust13 `xml:"Trust13"`
- SignedEncryptedSupportingTokens SignedEncryptedSupportingTokens `xml:"SignedEncryptedSupportingTokens"`
-}
-
-type NegotiateAuthentication struct {
- Text string `xml:",chardata"`
- HTTP string `xml:"http,attr"`
- XMLName xml.Name
-}
-
-type TransportBinding struct {
- Text string `xml:",chardata"`
- SP string `xml:"sp,attr"`
- Policy TransportBindingPolicy `xml:"Policy"`
-}
-
-type TransportBindingPolicy struct {
- Text string `xml:",chardata"`
- TransportToken TransportToken `xml:"TransportToken"`
- AlgorithmSuite AlgorithmSuite `xml:"AlgorithmSuite"`
- Layout Layout `xml:"Layout"`
- IncludeTimestamp Text `xml:"IncludeTimestamp"`
-}
-
-type TransportToken struct {
- Text string `xml:",chardata"`
- Policy TransportTokenPolicy `xml:"Policy"`
-}
-
-type TransportTokenPolicy struct {
- Text string `xml:",chardata"`
- HTTPSToken HTTPSToken `xml:"HttpsToken"`
-}
-
-type HTTPSToken struct {
- Text string `xml:",chardata"`
- RequireClientCertificate string `xml:"RequireClientCertificate,attr"`
-}
-
-type AlgorithmSuite struct {
- Text string `xml:",chardata"`
- Policy AlgorithmSuitePolicy `xml:"Policy"`
-}
-
-type AlgorithmSuitePolicy struct {
- Text string `xml:",chardata"`
- Basic256 Text `xml:"Basic256"`
- Basic128 Text `xml:"Basic128"`
-}
-
-type Layout struct {
- Text string `xml:",chardata"`
- Policy LayoutPolicy `xml:"Policy"`
-}
-
-type LayoutPolicy struct {
- Text string `xml:",chardata"`
- Strict Text `xml:"Strict"`
-}
-
-type EndorsingSupportingTokens struct {
- Text string `xml:",chardata"`
- SP string `xml:"sp,attr"`
- Policy EndorsingSupportingTokensPolicy `xml:"Policy"`
-}
-
-type EndorsingSupportingTokensPolicy struct {
- Text string `xml:",chardata"`
- X509Token X509Token `xml:"X509Token"`
- RSAToken RSAToken `xml:"RsaToken"`
- SignedParts SignedParts `xml:"SignedParts"`
- KerberosToken KerberosToken `xml:"KerberosToken"`
- IssuedToken IssuedToken `xml:"IssuedToken"`
- KeyValueToken KeyValueToken `xml:"KeyValueToken"`
-}
-
-type X509Token struct {
- Text string `xml:",chardata"`
- IncludeToken string `xml:"IncludeToken,attr"`
- Policy X509TokenPolicy `xml:"Policy"`
-}
-
-type X509TokenPolicy struct {
- Text string `xml:",chardata"`
- RequireThumbprintReference Text `xml:"RequireThumbprintReference"`
- WSSX509V3Token10 Text `xml:"WssX509V3Token10"`
-}
-
-type RSAToken struct {
- Text string `xml:",chardata"`
- IncludeToken string `xml:"IncludeToken,attr"`
- Optional string `xml:"Optional,attr"`
- MSSP string `xml:"mssp,attr"`
-}
-
-type SignedParts struct {
- Text string `xml:",chardata"`
- Header SignedPartsHeader `xml:"Header"`
-}
-
-type SignedPartsHeader struct {
- Text string `xml:",chardata"`
- Name string `xml:"Name,attr"`
- Namespace string `xml:"Namespace,attr"`
-}
-
-type KerberosToken struct {
- Text string `xml:",chardata"`
- IncludeToken string `xml:"IncludeToken,attr"`
- Policy KerberosTokenPolicy `xml:"Policy"`
-}
-
-type KerberosTokenPolicy struct {
- Text string `xml:",chardata"`
- WSSGSSKerberosV5ApReqToken11 Text `xml:"WssGssKerberosV5ApReqToken11"`
-}
-
-type IssuedToken struct {
- Text string `xml:",chardata"`
- IncludeToken string `xml:"IncludeToken,attr"`
- RequestSecurityTokenTemplate RequestSecurityTokenTemplate `xml:"RequestSecurityTokenTemplate"`
- Policy IssuedTokenPolicy `xml:"Policy"`
-}
-
-type RequestSecurityTokenTemplate struct {
- Text string `xml:",chardata"`
- KeyType Text `xml:"KeyType"`
- EncryptWith Text `xml:"EncryptWith"`
- SignatureAlgorithm Text `xml:"SignatureAlgorithm"`
- CanonicalizationAlgorithm Text `xml:"CanonicalizationAlgorithm"`
- EncryptionAlgorithm Text `xml:"EncryptionAlgorithm"`
- KeySize Text `xml:"KeySize"`
- KeyWrapAlgorithm Text `xml:"KeyWrapAlgorithm"`
-}
-
-type IssuedTokenPolicy struct {
- Text string `xml:",chardata"`
- RequireInternalReference Text `xml:"RequireInternalReference"`
-}
-
-type KeyValueToken struct {
- Text string `xml:",chardata"`
- IncludeToken string `xml:"IncludeToken,attr"`
- Optional string `xml:"Optional,attr"`
-}
-
-type WSS11 struct {
- Text string `xml:",chardata"`
- SP string `xml:"sp,attr"`
- Policy Wss11Policy `xml:"Policy"`
-}
-
-type Wss11Policy struct {
- Text string `xml:",chardata"`
- MustSupportRefThumbprint Text `xml:"MustSupportRefThumbprint"`
-}
-
-type Trust10 struct {
- Text string `xml:",chardata"`
- SP string `xml:"sp,attr"`
- Policy Trust10Policy `xml:"Policy"`
-}
-
-type Trust10Policy struct {
- Text string `xml:",chardata"`
- MustSupportIssuedTokens Text `xml:"MustSupportIssuedTokens"`
- RequireClientEntropy Text `xml:"RequireClientEntropy"`
- RequireServerEntropy Text `xml:"RequireServerEntropy"`
-}
-
-type SignedSupportingTokens struct {
- Text string `xml:",chardata"`
- SP string `xml:"sp,attr"`
- Policy SupportingTokensPolicy `xml:"Policy"`
-}
-
-type SupportingTokensPolicy struct {
- Text string `xml:",chardata"`
- UsernameToken UsernameToken `xml:"UsernameToken"`
-}
-type UsernameToken struct {
- Text string `xml:",chardata"`
- IncludeToken string `xml:"IncludeToken,attr"`
- Policy UsernameTokenPolicy `xml:"Policy"`
-}
-
-type UsernameTokenPolicy struct {
- Text string `xml:",chardata"`
- WSSUsernameToken10 WSSUsernameToken10 `xml:"WssUsernameToken10"`
-}
-
-type WSSUsernameToken10 struct {
- Text string `xml:",chardata"`
- XMLName xml.Name
-}
-
-type WSTrust13 struct {
- Text string `xml:",chardata"`
- SP string `xml:"sp,attr"`
- Policy WSTrust13Policy `xml:"Policy"`
-}
-
-type WSTrust13Policy struct {
- Text string `xml:",chardata"`
- MustSupportIssuedTokens Text `xml:"MustSupportIssuedTokens"`
- RequireClientEntropy Text `xml:"RequireClientEntropy"`
- RequireServerEntropy Text `xml:"RequireServerEntropy"`
-}
-
-type SignedEncryptedSupportingTokens struct {
- Text string `xml:",chardata"`
- SP string `xml:"sp,attr"`
- Policy SupportingTokensPolicy `xml:"Policy"`
-}
-
-type Types struct {
- Text string `xml:",chardata"`
- Schema Schema `xml:"schema"`
-}
-
-type Schema struct {
- Text string `xml:",chardata"`
- TargetNamespace string `xml:"targetNamespace,attr"`
- Import []Import `xml:"import"`
-}
-
-type Import struct {
- Text string `xml:",chardata"`
- SchemaLocation string `xml:"schemaLocation,attr"`
- Namespace string `xml:"namespace,attr"`
-}
-
-type Message struct {
- Text string `xml:",chardata"`
- Name string `xml:"name,attr"`
- Part Part `xml:"part"`
-}
-
-type Part struct {
- Text string `xml:",chardata"`
- Name string `xml:"name,attr"`
- Element string `xml:"element,attr"`
-}
-
-type PortType struct {
- Text string `xml:",chardata"`
- Name string `xml:"name,attr"`
- Operation Operation `xml:"operation"`
-}
-
-type Operation struct {
- Text string `xml:",chardata"`
- Name string `xml:"name,attr"`
- Input OperationIO `xml:"input"`
- Output OperationIO `xml:"output"`
-}
-
-type OperationIO struct {
- Text string `xml:",chardata"`
- Action string `xml:"Action,attr"`
- Message string `xml:"message,attr"`
- Body OperationIOBody `xml:"body"`
-}
-
-type OperationIOBody struct {
- Text string `xml:",chardata"`
- Use string `xml:"use,attr"`
-}
-
-type Binding struct {
- Text string `xml:",chardata"`
- Name string `xml:"name,attr"`
- Type string `xml:"type,attr"`
- PolicyReference PolicyReference `xml:"PolicyReference"`
- Binding DefinitionsBinding `xml:"binding"`
- Operation BindingOperation `xml:"operation"`
-}
-
-type PolicyReference struct {
- Text string `xml:",chardata"`
- URI string `xml:"URI,attr"`
-}
-
-type DefinitionsBinding struct {
- Text string `xml:",chardata"`
- Transport string `xml:"transport,attr"`
-}
-
-type BindingOperation struct {
- Text string `xml:",chardata"`
- Name string `xml:"name,attr"`
- Operation BindingOperationOperation `xml:"operation"`
- Input BindingOperationIO `xml:"input"`
- Output BindingOperationIO `xml:"output"`
-}
-
-type BindingOperationOperation struct {
- Text string `xml:",chardata"`
- SoapAction string `xml:"soapAction,attr"`
- Style string `xml:"style,attr"`
-}
-
-type BindingOperationIO struct {
- Text string `xml:",chardata"`
- Body OperationIOBody `xml:"body"`
-}
-
-type Service struct {
- Text string `xml:",chardata"`
- Name string `xml:"name,attr"`
- Port []Port `xml:"port"`
-}
-
-type Port struct {
- Text string `xml:",chardata"`
- Name string `xml:"name,attr"`
- Binding string `xml:"binding,attr"`
- Address Address `xml:"address"`
- EndpointReference PortEndpointReference `xml:"EndpointReference"`
-}
-
-type Address struct {
- Text string `xml:",chardata"`
- Location string `xml:"location,attr"`
-}
-
-type PortEndpointReference struct {
- Text string `xml:",chardata"`
- Address Text `xml:"Address"`
- Identity Identity `xml:"Identity"`
-}
-
-type Identity struct {
- Text string `xml:",chardata"`
- XMLNS string `xml:"xmlns,attr"`
- SPN Text `xml:"Spn"`
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go
deleted file mode 100644
index 7d072556..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/saml_assertion_definitions.go
+++ /dev/null
@@ -1,230 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-package defs
-
-import "encoding/xml"
-
-// TODO(msal): Someone (and it ain't gonna be me) needs to document these attributes or
-// at the least put a link to RFC.
-
-type SAMLDefinitions struct {
- XMLName xml.Name `xml:"Envelope"`
- Text string `xml:",chardata"`
- S string `xml:"s,attr"`
- A string `xml:"a,attr"`
- U string `xml:"u,attr"`
- Header Header `xml:"Header"`
- Body Body `xml:"Body"`
-}
-
-type Header struct {
- Text string `xml:",chardata"`
- Action Action `xml:"Action"`
- Security Security `xml:"Security"`
-}
-
-type Action struct {
- Text string `xml:",chardata"`
- MustUnderstand string `xml:"mustUnderstand,attr"`
-}
-
-type Security struct {
- Text string `xml:",chardata"`
- MustUnderstand string `xml:"mustUnderstand,attr"`
- O string `xml:"o,attr"`
- Timestamp Timestamp `xml:"Timestamp"`
-}
-
-type Timestamp struct {
- Text string `xml:",chardata"`
- ID string `xml:"Id,attr"`
- Created Text `xml:"Created"`
- Expires Text `xml:"Expires"`
-}
-
-type Text struct {
- Text string `xml:",chardata"`
-}
-
-type Body struct {
- Text string `xml:",chardata"`
- RequestSecurityTokenResponseCollection RequestSecurityTokenResponseCollection `xml:"RequestSecurityTokenResponseCollection"`
-}
-
-type RequestSecurityTokenResponseCollection struct {
- Text string `xml:",chardata"`
- Trust string `xml:"trust,attr"`
- RequestSecurityTokenResponse []RequestSecurityTokenResponse `xml:"RequestSecurityTokenResponse"`
-}
-
-type RequestSecurityTokenResponse struct {
- Text string `xml:",chardata"`
- Lifetime Lifetime `xml:"Lifetime"`
- AppliesTo AppliesTo `xml:"AppliesTo"`
- RequestedSecurityToken RequestedSecurityToken `xml:"RequestedSecurityToken"`
- RequestedAttachedReference RequestedAttachedReference `xml:"RequestedAttachedReference"`
- RequestedUnattachedReference RequestedUnattachedReference `xml:"RequestedUnattachedReference"`
- TokenType Text `xml:"TokenType"`
- RequestType Text `xml:"RequestType"`
- KeyType Text `xml:"KeyType"`
-}
-
-type Lifetime struct {
- Text string `xml:",chardata"`
- Created WSUTimestamp `xml:"Created"`
- Expires WSUTimestamp `xml:"Expires"`
-}
-
-type WSUTimestamp struct {
- Text string `xml:",chardata"`
- Wsu string `xml:"wsu,attr"`
-}
-
-type AppliesTo struct {
- Text string `xml:",chardata"`
- Wsp string `xml:"wsp,attr"`
- EndpointReference EndpointReference `xml:"EndpointReference"`
-}
-
-type EndpointReference struct {
- Text string `xml:",chardata"`
- Wsa string `xml:"wsa,attr"`
- Address Text `xml:"Address"`
-}
-
-type RequestedSecurityToken struct {
- Text string `xml:",chardata"`
- AssertionRawXML string `xml:",innerxml"`
- Assertion Assertion `xml:"Assertion"`
-}
-
-type Assertion struct {
- XMLName xml.Name // Normally its `xml:"Assertion"`, but I think they want to capture the xmlns
- Text string `xml:",chardata"`
- MajorVersion string `xml:"MajorVersion,attr"`
- MinorVersion string `xml:"MinorVersion,attr"`
- AssertionID string `xml:"AssertionID,attr"`
- Issuer string `xml:"Issuer,attr"`
- IssueInstant string `xml:"IssueInstant,attr"`
- Saml string `xml:"saml,attr"`
- Conditions Conditions `xml:"Conditions"`
- AttributeStatement AttributeStatement `xml:"AttributeStatement"`
- AuthenticationStatement AuthenticationStatement `xml:"AuthenticationStatement"`
- Signature Signature `xml:"Signature"`
-}
-
-type Conditions struct {
- Text string `xml:",chardata"`
- NotBefore string `xml:"NotBefore,attr"`
- NotOnOrAfter string `xml:"NotOnOrAfter,attr"`
- AudienceRestrictionCondition AudienceRestrictionCondition `xml:"AudienceRestrictionCondition"`
-}
-
-type AudienceRestrictionCondition struct {
- Text string `xml:",chardata"`
- Audience Text `xml:"Audience"`
-}
-
-type AttributeStatement struct {
- Text string `xml:",chardata"`
- Subject Subject `xml:"Subject"`
- Attribute []Attribute `xml:"Attribute"`
-}
-
-type Subject struct {
- Text string `xml:",chardata"`
- NameIdentifier NameIdentifier `xml:"NameIdentifier"`
- SubjectConfirmation SubjectConfirmation `xml:"SubjectConfirmation"`
-}
-
-type NameIdentifier struct {
- Text string `xml:",chardata"`
- Format string `xml:"Format,attr"`
-}
-
-type SubjectConfirmation struct {
- Text string `xml:",chardata"`
- ConfirmationMethod Text `xml:"ConfirmationMethod"`
-}
-
-type Attribute struct {
- Text string `xml:",chardata"`
- AttributeName string `xml:"AttributeName,attr"`
- AttributeNamespace string `xml:"AttributeNamespace,attr"`
- AttributeValue Text `xml:"AttributeValue"`
-}
-
-type AuthenticationStatement struct {
- Text string `xml:",chardata"`
- AuthenticationMethod string `xml:"AuthenticationMethod,attr"`
- AuthenticationInstant string `xml:"AuthenticationInstant,attr"`
- Subject Subject `xml:"Subject"`
-}
-
-type Signature struct {
- Text string `xml:",chardata"`
- Ds string `xml:"ds,attr"`
- SignedInfo SignedInfo `xml:"SignedInfo"`
- SignatureValue Text `xml:"SignatureValue"`
- KeyInfo KeyInfo `xml:"KeyInfo"`
-}
-
-type SignedInfo struct {
- Text string `xml:",chardata"`
- CanonicalizationMethod Method `xml:"CanonicalizationMethod"`
- SignatureMethod Method `xml:"SignatureMethod"`
- Reference Reference `xml:"Reference"`
-}
-
-type Method struct {
- Text string `xml:",chardata"`
- Algorithm string `xml:"Algorithm,attr"`
-}
-
-type Reference struct {
- Text string `xml:",chardata"`
- URI string `xml:"URI,attr"`
- Transforms Transforms `xml:"Transforms"`
- DigestMethod Method `xml:"DigestMethod"`
- DigestValue Text `xml:"DigestValue"`
-}
-
-type Transforms struct {
- Text string `xml:",chardata"`
- Transform []Method `xml:"Transform"`
-}
-
-type KeyInfo struct {
- Text string `xml:",chardata"`
- Xmlns string `xml:"xmlns,attr"`
- X509Data X509Data `xml:"X509Data"`
-}
-
-type X509Data struct {
- Text string `xml:",chardata"`
- X509Certificate Text `xml:"X509Certificate"`
-}
-
-type RequestedAttachedReference struct {
- Text string `xml:",chardata"`
- SecurityTokenReference SecurityTokenReference `xml:"SecurityTokenReference"`
-}
-
-type SecurityTokenReference struct {
- Text string `xml:",chardata"`
- TokenType string `xml:"TokenType,attr"`
- O string `xml:"o,attr"`
- K string `xml:"k,attr"`
- KeyIdentifier KeyIdentifier `xml:"KeyIdentifier"`
-}
-
-type KeyIdentifier struct {
- Text string `xml:",chardata"`
- ValueType string `xml:"ValueType,attr"`
-}
-
-type RequestedUnattachedReference struct {
- Text string `xml:",chardata"`
- SecurityTokenReference SecurityTokenReference `xml:"SecurityTokenReference"`
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go
deleted file mode 100644
index 6fe5efa8..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/version_string.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Code generated by "stringer -type=Version"; DO NOT EDIT.
-
-package defs
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[TrustUnknown-0]
- _ = x[Trust2005-1]
- _ = x[Trust13-2]
-}
-
-const _Version_name = "TrustUnknownTrust2005Trust13"
-
-var _Version_index = [...]uint8{0, 12, 21, 28}
-
-func (i Version) String() string {
- if i < 0 || i >= Version(len(_Version_index)-1) {
- return "Version(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _Version_name[_Version_index[i]:_Version_index[i+1]]
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go
deleted file mode 100644
index 8fad5efb..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_endpoint.go
+++ /dev/null
@@ -1,199 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-package defs
-
-import (
- "encoding/xml"
- "fmt"
- "time"
-
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
- uuid "github.com/google/uuid"
-)
-
-//go:generate stringer -type=Version
-
-type Version int
-
-const (
- TrustUnknown Version = iota
- Trust2005
- Trust13
-)
-
-// Endpoint represents a WSTrust endpoint.
-type Endpoint struct {
- // Version is the version of the endpoint.
- Version Version
- // URL is the URL of the endpoint.
- URL string
-}
-
-type wsTrustTokenRequestEnvelope struct {
- XMLName xml.Name `xml:"s:Envelope"`
- Text string `xml:",chardata"`
- S string `xml:"xmlns:s,attr"`
- Wsa string `xml:"xmlns:wsa,attr"`
- Wsu string `xml:"xmlns:wsu,attr"`
- Header struct {
- Text string `xml:",chardata"`
- Action struct {
- Text string `xml:",chardata"`
- MustUnderstand string `xml:"s:mustUnderstand,attr"`
- } `xml:"wsa:Action"`
- MessageID struct {
- Text string `xml:",chardata"`
- } `xml:"wsa:messageID"`
- ReplyTo struct {
- Text string `xml:",chardata"`
- Address struct {
- Text string `xml:",chardata"`
- } `xml:"wsa:Address"`
- } `xml:"wsa:ReplyTo"`
- To struct {
- Text string `xml:",chardata"`
- MustUnderstand string `xml:"s:mustUnderstand,attr"`
- } `xml:"wsa:To"`
- Security struct {
- Text string `xml:",chardata"`
- MustUnderstand string `xml:"s:mustUnderstand,attr"`
- Wsse string `xml:"xmlns:wsse,attr"`
- Timestamp struct {
- Text string `xml:",chardata"`
- ID string `xml:"wsu:Id,attr"`
- Created struct {
- Text string `xml:",chardata"`
- } `xml:"wsu:Created"`
- Expires struct {
- Text string `xml:",chardata"`
- } `xml:"wsu:Expires"`
- } `xml:"wsu:Timestamp"`
- UsernameToken struct {
- Text string `xml:",chardata"`
- ID string `xml:"wsu:Id,attr"`
- Username struct {
- Text string `xml:",chardata"`
- } `xml:"wsse:Username"`
- Password struct {
- Text string `xml:",chardata"`
- } `xml:"wsse:Password"`
- } `xml:"wsse:UsernameToken"`
- } `xml:"wsse:Security"`
- } `xml:"s:Header"`
- Body struct {
- Text string `xml:",chardata"`
- RequestSecurityToken struct {
- Text string `xml:",chardata"`
- Wst string `xml:"xmlns:wst,attr"`
- AppliesTo struct {
- Text string `xml:",chardata"`
- Wsp string `xml:"xmlns:wsp,attr"`
- EndpointReference struct {
- Text string `xml:",chardata"`
- Address struct {
- Text string `xml:",chardata"`
- } `xml:"wsa:Address"`
- } `xml:"wsa:EndpointReference"`
- } `xml:"wsp:AppliesTo"`
- KeyType struct {
- Text string `xml:",chardata"`
- } `xml:"wst:KeyType"`
- RequestType struct {
- Text string `xml:",chardata"`
- } `xml:"wst:RequestType"`
- } `xml:"wst:RequestSecurityToken"`
- } `xml:"s:Body"`
-}
-
-func buildTimeString(t time.Time) string {
- // Golang time formats are weird: https://stackoverflow.com/questions/20234104/how-to-format-current-time-using-a-yyyymmddhhmmss-format
- return t.Format("2006-01-02T15:04:05.000Z")
-}
-
-func (wte *Endpoint) buildTokenRequestMessage(authType authority.AuthorizeType, cloudAudienceURN string, username string, password string) (string, error) {
- var soapAction string
- var trustNamespace string
- var keyType string
- var requestType string
-
- createdTime := time.Now().UTC()
- expiresTime := createdTime.Add(10 * time.Minute)
-
- switch wte.Version {
- case Trust2005:
- soapAction = trust2005Spec
- trustNamespace = "http://schemas.xmlsoap.org/ws/2005/02/trust"
- keyType = "http://schemas.xmlsoap.org/ws/2005/05/identity/NoProofKey"
- requestType = "http://schemas.xmlsoap.org/ws/2005/02/trust/Issue"
- case Trust13:
- soapAction = trust13Spec
- trustNamespace = "http://docs.oasis-open.org/ws-sx/ws-trust/200512"
- keyType = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/Bearer"
- requestType = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/Issue"
- default:
- return "", fmt.Errorf("buildTokenRequestMessage had Version == %q, which is not recognized", wte.Version)
- }
-
- var envelope wsTrustTokenRequestEnvelope
-
- messageUUID := uuid.New()
-
- envelope.S = "http://www.w3.org/2003/05/soap-envelope"
- envelope.Wsa = "http://www.w3.org/2005/08/addressing"
- envelope.Wsu = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd"
-
- envelope.Header.Action.MustUnderstand = "1"
- envelope.Header.Action.Text = soapAction
- envelope.Header.MessageID.Text = "urn:uuid:" + messageUUID.String()
- envelope.Header.ReplyTo.Address.Text = "http://www.w3.org/2005/08/addressing/anonymous"
- envelope.Header.To.MustUnderstand = "1"
- envelope.Header.To.Text = wte.URL
-
- switch authType {
- case authority.ATUnknown:
- return "", fmt.Errorf("buildTokenRequestMessage had no authority type(%v)", authType)
- case authority.ATUsernamePassword:
- endpointUUID := uuid.New()
-
- var trustID string
- if wte.Version == Trust2005 {
- trustID = "UnPwSecTok2005-" + endpointUUID.String()
- } else {
- trustID = "UnPwSecTok13-" + endpointUUID.String()
- }
-
- envelope.Header.Security.MustUnderstand = "1"
- envelope.Header.Security.Wsse = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd"
- envelope.Header.Security.Timestamp.ID = "MSATimeStamp"
- envelope.Header.Security.Timestamp.Created.Text = buildTimeString(createdTime)
- envelope.Header.Security.Timestamp.Expires.Text = buildTimeString(expiresTime)
- envelope.Header.Security.UsernameToken.ID = trustID
- envelope.Header.Security.UsernameToken.Username.Text = username
- envelope.Header.Security.UsernameToken.Password.Text = password
- default:
- // This is just to note that we don't do anything for other cases.
- // We aren't missing anything I know of.
- }
-
- envelope.Body.RequestSecurityToken.Wst = trustNamespace
- envelope.Body.RequestSecurityToken.AppliesTo.Wsp = "http://schemas.xmlsoap.org/ws/2004/09/policy"
- envelope.Body.RequestSecurityToken.AppliesTo.EndpointReference.Address.Text = cloudAudienceURN
- envelope.Body.RequestSecurityToken.KeyType.Text = keyType
- envelope.Body.RequestSecurityToken.RequestType.Text = requestType
-
- output, err := xml.Marshal(envelope)
- if err != nil {
- return "", err
- }
-
- return string(output), nil
-}
-
-func (wte *Endpoint) BuildTokenRequestMessageWIA(cloudAudienceURN string) (string, error) {
- return wte.buildTokenRequestMessage(authority.ATWindowsIntegrated, cloudAudienceURN, "", "")
-}
-
-func (wte *Endpoint) BuildTokenRequestMessageUsernamePassword(cloudAudienceURN string, username string, password string) (string, error) {
- return wte.buildTokenRequestMessage(authority.ATUsernamePassword, cloudAudienceURN, username, password)
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go
deleted file mode 100644
index e3d19886..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs/wstrust_mex_document.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-package defs
-
-import (
- "errors"
- "fmt"
- "strings"
-)
-
-//go:generate stringer -type=endpointType
-
-type endpointType int
-
-const (
- etUnknown endpointType = iota
- etUsernamePassword
- etWindowsTransport
-)
-
-type wsEndpointData struct {
- Version Version
- EndpointType endpointType
-}
-
-const trust13Spec string = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue"
-const trust2005Spec string = "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue"
-
-type MexDocument struct {
- UsernamePasswordEndpoint Endpoint
- WindowsTransportEndpoint Endpoint
- policies map[string]endpointType
- bindings map[string]wsEndpointData
-}
-
-func updateEndpoint(cached *Endpoint, found Endpoint) {
- if cached == nil || cached.Version == TrustUnknown {
- *cached = found
- return
- }
- if (*cached).Version == Trust2005 && found.Version == Trust13 {
- *cached = found
- return
- }
-}
-
-// TODO(msal): Someone needs to write tests for everything below.
-
-// NewFromDef creates a new MexDocument.
-func NewFromDef(defs Definitions) (MexDocument, error) {
- policies, err := policies(defs)
- if err != nil {
- return MexDocument{}, err
- }
-
- bindings, err := bindings(defs, policies)
- if err != nil {
- return MexDocument{}, err
- }
-
- userPass, windows, err := endpoints(defs, bindings)
- if err != nil {
- return MexDocument{}, err
- }
-
- return MexDocument{
- UsernamePasswordEndpoint: userPass,
- WindowsTransportEndpoint: windows,
- policies: policies,
- bindings: bindings,
- }, nil
-}
-
-func policies(defs Definitions) (map[string]endpointType, error) {
- policies := make(map[string]endpointType, len(defs.Policy))
-
- for _, policy := range defs.Policy {
- if policy.ExactlyOne.All.NegotiateAuthentication.XMLName.Local != "" {
- if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" {
- policies["#"+policy.ID] = etWindowsTransport
- }
- }
-
- if policy.ExactlyOne.All.SignedEncryptedSupportingTokens.Policy.UsernameToken.Policy.WSSUsernameToken10.XMLName.Local != "" {
- if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" {
- policies["#"+policy.ID] = etUsernamePassword
- }
- }
- if policy.ExactlyOne.All.SignedSupportingTokens.Policy.UsernameToken.Policy.WSSUsernameToken10.XMLName.Local != "" {
- if policy.ExactlyOne.All.TransportBinding.SP != "" && policy.ID != "" {
- policies["#"+policy.ID] = etUsernamePassword
- }
- }
- }
-
- if len(policies) == 0 {
- return policies, errors.New("no policies for mex document")
- }
-
- return policies, nil
-}
-
-func bindings(defs Definitions, policies map[string]endpointType) (map[string]wsEndpointData, error) {
- bindings := make(map[string]wsEndpointData, len(defs.Binding))
-
- for _, binding := range defs.Binding {
- policyName := binding.PolicyReference.URI
- transport := binding.Binding.Transport
-
- if transport == "http://schemas.xmlsoap.org/soap/http" {
- if policy, ok := policies[policyName]; ok {
- bindingName := binding.Name
- specVersion := binding.Operation.Operation.SoapAction
-
- if specVersion == trust13Spec {
- bindings[bindingName] = wsEndpointData{Trust13, policy}
- } else if specVersion == trust2005Spec {
- bindings[bindingName] = wsEndpointData{Trust2005, policy}
- } else {
- return nil, errors.New("found unknown spec version in mex document")
- }
- }
- }
- }
- return bindings, nil
-}
-
-func endpoints(defs Definitions, bindings map[string]wsEndpointData) (userPass, windows Endpoint, err error) {
- for _, port := range defs.Service.Port {
- bindingName := port.Binding
-
- index := strings.Index(bindingName, ":")
- if index != -1 {
- bindingName = bindingName[index+1:]
- }
-
- if binding, ok := bindings[bindingName]; ok {
- url := strings.TrimSpace(port.EndpointReference.Address.Text)
- if url == "" {
- return Endpoint{}, Endpoint{}, fmt.Errorf("MexDocument cannot have blank URL endpoint")
- }
- if binding.Version == TrustUnknown {
- return Endpoint{}, Endpoint{}, fmt.Errorf("endpoint version unknown")
- }
- endpoint := Endpoint{Version: binding.Version, URL: url}
-
- switch binding.EndpointType {
- case etUsernamePassword:
- updateEndpoint(&userPass, endpoint)
- case etWindowsTransport:
- updateEndpoint(&windows, endpoint)
- default:
- return Endpoint{}, Endpoint{}, errors.New("found unknown port type in MEX document")
- }
- }
- }
- return userPass, windows, nil
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go
deleted file mode 100644
index 47cd4c69..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/wstrust.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-/*
-Package wstrust provides a client for communicating with a WSTrust (https://en.wikipedia.org/wiki/WS-Trust#:~:text=WS%2DTrust%20is%20a%20WS,in%20a%20secure%20message%20exchange.)
-for the purposes of extracting metadata from the service. This data can be used to acquire
-tokens using the accesstokens.Client.GetAccessTokenFromSamlGrant() call.
-*/
-package wstrust
-
-import (
- "context"
- "errors"
- "fmt"
- "net/http"
- "net/url"
-
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/grant"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs"
-)
-
-type xmlCaller interface {
- XMLCall(ctx context.Context, endpoint string, headers http.Header, qv url.Values, resp interface{}) error
- SOAPCall(ctx context.Context, endpoint, action string, headers http.Header, qv url.Values, body string, resp interface{}) error
-}
-
-type SamlTokenInfo struct {
- AssertionType string // Should be either constants SAMLV1Grant or SAMLV2Grant.
- Assertion string
-}
-
-// Client represents the REST calls to get tokens from token generator backends.
-type Client struct {
- // Comm provides the HTTP transport client.
- Comm xmlCaller
-}
-
-// TODO(msal): This allows me to call Mex without having a real Def file on line 45.
-// This would fail because policies() would not find a policy. This is easy enough to
-// fix in test data, but.... Definitions is defined with built in structs. That needs
-// to be pulled apart and until then I have this hack in.
-var newFromDef = defs.NewFromDef
-
-// Mex provides metadata about a wstrust service.
-func (c Client) Mex(ctx context.Context, federationMetadataURL string) (defs.MexDocument, error) {
- resp := defs.Definitions{}
- err := c.Comm.XMLCall(
- ctx,
- federationMetadataURL,
- http.Header{},
- nil,
- &resp,
- )
- if err != nil {
- return defs.MexDocument{}, err
- }
-
- return newFromDef(resp)
-}
-
-const (
- SoapActionDefault = "http://docs.oasis-open.org/ws-sx/ws-trust/200512/RST/Issue"
-
- // Note: Commented out because this action is not supported. It was in the original code
- // but only used in a switch where it errored. Since there was only one value, a default
- // worked better. However, buildTokenRequestMessage() had 2005 support. I'm not actually
- // sure what's going on here. It like we have half support. For now this is here just
- // for documentation purposes in case we are going to add support.
- //
- // SoapActionWSTrust2005 = "http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue"
-)
-
-// SAMLTokenInfo provides SAML information that is used to generate a SAML token.
-func (c Client) SAMLTokenInfo(ctx context.Context, authParameters authority.AuthParams, cloudAudienceURN string, endpoint defs.Endpoint) (SamlTokenInfo, error) {
- var wsTrustRequestMessage string
- var err error
-
- switch authParameters.AuthorizationType {
- case authority.ATWindowsIntegrated:
- wsTrustRequestMessage, err = endpoint.BuildTokenRequestMessageWIA(cloudAudienceURN)
- if err != nil {
- return SamlTokenInfo{}, err
- }
- case authority.ATUsernamePassword:
- wsTrustRequestMessage, err = endpoint.BuildTokenRequestMessageUsernamePassword(
- cloudAudienceURN, authParameters.Username, authParameters.Password)
- if err != nil {
- return SamlTokenInfo{}, err
- }
- default:
- return SamlTokenInfo{}, fmt.Errorf("unknown auth type %v", authParameters.AuthorizationType)
- }
-
- var soapAction string
- switch endpoint.Version {
- case defs.Trust13:
- soapAction = SoapActionDefault
- case defs.Trust2005:
- return SamlTokenInfo{}, errors.New("WS Trust 2005 support is not implemented")
- default:
- return SamlTokenInfo{}, fmt.Errorf("the SOAP endpoint for a wstrust call had an invalid version: %v", endpoint.Version)
- }
-
- resp := defs.SAMLDefinitions{}
- err = c.Comm.SOAPCall(ctx, endpoint.URL, soapAction, http.Header{}, nil, wsTrustRequestMessage, &resp)
- if err != nil {
- return SamlTokenInfo{}, err
- }
-
- return c.samlAssertion(resp)
-}
-
-const (
- samlv1Assertion = "urn:oasis:names:tc:SAML:1.0:assertion"
- samlv2Assertion = "urn:oasis:names:tc:SAML:2.0:assertion"
-)
-
-func (c Client) samlAssertion(def defs.SAMLDefinitions) (SamlTokenInfo, error) {
- for _, tokenResponse := range def.Body.RequestSecurityTokenResponseCollection.RequestSecurityTokenResponse {
- token := tokenResponse.RequestedSecurityToken
- if token.Assertion.XMLName.Local != "" {
- assertion := token.AssertionRawXML
-
- samlVersion := token.Assertion.Saml
- switch samlVersion {
- case samlv1Assertion:
- return SamlTokenInfo{AssertionType: grant.SAMLV1, Assertion: assertion}, nil
- case samlv2Assertion:
- return SamlTokenInfo{AssertionType: grant.SAMLV2, Assertion: assertion}, nil
- }
- return SamlTokenInfo{}, fmt.Errorf("couldn't parse SAML assertion, version unknown: %q", samlVersion)
- }
- }
- return SamlTokenInfo{}, errors.New("unknown WS-Trust version")
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go
deleted file mode 100644
index 0ade4117..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-// TODO(msal): Write some tests. The original code this came from didn't have tests and I'm too
-// tired at this point to do it. It, like many other *Manager code I found was broken because
-// they didn't have mutex protection.
-
-package oauth
-
-import (
- "context"
- "errors"
- "fmt"
- "strings"
- "sync"
-
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
-)
-
-// ADFS is an active directory federation service authority type.
-const ADFS = "ADFS"
-
-type cacheEntry struct {
- Endpoints authority.Endpoints
- ValidForDomainsInList map[string]bool
-}
-
-func createcacheEntry(endpoints authority.Endpoints) cacheEntry {
- return cacheEntry{endpoints, map[string]bool{}}
-}
-
-// AuthorityEndpoint retrieves endpoints from an authority for auth and token acquisition.
-type authorityEndpoint struct {
- rest *ops.REST
-
- mu sync.Mutex
- cache map[string]cacheEntry
-}
-
-// newAuthorityEndpoint is the constructor for AuthorityEndpoint.
-func newAuthorityEndpoint(rest *ops.REST) *authorityEndpoint {
- m := &authorityEndpoint{rest: rest, cache: map[string]cacheEntry{}}
- return m
-}
-
-// ResolveEndpoints gets the authorization and token endpoints and creates an AuthorityEndpoints instance
-func (m *authorityEndpoint) ResolveEndpoints(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, error) {
-
- if endpoints, found := m.cachedEndpoints(authorityInfo, userPrincipalName); found {
- return endpoints, nil
- }
-
- endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo, userPrincipalName)
- if err != nil {
- return authority.Endpoints{}, err
- }
-
- resp, err := m.rest.Authority().GetTenantDiscoveryResponse(ctx, endpoint)
- if err != nil {
- return authority.Endpoints{}, err
- }
- if err := resp.Validate(); err != nil {
- return authority.Endpoints{}, fmt.Errorf("ResolveEndpoints(): %w", err)
- }
-
- tenant := authorityInfo.Tenant
-
- endpoints := authority.NewEndpoints(
- strings.Replace(resp.AuthorizationEndpoint, "{tenant}", tenant, -1),
- strings.Replace(resp.TokenEndpoint, "{tenant}", tenant, -1),
- strings.Replace(resp.Issuer, "{tenant}", tenant, -1),
- authorityInfo.Host)
-
- m.addCachedEndpoints(authorityInfo, userPrincipalName, endpoints)
-
- return endpoints, nil
-}
-
-// cachedEndpoints returns a the cached endpoints if they exists. If not, we return false.
-func (m *authorityEndpoint) cachedEndpoints(authorityInfo authority.Info, userPrincipalName string) (authority.Endpoints, bool) {
- m.mu.Lock()
- defer m.mu.Unlock()
-
- if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok {
- if authorityInfo.AuthorityType == ADFS {
- domain, err := adfsDomainFromUpn(userPrincipalName)
- if err == nil {
- if _, ok := cacheEntry.ValidForDomainsInList[domain]; ok {
- return cacheEntry.Endpoints, true
- }
- }
- }
- return cacheEntry.Endpoints, true
- }
- return authority.Endpoints{}, false
-}
-
-func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, userPrincipalName string, endpoints authority.Endpoints) {
- m.mu.Lock()
- defer m.mu.Unlock()
-
- updatedCacheEntry := createcacheEntry(endpoints)
-
- if authorityInfo.AuthorityType == ADFS {
- // Since we're here, we've made a call to the backend. We want to ensure we're caching
- // the latest values from the server.
- if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok {
- for k := range cacheEntry.ValidForDomainsInList {
- updatedCacheEntry.ValidForDomainsInList[k] = true
- }
- }
- domain, err := adfsDomainFromUpn(userPrincipalName)
- if err == nil {
- updatedCacheEntry.ValidForDomainsInList[domain] = true
- }
- }
-
- m.cache[authorityInfo.CanonicalAuthorityURI] = updatedCacheEntry
-}
-
-func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (string, error) {
- if authorityInfo.Tenant == "adfs" {
- return fmt.Sprintf("https://%s/adfs/.well-known/openid-configuration", authorityInfo.Host), nil
- } else if authorityInfo.ValidateAuthority && !authority.TrustedHost(authorityInfo.Host) {
- resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo)
- if err != nil {
- return "", err
- }
- return resp.TenantDiscoveryEndpoint, nil
- } else if authorityInfo.Region != "" {
- resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo)
- if err != nil {
- return "", err
- }
- return resp.TenantDiscoveryEndpoint, nil
-
- }
-
- return authorityInfo.CanonicalAuthorityURI + "v2.0/.well-known/openid-configuration", nil
-}
-
-func adfsDomainFromUpn(userPrincipalName string) (string, error) {
- parts := strings.Split(userPrincipalName, "@")
- if len(parts) < 2 {
- return "", errors.New("no @ present in user principal name")
- }
- return parts[1], nil
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go
deleted file mode 100644
index 4561d72d..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options/options.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-package options
-
-import (
- "errors"
- "fmt"
-)
-
-// CallOption implements an optional argument to a method call. See
-// https://blog.devgenius.io/go-call-option-that-can-be-used-with-multiple-methods-6c81734f3dbe
-// for an explanation of the usage pattern.
-type CallOption interface {
- Do(any) error
- callOption()
-}
-
-// ApplyOptions applies all the callOptions to options. options must be a pointer to a struct and
-// callOptions must be a list of objects that implement CallOption.
-func ApplyOptions[O, C any](options O, callOptions []C) error {
- for _, o := range callOptions {
- if t, ok := any(o).(CallOption); !ok {
- return fmt.Errorf("unexpected option type %T", o)
- } else if err := t.Do(options); err != nil {
- return err
- }
- }
- return nil
-}
-
-// NewCallOption returns a new CallOption whose Do() method calls function "f".
-func NewCallOption(f func(any) error) CallOption {
- if f == nil {
- // This isn't a practical concern because only an MSAL maintainer can get
- // us here, by implementing a do-nothing option. But if someone does that,
- // the below ensures the method invoked with the option returns an error.
- return callOption(func(any) error {
- return errors.New("invalid option: missing implementation")
- })
- }
- return callOption(f)
-}
-
-// callOption is an adapter for a function to a CallOption
-type callOption func(any) error
-
-func (c callOption) Do(a any) error {
- return c(a)
-}
-
-func (callOption) callOption() {}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go
deleted file mode 100644
index d8ab7135..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared/shared.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-package shared
-
-import (
- "net/http"
- "reflect"
- "strings"
-)
-
-const (
- // CacheKeySeparator is used in creating the keys of the cache.
- CacheKeySeparator = "-"
-)
-
-type Account struct {
- HomeAccountID string `json:"home_account_id,omitempty"`
- Environment string `json:"environment,omitempty"`
- Realm string `json:"realm,omitempty"`
- LocalAccountID string `json:"local_account_id,omitempty"`
- AuthorityType string `json:"authority_type,omitempty"`
- PreferredUsername string `json:"username,omitempty"`
- GivenName string `json:"given_name,omitempty"`
- FamilyName string `json:"family_name,omitempty"`
- MiddleName string `json:"middle_name,omitempty"`
- Name string `json:"name,omitempty"`
- AlternativeID string `json:"alternative_account_id,omitempty"`
- RawClientInfo string `json:"client_info,omitempty"`
- UserAssertionHash string `json:"user_assertion_hash,omitempty"`
-
- AdditionalFields map[string]interface{}
-}
-
-// NewAccount creates an account.
-func NewAccount(homeAccountID, env, realm, localAccountID, authorityType, username string) Account {
- return Account{
- HomeAccountID: homeAccountID,
- Environment: env,
- Realm: realm,
- LocalAccountID: localAccountID,
- AuthorityType: authorityType,
- PreferredUsername: username,
- }
-}
-
-// Key creates the key for storing accounts in the cache.
-func (acc Account) Key() string {
- key := strings.Join([]string{acc.HomeAccountID, acc.Environment, acc.Realm}, CacheKeySeparator)
- return strings.ToLower(key)
-}
-
-// IsZero checks the zero value of account.
-func (acc Account) IsZero() bool {
- v := reflect.ValueOf(acc)
- for i := 0; i < v.NumField(); i++ {
- field := v.Field(i)
- if !field.IsZero() {
- switch field.Kind() {
- case reflect.Map, reflect.Slice:
- if field.Len() == 0 {
- continue
- }
- }
- return false
- }
- }
- return true
-}
-
-// DefaultClient is our default shared HTTP client.
-var DefaultClient = &http.Client{}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go
deleted file mode 100644
index eb16b405..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-// Package version keeps the version number of the client package.
-package version
-
-// Version is the version of this client package that is communicated to the server.
-const Version = "1.2.0"
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
deleted file mode 100644
index 392e5e43..00000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
+++ /dev/null
@@ -1,756 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-/*
-Package public provides a client for authentication of "public" applications. A "public"
-application is defined as an app that runs on client devices (android, ios, windows, linux, ...).
-These devices are "untrusted" and access resources via web APIs that must authenticate.
-*/
-package public
-
-/*
-Design note:
-
-public.Client uses client.Base as an embedded type. client.Base statically assigns its attributes
-during creation. As it doesn't have any pointers in it, anything borrowed from it, such as
-Base.AuthParams is a copy that is free to be manipulated here.
-*/
-
-// TODO(msal): This should have example code for each method on client using Go's example doc framework.
-// base usage details should be includee in the package documentation.
-
-import (
- "context"
- "crypto/rand"
- "crypto/sha256"
- "encoding/base64"
- "errors"
- "fmt"
- "net/url"
- "reflect"
- "strconv"
-
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/options"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
- "github.com/google/uuid"
- "github.com/pkg/browser"
-)
-
-// AuthResult contains the results of one token acquisition operation.
-// For details see https://aka.ms/msal-net-authenticationresult
-type AuthResult = base.AuthResult
-
-type AuthenticationScheme = authority.AuthenticationScheme
-
-type Account = shared.Account
-
-var errNoAccount = errors.New("no account was specified with public.WithSilentAccount(), or the specified account is invalid")
-
-// clientOptions configures the Client's behavior.
-type clientOptions struct {
- accessor cache.ExportReplace
- authority string
- capabilities []string
- disableInstanceDiscovery bool
- httpClient ops.HTTPClient
-}
-
-func (p *clientOptions) validate() error {
- u, err := url.Parse(p.authority)
- if err != nil {
- return fmt.Errorf("Authority options cannot be URL parsed: %w", err)
- }
- if u.Scheme != "https" {
- return fmt.Errorf("Authority(%s) did not start with https://", u.String())
- }
- return nil
-}
-
-// Option is an optional argument to the New constructor.
-type Option func(o *clientOptions)
-
-// WithAuthority allows for a custom authority to be set. This must be a valid https url.
-func WithAuthority(authority string) Option {
- return func(o *clientOptions) {
- o.authority = authority
- }
-}
-
-// WithCache provides an accessor that will read and write authentication data to an externally managed cache.
-func WithCache(accessor cache.ExportReplace) Option {
- return func(o *clientOptions) {
- o.accessor = accessor
- }
-}
-
-// WithClientCapabilities allows configuring one or more client capabilities such as "CP1"
-func WithClientCapabilities(capabilities []string) Option {
- return func(o *clientOptions) {
- // there's no danger of sharing the slice's underlying memory with the application because
- // this slice is simply passed to base.WithClientCapabilities, which copies its data
- o.capabilities = capabilities
- }
-}
-
-// WithHTTPClient allows for a custom HTTP client to be set.
-func WithHTTPClient(httpClient ops.HTTPClient) Option {
- return func(o *clientOptions) {
- o.httpClient = httpClient
- }
-}
-
-// WithInstanceDiscovery set to false to disable authority validation (to support private cloud scenarios)
-func WithInstanceDiscovery(enabled bool) Option {
- return func(o *clientOptions) {
- o.disableInstanceDiscovery = !enabled
- }
-}
-
-// Client is a representation of authentication client for public applications as defined in the
-// package doc. For more information, visit https://docs.microsoft.com/azure/active-directory/develop/msal-client-applications.
-type Client struct {
- base base.Client
-}
-
-// New is the constructor for Client.
-func New(clientID string, options ...Option) (Client, error) {
- opts := clientOptions{
- authority: base.AuthorityPublicCloud,
- httpClient: shared.DefaultClient,
- }
-
- for _, o := range options {
- o(&opts)
- }
- if err := opts.validate(); err != nil {
- return Client{}, err
- }
-
- base, err := base.New(clientID, opts.authority, oauth.New(opts.httpClient), base.WithCacheAccessor(opts.accessor), base.WithClientCapabilities(opts.capabilities), base.WithInstanceDiscovery(!opts.disableInstanceDiscovery))
- if err != nil {
- return Client{}, err
- }
- return Client{base}, nil
-}
-
-// authCodeURLOptions contains options for AuthCodeURL
-type authCodeURLOptions struct {
- claims, loginHint, tenantID, domainHint string
-}
-
-// AuthCodeURLOption is implemented by options for AuthCodeURL
-type AuthCodeURLOption interface {
- authCodeURLOption()
-}
-
-// AuthCodeURL creates a URL used to acquire an authorization code.
-//
-// Options: [WithClaims], [WithDomainHint], [WithLoginHint], [WithTenantID]
-func (pca Client) AuthCodeURL(ctx context.Context, clientID, redirectURI string, scopes []string, opts ...AuthCodeURLOption) (string, error) {
- o := authCodeURLOptions{}
- if err := options.ApplyOptions(&o, opts); err != nil {
- return "", err
- }
- ap, err := pca.base.AuthParams.WithTenant(o.tenantID)
- if err != nil {
- return "", err
- }
- ap.Claims = o.claims
- ap.LoginHint = o.loginHint
- ap.DomainHint = o.domainHint
- return pca.base.AuthCodeURL(ctx, clientID, redirectURI, scopes, ap)
-}
-
-// WithClaims sets additional claims to request for the token, such as those required by conditional access policies.
-// Use this option when Azure AD returned a claims challenge for a prior request. The argument must be decoded.
-// This option is valid for any token acquisition method.
-func WithClaims(claims string) interface {
- AcquireByAuthCodeOption
- AcquireByDeviceCodeOption
- AcquireByUsernamePasswordOption
- AcquireInteractiveOption
- AcquireSilentOption
- AuthCodeURLOption
- options.CallOption
-} {
- return struct {
- AcquireByAuthCodeOption
- AcquireByDeviceCodeOption
- AcquireByUsernamePasswordOption
- AcquireInteractiveOption
- AcquireSilentOption
- AuthCodeURLOption
- options.CallOption
- }{
- CallOption: options.NewCallOption(
- func(a any) error {
- switch t := a.(type) {
- case *acquireTokenByAuthCodeOptions:
- t.claims = claims
- case *acquireTokenByDeviceCodeOptions:
- t.claims = claims
- case *acquireTokenByUsernamePasswordOptions:
- t.claims = claims
- case *acquireTokenSilentOptions:
- t.claims = claims
- case *authCodeURLOptions:
- t.claims = claims
- case *interactiveAuthOptions:
- t.claims = claims
- default:
- return fmt.Errorf("unexpected options type %T", a)
- }
- return nil
- },
- ),
- }
-}
-
-// WithAuthenticationScheme is an extensibility mechanism designed to be used only by Azure Arc for proof of possession access tokens.
-func WithAuthenticationScheme(authnScheme AuthenticationScheme) interface {
- AcquireSilentOption
- AcquireInteractiveOption
- AcquireByUsernamePasswordOption
- options.CallOption
-} {
- return struct {
- AcquireSilentOption
- AcquireInteractiveOption
- AcquireByUsernamePasswordOption
- options.CallOption
- }{
- CallOption: options.NewCallOption(
- func(a any) error {
- switch t := a.(type) {
- case *acquireTokenSilentOptions:
- t.authnScheme = authnScheme
- case *interactiveAuthOptions:
- t.authnScheme = authnScheme
- case *acquireTokenByUsernamePasswordOptions:
- t.authnScheme = authnScheme
- default:
- return fmt.Errorf("unexpected options type %T", a)
- }
- return nil
- },
- ),
- }
-}
-
-// WithTenantID specifies a tenant for a single authentication. It may be different than the tenant set in [New] by [WithAuthority].
-// This option is valid for any token acquisition method.
-func WithTenantID(tenantID string) interface {
- AcquireByAuthCodeOption
- AcquireByDeviceCodeOption
- AcquireByUsernamePasswordOption
- AcquireInteractiveOption
- AcquireSilentOption
- AuthCodeURLOption
- options.CallOption
-} {
- return struct {
- AcquireByAuthCodeOption
- AcquireByDeviceCodeOption
- AcquireByUsernamePasswordOption
- AcquireInteractiveOption
- AcquireSilentOption
- AuthCodeURLOption
- options.CallOption
- }{
- CallOption: options.NewCallOption(
- func(a any) error {
- switch t := a.(type) {
- case *acquireTokenByAuthCodeOptions:
- t.tenantID = tenantID
- case *acquireTokenByDeviceCodeOptions:
- t.tenantID = tenantID
- case *acquireTokenByUsernamePasswordOptions:
- t.tenantID = tenantID
- case *acquireTokenSilentOptions:
- t.tenantID = tenantID
- case *authCodeURLOptions:
- t.tenantID = tenantID
- case *interactiveAuthOptions:
- t.tenantID = tenantID
- default:
- return fmt.Errorf("unexpected options type %T", a)
- }
- return nil
- },
- ),
- }
-}
-
-// acquireTokenSilentOptions are all the optional settings to an AcquireTokenSilent() call.
-// These are set by using various AcquireTokenSilentOption functions.
-type acquireTokenSilentOptions struct {
- account Account
- claims, tenantID string
- authnScheme AuthenticationScheme
-}
-
-// AcquireSilentOption is implemented by options for AcquireTokenSilent
-type AcquireSilentOption interface {
- acquireSilentOption()
-}
-
-// WithSilentAccount uses the passed account during an AcquireTokenSilent() call.
-func WithSilentAccount(account Account) interface {
- AcquireSilentOption
- options.CallOption
-} {
- return struct {
- AcquireSilentOption
- options.CallOption
- }{
- CallOption: options.NewCallOption(
- func(a any) error {
- switch t := a.(type) {
- case *acquireTokenSilentOptions:
- t.account = account
- default:
- return fmt.Errorf("unexpected options type %T", a)
- }
- return nil
- },
- ),
- }
-}
-
-// AcquireTokenSilent acquires a token from either the cache or using a refresh token.
-//
-// Options: [WithClaims], [WithSilentAccount], [WithTenantID]
-func (pca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts ...AcquireSilentOption) (AuthResult, error) {
- o := acquireTokenSilentOptions{}
- if err := options.ApplyOptions(&o, opts); err != nil {
- return AuthResult{}, err
- }
- // an account is required to find user tokens in the cache
- if reflect.ValueOf(o.account).IsZero() {
- return AuthResult{}, errNoAccount
- }
-
- silentParameters := base.AcquireTokenSilentParameters{
- Scopes: scopes,
- Account: o.account,
- Claims: o.claims,
- RequestType: accesstokens.ATPublic,
- IsAppCache: false,
- TenantID: o.tenantID,
- AuthnScheme: o.authnScheme,
- }
-
- return pca.base.AcquireTokenSilent(ctx, silentParameters)
-}
-
-// acquireTokenByUsernamePasswordOptions contains optional configuration for AcquireTokenByUsernamePassword
-type acquireTokenByUsernamePasswordOptions struct {
- claims, tenantID string
- authnScheme AuthenticationScheme
-}
-
-// AcquireByUsernamePasswordOption is implemented by options for AcquireTokenByUsernamePassword
-type AcquireByUsernamePasswordOption interface {
- acquireByUsernamePasswordOption()
-}
-
-// AcquireTokenByUsernamePassword acquires a security token from the authority, via Username/Password Authentication.
-// NOTE: this flow is NOT recommended.
-//
-// Options: [WithClaims], [WithTenantID]
-func (pca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username, password string, opts ...AcquireByUsernamePasswordOption) (AuthResult, error) {
- o := acquireTokenByUsernamePasswordOptions{}
- if err := options.ApplyOptions(&o, opts); err != nil {
- return AuthResult{}, err
- }
- authParams, err := pca.base.AuthParams.WithTenant(o.tenantID)
- if err != nil {
- return AuthResult{}, err
- }
- authParams.Scopes = scopes
- authParams.AuthorizationType = authority.ATUsernamePassword
- authParams.Claims = o.claims
- authParams.Username = username
- authParams.Password = password
- if o.authnScheme != nil {
- authParams.AuthnScheme = o.authnScheme
- }
-
- token, err := pca.base.Token.UsernamePassword(ctx, authParams)
- if err != nil {
- return AuthResult{}, err
- }
- return pca.base.AuthResultFromToken(ctx, authParams, token, true)
-}
-
-type DeviceCodeResult = accesstokens.DeviceCodeResult
-
-// DeviceCode provides the results of the device code flows first stage (containing the code)
-// that must be entered on the second device and provides a method to retrieve the AuthenticationResult
-// once that code has been entered and verified.
-type DeviceCode struct {
- // Result holds the information about the device code (such as the code).
- Result DeviceCodeResult
-
- authParams authority.AuthParams
- client Client
- dc oauth.DeviceCode
-}
-
-// AuthenticationResult retreives the AuthenticationResult once the user enters the code
-// on the second device. Until then it blocks until the .AcquireTokenByDeviceCode() context
-// is cancelled or the token expires.
-func (d DeviceCode) AuthenticationResult(ctx context.Context) (AuthResult, error) {
- token, err := d.dc.Token(ctx)
- if err != nil {
- return AuthResult{}, err
- }
- return d.client.base.AuthResultFromToken(ctx, d.authParams, token, true)
-}
-
-// acquireTokenByDeviceCodeOptions contains optional configuration for AcquireTokenByDeviceCode
-type acquireTokenByDeviceCodeOptions struct {
- claims, tenantID string
-}
-
-// AcquireByDeviceCodeOption is implemented by options for AcquireTokenByDeviceCode
-type AcquireByDeviceCodeOption interface {
- acquireByDeviceCodeOptions()
-}
-
-// AcquireTokenByDeviceCode acquires a security token from the authority, by acquiring a device code and using that to acquire the token.
-// Users need to create an AcquireTokenDeviceCodeParameters instance and pass it in.
-//
-// Options: [WithClaims], [WithTenantID]
-func (pca Client) AcquireTokenByDeviceCode(ctx context.Context, scopes []string, opts ...AcquireByDeviceCodeOption) (DeviceCode, error) {
- o := acquireTokenByDeviceCodeOptions{}
- if err := options.ApplyOptions(&o, opts); err != nil {
- return DeviceCode{}, err
- }
- authParams, err := pca.base.AuthParams.WithTenant(o.tenantID)
- if err != nil {
- return DeviceCode{}, err
- }
- authParams.Scopes = scopes
- authParams.AuthorizationType = authority.ATDeviceCode
- authParams.Claims = o.claims
-
- dc, err := pca.base.Token.DeviceCode(ctx, authParams)
- if err != nil {
- return DeviceCode{}, err
- }
-
- return DeviceCode{Result: dc.Result, authParams: authParams, client: pca, dc: dc}, nil
-}
-
-// acquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow.
-type acquireTokenByAuthCodeOptions struct {
- challenge, claims, tenantID string
-}
-
-// AcquireByAuthCodeOption is implemented by options for AcquireTokenByAuthCode
-type AcquireByAuthCodeOption interface {
- acquireByAuthCodeOption()
-}
-
-// WithChallenge allows you to provide a code for the .AcquireTokenByAuthCode() call.
-func WithChallenge(challenge string) interface {
- AcquireByAuthCodeOption
- options.CallOption
-} {
- return struct {
- AcquireByAuthCodeOption
- options.CallOption
- }{
- CallOption: options.NewCallOption(
- func(a any) error {
- switch t := a.(type) {
- case *acquireTokenByAuthCodeOptions:
- t.challenge = challenge
- default:
- return fmt.Errorf("unexpected options type %T", a)
- }
- return nil
- },
- ),
- }
-}
-
-// AcquireTokenByAuthCode is a request to acquire a security token from the authority, using an authorization code.
-// The specified redirect URI must be the same URI that was used when the authorization code was requested.
-//
-// Options: [WithChallenge], [WithClaims], [WithTenantID]
-func (pca Client) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, opts ...AcquireByAuthCodeOption) (AuthResult, error) {
- o := acquireTokenByAuthCodeOptions{}
- if err := options.ApplyOptions(&o, opts); err != nil {
- return AuthResult{}, err
- }
-
- params := base.AcquireTokenAuthCodeParameters{
- Scopes: scopes,
- Code: code,
- Challenge: o.challenge,
- Claims: o.claims,
- AppType: accesstokens.ATPublic,
- RedirectURI: redirectURI,
- TenantID: o.tenantID,
- }
-
- return pca.base.AcquireTokenByAuthCode(ctx, params)
-}
-
-// Accounts gets all the accounts in the token cache.
-// If there are no accounts in the cache the returned slice is empty.
-func (pca Client) Accounts(ctx context.Context) ([]Account, error) {
- return pca.base.AllAccounts(ctx)
-}
-
-// RemoveAccount signs the account out and forgets account from token cache.
-func (pca Client) RemoveAccount(ctx context.Context, account Account) error {
- return pca.base.RemoveAccount(ctx, account)
-}
-
-// interactiveAuthOptions contains the optional parameters used to acquire an access token for interactive auth code flow.
-type interactiveAuthOptions struct {
- claims, domainHint, loginHint, redirectURI, tenantID string
- openURL func(url string) error
- authnScheme AuthenticationScheme
-}
-
-// AcquireInteractiveOption is implemented by options for AcquireTokenInteractive
-type AcquireInteractiveOption interface {
- acquireInteractiveOption()
-}
-
-// WithLoginHint pre-populates the login prompt with a username.
-func WithLoginHint(username string) interface {
- AcquireInteractiveOption
- AuthCodeURLOption
- options.CallOption
-} {
- return struct {
- AcquireInteractiveOption
- AuthCodeURLOption
- options.CallOption
- }{
- CallOption: options.NewCallOption(
- func(a any) error {
- switch t := a.(type) {
- case *authCodeURLOptions:
- t.loginHint = username
- case *interactiveAuthOptions:
- t.loginHint = username
- default:
- return fmt.Errorf("unexpected options type %T", a)
- }
- return nil
- },
- ),
- }
-}
-
-// WithDomainHint adds the IdP domain as domain_hint query parameter in the auth url.
-func WithDomainHint(domain string) interface {
- AcquireInteractiveOption
- AuthCodeURLOption
- options.CallOption
-} {
- return struct {
- AcquireInteractiveOption
- AuthCodeURLOption
- options.CallOption
- }{
- CallOption: options.NewCallOption(
- func(a any) error {
- switch t := a.(type) {
- case *authCodeURLOptions:
- t.domainHint = domain
- case *interactiveAuthOptions:
- t.domainHint = domain
- default:
- return fmt.Errorf("unexpected options type %T", a)
- }
- return nil
- },
- ),
- }
-}
-
-// WithRedirectURI sets a port for the local server used in interactive authentication, for
-// example http://localhost:port. All URI components other than the port are ignored.
-func WithRedirectURI(redirectURI string) interface {
- AcquireInteractiveOption
- options.CallOption
-} {
- return struct {
- AcquireInteractiveOption
- options.CallOption
- }{
- CallOption: options.NewCallOption(
- func(a any) error {
- switch t := a.(type) {
- case *interactiveAuthOptions:
- t.redirectURI = redirectURI
- default:
- return fmt.Errorf("unexpected options type %T", a)
- }
- return nil
- },
- ),
- }
-}
-
-// WithOpenURL allows you to provide a function to open the browser to complete the interactive login, instead of launching the system default browser.
-func WithOpenURL(openURL func(url string) error) interface {
- AcquireInteractiveOption
- options.CallOption
-} {
- return struct {
- AcquireInteractiveOption
- options.CallOption
- }{
- CallOption: options.NewCallOption(
- func(a any) error {
- switch t := a.(type) {
- case *interactiveAuthOptions:
- t.openURL = openURL
- default:
- return fmt.Errorf("unexpected options type %T", a)
- }
- return nil
- },
- ),
- }
-}
-
-// AcquireTokenInteractive acquires a security token from the authority using the default web browser to select the account.
-// https://docs.microsoft.com/en-us/azure/active-directory/develop/msal-authentication-flows#interactive-and-non-interactive-authentication
-//
-// Options: [WithDomainHint], [WithLoginHint], [WithOpenURL], [WithRedirectURI], [WithTenantID]
-func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string, opts ...AcquireInteractiveOption) (AuthResult, error) {
- o := interactiveAuthOptions{}
- if err := options.ApplyOptions(&o, opts); err != nil {
- return AuthResult{}, err
- }
- // the code verifier is a random 32-byte sequence that's been base-64 encoded without padding.
- // it's used to prevent MitM attacks during auth code flow, see https://tools.ietf.org/html/rfc7636
- cv, challenge, err := codeVerifier()
- if err != nil {
- return AuthResult{}, err
- }
- var redirectURL *url.URL
- if o.redirectURI != "" {
- redirectURL, err = url.Parse(o.redirectURI)
- if err != nil {
- return AuthResult{}, err
- }
- }
- if o.openURL == nil {
- o.openURL = browser.OpenURL
- }
- authParams, err := pca.base.AuthParams.WithTenant(o.tenantID)
- if err != nil {
- return AuthResult{}, err
- }
- authParams.Scopes = scopes
- authParams.AuthorizationType = authority.ATInteractive
- authParams.Claims = o.claims
- authParams.CodeChallenge = challenge
- authParams.CodeChallengeMethod = "S256"
- authParams.LoginHint = o.loginHint
- authParams.DomainHint = o.domainHint
- authParams.State = uuid.New().String()
- authParams.Prompt = "select_account"
- if o.authnScheme != nil {
- authParams.AuthnScheme = o.authnScheme
- }
- res, err := pca.browserLogin(ctx, redirectURL, authParams, o.openURL)
- if err != nil {
- return AuthResult{}, err
- }
- authParams.Redirecturi = res.redirectURI
-
- req, err := accesstokens.NewCodeChallengeRequest(authParams, accesstokens.ATPublic, nil, res.authCode, cv)
- if err != nil {
- return AuthResult{}, err
- }
-
- token, err := pca.base.Token.AuthCode(ctx, req)
- if err != nil {
- return AuthResult{}, err
- }
-
- return pca.base.AuthResultFromToken(ctx, authParams, token, true)
-}
-
-type interactiveAuthResult struct {
- authCode string
- redirectURI string
-}
-
-// parses the port number from the provided URL.
-// returns 0 if nil or no port is specified.
-func parsePort(u *url.URL) (int, error) {
- if u == nil {
- return 0, nil
- }
- p := u.Port()
- if p == "" {
- return 0, nil
- }
- return strconv.Atoi(p)
-}
-
-// browserLogin calls openURL and waits for a user to log in
-func (pca Client) browserLogin(ctx context.Context, redirectURI *url.URL, params authority.AuthParams, openURL func(string) error) (interactiveAuthResult, error) {
- // start local redirect server so login can call us back
- port, err := parsePort(redirectURI)
- if err != nil {
- return interactiveAuthResult{}, err
- }
- srv, err := local.New(params.State, port)
- if err != nil {
- return interactiveAuthResult{}, err
- }
- defer srv.Shutdown()
- params.Scopes = accesstokens.AppendDefaultScopes(params)
- authURL, err := pca.base.AuthCodeURL(ctx, params.ClientID, srv.Addr, params.Scopes, params)
- if err != nil {
- return interactiveAuthResult{}, err
- }
- // open browser window so user can select credentials
- if err := openURL(authURL); err != nil {
- return interactiveAuthResult{}, err
- }
- // now wait until the logic calls us back
- res := srv.Result(ctx)
- if res.Err != nil {
- return interactiveAuthResult{}, res.Err
- }
- return interactiveAuthResult{
- authCode: res.Code,
- redirectURI: srv.Addr,
- }, nil
-}
-
-// creates a code verifier string along with its SHA256 hash which
-// is used as the challenge when requesting an auth code.
-// used in interactive auth flow for PKCE.
-func codeVerifier() (codeVerifier string, challenge string, err error) {
- cvBytes := make([]byte, 32)
- if _, err = rand.Read(cvBytes); err != nil {
- return
- }
- codeVerifier = base64.RawURLEncoding.EncodeToString(cvBytes)
- // for PKCE, create a hash of the code verifier
- cvh := sha256.Sum256([]byte(codeVerifier))
- challenge = base64.RawURLEncoding.EncodeToString(cvh[:])
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/AUTHORS b/vendor/github.com/ProtonMail/go-crypto/AUTHORS
deleted file mode 100644
index 2b00ddba..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/AUTHORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at https://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS b/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS
deleted file mode 100644
index 1fbd3e97..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at https://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/ProtonMail/go-crypto/LICENSE b/vendor/github.com/ProtonMail/go-crypto/LICENSE
deleted file mode 100644
index 6a66aea5..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/ProtonMail/go-crypto/PATENTS b/vendor/github.com/ProtonMail/go-crypto/PATENTS
deleted file mode 100644
index 73309904..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/PATENTS
+++ /dev/null
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.
diff --git a/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go b/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go
deleted file mode 100644
index c85e6bef..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go
+++ /dev/null
@@ -1,381 +0,0 @@
-package bitcurves
-
-// Copyright 2010 The Go Authors. All rights reserved.
-// Copyright 2011 ThePiachu. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package bitelliptic implements several Koblitz elliptic curves over prime
-// fields.
-
-// This package operates, internally, on Jacobian coordinates. For a given
-// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, z1)
-// where x = x1/z1² and y = y1/z1³. The greatest speedups come when the whole
-// calculation can be performed within the transform (as in ScalarMult and
-// ScalarBaseMult). But even for Add and Double, it's faster to apply and
-// reverse the transform than to operate in affine coordinates.
-
-import (
- "crypto/elliptic"
- "io"
- "math/big"
- "sync"
-)
-
-// A BitCurve represents a Koblitz Curve with a=0.
-// See http://www.hyperelliptic.org/EFD/g1p/auto-shortw.html
-type BitCurve struct {
- Name string
- P *big.Int // the order of the underlying field
- N *big.Int // the order of the base point
- B *big.Int // the constant of the BitCurve equation
- Gx, Gy *big.Int // (x,y) of the base point
- BitSize int // the size of the underlying field
-}
-
-// Params returns the parameters of the given BitCurve (see BitCurve struct)
-func (bitCurve *BitCurve) Params() (cp *elliptic.CurveParams) {
- cp = new(elliptic.CurveParams)
- cp.Name = bitCurve.Name
- cp.P = bitCurve.P
- cp.N = bitCurve.N
- cp.Gx = bitCurve.Gx
- cp.Gy = bitCurve.Gy
- cp.BitSize = bitCurve.BitSize
- return cp
-}
-
-// IsOnCurve returns true if the given (x,y) lies on the BitCurve.
-func (bitCurve *BitCurve) IsOnCurve(x, y *big.Int) bool {
- // y² = x³ + b
- y2 := new(big.Int).Mul(y, y) //y²
- y2.Mod(y2, bitCurve.P) //y²%P
-
- x3 := new(big.Int).Mul(x, x) //x²
- x3.Mul(x3, x) //x³
-
- x3.Add(x3, bitCurve.B) //x³+B
- x3.Mod(x3, bitCurve.P) //(x³+B)%P
-
- return x3.Cmp(y2) == 0
-}
-
-// affineFromJacobian reverses the Jacobian transform. See the comment at the
-// top of the file.
-func (bitCurve *BitCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) {
- if z.Cmp(big.NewInt(0)) == 0 {
- panic("bitcurve: Can't convert to affine with Jacobian Z = 0")
- }
- // x = YZ^2 mod P
- zinv := new(big.Int).ModInverse(z, bitCurve.P)
- zinvsq := new(big.Int).Mul(zinv, zinv)
-
- xOut = new(big.Int).Mul(x, zinvsq)
- xOut.Mod(xOut, bitCurve.P)
- // y = YZ^3 mod P
- zinvsq.Mul(zinvsq, zinv)
- yOut = new(big.Int).Mul(y, zinvsq)
- yOut.Mod(yOut, bitCurve.P)
- return xOut, yOut
-}
-
-// Add returns the sum of (x1,y1) and (x2,y2)
-func (bitCurve *BitCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
- z := new(big.Int).SetInt64(1)
- x, y, z := bitCurve.addJacobian(x1, y1, z, x2, y2, z)
- return bitCurve.affineFromJacobian(x, y, z)
-}
-
-// addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and
-// (x2, y2, z2) and returns their sum, also in Jacobian form.
-func (bitCurve *BitCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) {
- // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl
- z1z1 := new(big.Int).Mul(z1, z1)
- z1z1.Mod(z1z1, bitCurve.P)
- z2z2 := new(big.Int).Mul(z2, z2)
- z2z2.Mod(z2z2, bitCurve.P)
-
- u1 := new(big.Int).Mul(x1, z2z2)
- u1.Mod(u1, bitCurve.P)
- u2 := new(big.Int).Mul(x2, z1z1)
- u2.Mod(u2, bitCurve.P)
- h := new(big.Int).Sub(u2, u1)
- if h.Sign() == -1 {
- h.Add(h, bitCurve.P)
- }
- i := new(big.Int).Lsh(h, 1)
- i.Mul(i, i)
- j := new(big.Int).Mul(h, i)
-
- s1 := new(big.Int).Mul(y1, z2)
- s1.Mul(s1, z2z2)
- s1.Mod(s1, bitCurve.P)
- s2 := new(big.Int).Mul(y2, z1)
- s2.Mul(s2, z1z1)
- s2.Mod(s2, bitCurve.P)
- r := new(big.Int).Sub(s2, s1)
- if r.Sign() == -1 {
- r.Add(r, bitCurve.P)
- }
- r.Lsh(r, 1)
- v := new(big.Int).Mul(u1, i)
-
- x3 := new(big.Int).Set(r)
- x3.Mul(x3, x3)
- x3.Sub(x3, j)
- x3.Sub(x3, v)
- x3.Sub(x3, v)
- x3.Mod(x3, bitCurve.P)
-
- y3 := new(big.Int).Set(r)
- v.Sub(v, x3)
- y3.Mul(y3, v)
- s1.Mul(s1, j)
- s1.Lsh(s1, 1)
- y3.Sub(y3, s1)
- y3.Mod(y3, bitCurve.P)
-
- z3 := new(big.Int).Add(z1, z2)
- z3.Mul(z3, z3)
- z3.Sub(z3, z1z1)
- if z3.Sign() == -1 {
- z3.Add(z3, bitCurve.P)
- }
- z3.Sub(z3, z2z2)
- if z3.Sign() == -1 {
- z3.Add(z3, bitCurve.P)
- }
- z3.Mul(z3, h)
- z3.Mod(z3, bitCurve.P)
-
- return x3, y3, z3
-}
-
-// Double returns 2*(x,y)
-func (bitCurve *BitCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
- z1 := new(big.Int).SetInt64(1)
- return bitCurve.affineFromJacobian(bitCurve.doubleJacobian(x1, y1, z1))
-}
-
-// doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and
-// returns its double, also in Jacobian form.
-func (bitCurve *BitCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) {
- // See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
-
- a := new(big.Int).Mul(x, x) //X1²
- b := new(big.Int).Mul(y, y) //Y1²
- c := new(big.Int).Mul(b, b) //B²
-
- d := new(big.Int).Add(x, b) //X1+B
- d.Mul(d, d) //(X1+B)²
- d.Sub(d, a) //(X1+B)²-A
- d.Sub(d, c) //(X1+B)²-A-C
- d.Mul(d, big.NewInt(2)) //2*((X1+B)²-A-C)
-
- e := new(big.Int).Mul(big.NewInt(3), a) //3*A
- f := new(big.Int).Mul(e, e) //E²
-
- x3 := new(big.Int).Mul(big.NewInt(2), d) //2*D
- x3.Sub(f, x3) //F-2*D
- x3.Mod(x3, bitCurve.P)
-
- y3 := new(big.Int).Sub(d, x3) //D-X3
- y3.Mul(e, y3) //E*(D-X3)
- y3.Sub(y3, new(big.Int).Mul(big.NewInt(8), c)) //E*(D-X3)-8*C
- y3.Mod(y3, bitCurve.P)
-
- z3 := new(big.Int).Mul(y, z) //Y1*Z1
- z3.Mul(big.NewInt(2), z3) //3*Y1*Z1
- z3.Mod(z3, bitCurve.P)
-
- return x3, y3, z3
-}
-
-// TODO: double check if it is okay
-// ScalarMult returns k*(Bx,By) where k is a number in big-endian form.
-func (bitCurve *BitCurve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {
- // We have a slight problem in that the identity of the group (the
- // point at infinity) cannot be represented in (x, y) form on a finite
- // machine. Thus the standard add/double algorithm has to be tweaked
- // slightly: our initial state is not the identity, but x, and we
- // ignore the first true bit in |k|. If we don't find any true bits in
- // |k|, then we return nil, nil, because we cannot return the identity
- // element.
-
- Bz := new(big.Int).SetInt64(1)
- x := Bx
- y := By
- z := Bz
-
- seenFirstTrue := false
- for _, byte := range k {
- for bitNum := 0; bitNum < 8; bitNum++ {
- if seenFirstTrue {
- x, y, z = bitCurve.doubleJacobian(x, y, z)
- }
- if byte&0x80 == 0x80 {
- if !seenFirstTrue {
- seenFirstTrue = true
- } else {
- x, y, z = bitCurve.addJacobian(Bx, By, Bz, x, y, z)
- }
- }
- byte <<= 1
- }
- }
-
- if !seenFirstTrue {
- return nil, nil
- }
-
- return bitCurve.affineFromJacobian(x, y, z)
-}
-
-// ScalarBaseMult returns k*G, where G is the base point of the group and k is
-// an integer in big-endian form.
-func (bitCurve *BitCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
- return bitCurve.ScalarMult(bitCurve.Gx, bitCurve.Gy, k)
-}
-
-var mask = []byte{0xff, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f}
-
-// TODO: double check if it is okay
-// GenerateKey returns a public/private key pair. The private key is generated
-// using the given reader, which must return random data.
-func (bitCurve *BitCurve) GenerateKey(rand io.Reader) (priv []byte, x, y *big.Int, err error) {
- byteLen := (bitCurve.BitSize + 7) >> 3
- priv = make([]byte, byteLen)
-
- for x == nil {
- _, err = io.ReadFull(rand, priv)
- if err != nil {
- return
- }
- // We have to mask off any excess bits in the case that the size of the
- // underlying field is not a whole number of bytes.
- priv[0] &= mask[bitCurve.BitSize%8]
- // This is because, in tests, rand will return all zeros and we don't
- // want to get the point at infinity and loop forever.
- priv[1] ^= 0x42
- x, y = bitCurve.ScalarBaseMult(priv)
- }
- return
-}
-
-// Marshal converts a point into the form specified in section 4.3.6 of ANSI
-// X9.62.
-func (bitCurve *BitCurve) Marshal(x, y *big.Int) []byte {
- byteLen := (bitCurve.BitSize + 7) >> 3
-
- ret := make([]byte, 1+2*byteLen)
- ret[0] = 4 // uncompressed point
-
- xBytes := x.Bytes()
- copy(ret[1+byteLen-len(xBytes):], xBytes)
- yBytes := y.Bytes()
- copy(ret[1+2*byteLen-len(yBytes):], yBytes)
- return ret
-}
-
-// Unmarshal converts a point, serialised by Marshal, into an x, y pair. On
-// error, x = nil.
-func (bitCurve *BitCurve) Unmarshal(data []byte) (x, y *big.Int) {
- byteLen := (bitCurve.BitSize + 7) >> 3
- if len(data) != 1+2*byteLen {
- return
- }
- if data[0] != 4 { // uncompressed form
- return
- }
- x = new(big.Int).SetBytes(data[1 : 1+byteLen])
- y = new(big.Int).SetBytes(data[1+byteLen:])
- return
-}
-
-//curve parameters taken from:
-//http://www.secg.org/collateral/sec2_final.pdf
-
-var initonce sync.Once
-var secp160k1 *BitCurve
-var secp192k1 *BitCurve
-var secp224k1 *BitCurve
-var secp256k1 *BitCurve
-
-func initAll() {
- initS160()
- initS192()
- initS224()
- initS256()
-}
-
-func initS160() {
- // See SEC 2 section 2.4.1
- secp160k1 = new(BitCurve)
- secp160k1.Name = "secp160k1"
- secp160k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73", 16)
- secp160k1.N, _ = new(big.Int).SetString("0100000000000000000001B8FA16DFAB9ACA16B6B3", 16)
- secp160k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000007", 16)
- secp160k1.Gx, _ = new(big.Int).SetString("3B4C382CE37AA192A4019E763036F4F5DD4D7EBB", 16)
- secp160k1.Gy, _ = new(big.Int).SetString("938CF935318FDCED6BC28286531733C3F03C4FEE", 16)
- secp160k1.BitSize = 160
-}
-
-func initS192() {
- // See SEC 2 section 2.5.1
- secp192k1 = new(BitCurve)
- secp192k1.Name = "secp192k1"
- secp192k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFEE37", 16)
- secp192k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFE26F2FC170F69466A74DEFD8D", 16)
- secp192k1.B, _ = new(big.Int).SetString("000000000000000000000000000000000000000000000003", 16)
- secp192k1.Gx, _ = new(big.Int).SetString("DB4FF10EC057E9AE26B07D0280B7F4341DA5D1B1EAE06C7D", 16)
- secp192k1.Gy, _ = new(big.Int).SetString("9B2F2F6D9C5628A7844163D015BE86344082AA88D95E2F9D", 16)
- secp192k1.BitSize = 192
-}
-
-func initS224() {
- // See SEC 2 section 2.6.1
- secp224k1 = new(BitCurve)
- secp224k1.Name = "secp224k1"
- secp224k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFE56D", 16)
- secp224k1.N, _ = new(big.Int).SetString("010000000000000000000000000001DCE8D2EC6184CAF0A971769FB1F7", 16)
- secp224k1.B, _ = new(big.Int).SetString("00000000000000000000000000000000000000000000000000000005", 16)
- secp224k1.Gx, _ = new(big.Int).SetString("A1455B334DF099DF30FC28A169A467E9E47075A90F7E650EB6B7A45C", 16)
- secp224k1.Gy, _ = new(big.Int).SetString("7E089FED7FBA344282CAFBD6F7E319F7C0B0BD59E2CA4BDB556D61A5", 16)
- secp224k1.BitSize = 224
-}
-
-func initS256() {
- // See SEC 2 section 2.7.1
- secp256k1 = new(BitCurve)
- secp256k1.Name = "secp256k1"
- secp256k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16)
- secp256k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16)
- secp256k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000007", 16)
- secp256k1.Gx, _ = new(big.Int).SetString("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16)
- secp256k1.Gy, _ = new(big.Int).SetString("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16)
- secp256k1.BitSize = 256
-}
-
-// S160 returns a BitCurve which implements secp160k1 (see SEC 2 section 2.4.1)
-func S160() *BitCurve {
- initonce.Do(initAll)
- return secp160k1
-}
-
-// S192 returns a BitCurve which implements secp192k1 (see SEC 2 section 2.5.1)
-func S192() *BitCurve {
- initonce.Do(initAll)
- return secp192k1
-}
-
-// S224 returns a BitCurve which implements secp224k1 (see SEC 2 section 2.6.1)
-func S224() *BitCurve {
- initonce.Do(initAll)
- return secp224k1
-}
-
-// S256 returns a BitCurve which implements bitcurves (see SEC 2 section 2.7.1)
-func S256() *BitCurve {
- initonce.Do(initAll)
- return secp256k1
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go b/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go
deleted file mode 100644
index cb6676de..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Package brainpool implements Brainpool elliptic curves.
-// Implementation of rcurves is from github.com/ebfe/brainpool
-// Note that these curves are implemented with naive, non-constant time operations
-// and are likely not suitable for environments where timing attacks are a concern.
-package brainpool
-
-import (
- "crypto/elliptic"
- "math/big"
- "sync"
-)
-
-var (
- once sync.Once
- p256t1, p384t1, p512t1 *elliptic.CurveParams
- p256r1, p384r1, p512r1 *rcurve
-)
-
-func initAll() {
- initP256t1()
- initP384t1()
- initP512t1()
- initP256r1()
- initP384r1()
- initP512r1()
-}
-
-func initP256t1() {
- p256t1 = &elliptic.CurveParams{Name: "brainpoolP256t1"}
- p256t1.P, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377", 16)
- p256t1.N, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7", 16)
- p256t1.B, _ = new(big.Int).SetString("662C61C430D84EA4FE66A7733D0B76B7BF93EBC4AF2F49256AE58101FEE92B04", 16)
- p256t1.Gx, _ = new(big.Int).SetString("A3E8EB3CC1CFE7B7732213B23A656149AFA142C47AAFBC2B79A191562E1305F4", 16)
- p256t1.Gy, _ = new(big.Int).SetString("2D996C823439C56D7F7B22E14644417E69BCB6DE39D027001DABE8F35B25C9BE", 16)
- p256t1.BitSize = 256
-}
-
-func initP256r1() {
- twisted := p256t1
- params := &elliptic.CurveParams{
- Name: "brainpoolP256r1",
- P: twisted.P,
- N: twisted.N,
- BitSize: twisted.BitSize,
- }
- params.Gx, _ = new(big.Int).SetString("8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262", 16)
- params.Gy, _ = new(big.Int).SetString("547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997", 16)
- z, _ := new(big.Int).SetString("3E2D4BD9597B58639AE7AA669CAB9837CF5CF20A2C852D10F655668DFC150EF0", 16)
- p256r1 = newrcurve(twisted, params, z)
-}
-
-func initP384t1() {
- p384t1 = &elliptic.CurveParams{Name: "brainpoolP384t1"}
- p384t1.P, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53", 16)
- p384t1.N, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565", 16)
- p384t1.B, _ = new(big.Int).SetString("7F519EADA7BDA81BD826DBA647910F8C4B9346ED8CCDC64E4B1ABD11756DCE1D2074AA263B88805CED70355A33B471EE", 16)
- p384t1.Gx, _ = new(big.Int).SetString("18DE98B02DB9A306F2AFCD7235F72A819B80AB12EBD653172476FECD462AABFFC4FF191B946A5F54D8D0AA2F418808CC", 16)
- p384t1.Gy, _ = new(big.Int).SetString("25AB056962D30651A114AFD2755AD336747F93475B7A1FCA3B88F2B6A208CCFE469408584DC2B2912675BF5B9E582928", 16)
- p384t1.BitSize = 384
-}
-
-func initP384r1() {
- twisted := p384t1
- params := &elliptic.CurveParams{
- Name: "brainpoolP384r1",
- P: twisted.P,
- N: twisted.N,
- BitSize: twisted.BitSize,
- }
- params.Gx, _ = new(big.Int).SetString("1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E", 16)
- params.Gy, _ = new(big.Int).SetString("8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315", 16)
- z, _ := new(big.Int).SetString("41DFE8DD399331F7166A66076734A89CD0D2BCDB7D068E44E1F378F41ECBAE97D2D63DBC87BCCDDCCC5DA39E8589291C", 16)
- p384r1 = newrcurve(twisted, params, z)
-}
-
-func initP512t1() {
- p512t1 = &elliptic.CurveParams{Name: "brainpoolP512t1"}
- p512t1.P, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3", 16)
- p512t1.N, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069", 16)
- p512t1.B, _ = new(big.Int).SetString("7CBBBCF9441CFAB76E1890E46884EAE321F70C0BCB4981527897504BEC3E36A62BCDFA2304976540F6450085F2DAE145C22553B465763689180EA2571867423E", 16)
- p512t1.Gx, _ = new(big.Int).SetString("640ECE5C12788717B9C1BA06CBC2A6FEBA85842458C56DDE9DB1758D39C0313D82BA51735CDB3EA499AA77A7D6943A64F7A3F25FE26F06B51BAA2696FA9035DA", 16)
- p512t1.Gy, _ = new(big.Int).SetString("5B534BD595F5AF0FA2C892376C84ACE1BB4E3019B71634C01131159CAE03CEE9D9932184BEEF216BD71DF2DADF86A627306ECFF96DBB8BACE198B61E00F8B332", 16)
- p512t1.BitSize = 512
-}
-
-func initP512r1() {
- twisted := p512t1
- params := &elliptic.CurveParams{
- Name: "brainpoolP512r1",
- P: twisted.P,
- N: twisted.N,
- BitSize: twisted.BitSize,
- }
- params.Gx, _ = new(big.Int).SetString("81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822", 16)
- params.Gy, _ = new(big.Int).SetString("7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892", 16)
- z, _ := new(big.Int).SetString("12EE58E6764838B69782136F0F2D3BA06E27695716054092E60A80BEDB212B64E585D90BCE13761F85C3F1D2A64E3BE8FEA2220F01EBA5EEB0F35DBD29D922AB", 16)
- p512r1 = newrcurve(twisted, params, z)
-}
-
-// P256t1 returns a Curve which implements Brainpool P256t1 (see RFC 5639, section 3.4)
-func P256t1() elliptic.Curve {
- once.Do(initAll)
- return p256t1
-}
-
-// P256r1 returns a Curve which implements Brainpool P256r1 (see RFC 5639, section 3.4)
-func P256r1() elliptic.Curve {
- once.Do(initAll)
- return p256r1
-}
-
-// P384t1 returns a Curve which implements Brainpool P384t1 (see RFC 5639, section 3.6)
-func P384t1() elliptic.Curve {
- once.Do(initAll)
- return p384t1
-}
-
-// P384r1 returns a Curve which implements Brainpool P384r1 (see RFC 5639, section 3.6)
-func P384r1() elliptic.Curve {
- once.Do(initAll)
- return p384r1
-}
-
-// P512t1 returns a Curve which implements Brainpool P512t1 (see RFC 5639, section 3.7)
-func P512t1() elliptic.Curve {
- once.Do(initAll)
- return p512t1
-}
-
-// P512r1 returns a Curve which implements Brainpool P512r1 (see RFC 5639, section 3.7)
-func P512r1() elliptic.Curve {
- once.Do(initAll)
- return p512r1
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go b/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go
deleted file mode 100644
index 7e291d6a..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package brainpool
-
-import (
- "crypto/elliptic"
- "math/big"
-)
-
-var _ elliptic.Curve = (*rcurve)(nil)
-
-type rcurve struct {
- twisted elliptic.Curve
- params *elliptic.CurveParams
- z *big.Int
- zinv *big.Int
- z2 *big.Int
- z3 *big.Int
- zinv2 *big.Int
- zinv3 *big.Int
-}
-
-var (
- two = big.NewInt(2)
- three = big.NewInt(3)
-)
-
-func newrcurve(twisted elliptic.Curve, params *elliptic.CurveParams, z *big.Int) *rcurve {
- zinv := new(big.Int).ModInverse(z, params.P)
- return &rcurve{
- twisted: twisted,
- params: params,
- z: z,
- zinv: zinv,
- z2: new(big.Int).Exp(z, two, params.P),
- z3: new(big.Int).Exp(z, three, params.P),
- zinv2: new(big.Int).Exp(zinv, two, params.P),
- zinv3: new(big.Int).Exp(zinv, three, params.P),
- }
-}
-
-func (curve *rcurve) toTwisted(x, y *big.Int) (*big.Int, *big.Int) {
- var tx, ty big.Int
- tx.Mul(x, curve.z2)
- tx.Mod(&tx, curve.params.P)
- ty.Mul(y, curve.z3)
- ty.Mod(&ty, curve.params.P)
- return &tx, &ty
-}
-
-func (curve *rcurve) fromTwisted(tx, ty *big.Int) (*big.Int, *big.Int) {
- var x, y big.Int
- x.Mul(tx, curve.zinv2)
- x.Mod(&x, curve.params.P)
- y.Mul(ty, curve.zinv3)
- y.Mod(&y, curve.params.P)
- return &x, &y
-}
-
-func (curve *rcurve) Params() *elliptic.CurveParams {
- return curve.params
-}
-
-func (curve *rcurve) IsOnCurve(x, y *big.Int) bool {
- return curve.twisted.IsOnCurve(curve.toTwisted(x, y))
-}
-
-func (curve *rcurve) Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int) {
- tx1, ty1 := curve.toTwisted(x1, y1)
- tx2, ty2 := curve.toTwisted(x2, y2)
- return curve.fromTwisted(curve.twisted.Add(tx1, ty1, tx2, ty2))
-}
-
-func (curve *rcurve) Double(x1, y1 *big.Int) (x, y *big.Int) {
- return curve.fromTwisted(curve.twisted.Double(curve.toTwisted(x1, y1)))
-}
-
-func (curve *rcurve) ScalarMult(x1, y1 *big.Int, scalar []byte) (x, y *big.Int) {
- tx1, ty1 := curve.toTwisted(x1, y1)
- return curve.fromTwisted(curve.twisted.ScalarMult(tx1, ty1, scalar))
-}
-
-func (curve *rcurve) ScalarBaseMult(scalar []byte) (x, y *big.Int) {
- return curve.fromTwisted(curve.twisted.ScalarBaseMult(scalar))
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/eax.go b/vendor/github.com/ProtonMail/go-crypto/eax/eax.go
deleted file mode 100644
index 3ae91d59..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/eax/eax.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-
-// Package eax provides an implementation of the EAX
-// (encrypt-authenticate-translate) mode of operation, as described in
-// Bellare, Rogaway, and Wagner "THE EAX MODE OF OPERATION: A TWO-PASS
-// AUTHENTICATED-ENCRYPTION SCHEME OPTIMIZED FOR SIMPLICITY AND EFFICIENCY."
-// In FSE'04, volume 3017 of LNCS, 2004
-package eax
-
-import (
- "crypto/cipher"
- "crypto/subtle"
- "errors"
- "github.com/ProtonMail/go-crypto/internal/byteutil"
-)
-
-const (
- defaultTagSize = 16
- defaultNonceSize = 16
-)
-
-type eax struct {
- block cipher.Block // Only AES-{128, 192, 256} supported
- tagSize int // At least 12 bytes recommended
- nonceSize int
-}
-
-func (e *eax) NonceSize() int {
- return e.nonceSize
-}
-
-func (e *eax) Overhead() int {
- return e.tagSize
-}
-
-// NewEAX returns an EAX instance with AES-{KEYLENGTH} and default nonce and
-// tag lengths. Supports {128, 192, 256}- bit key length.
-func NewEAX(block cipher.Block) (cipher.AEAD, error) {
- return NewEAXWithNonceAndTagSize(block, defaultNonceSize, defaultTagSize)
-}
-
-// NewEAXWithNonceAndTagSize returns an EAX instance with AES-{keyLength} and
-// given nonce and tag lengths in bytes. Panics on zero nonceSize and
-// exceedingly long tags.
-//
-// It is recommended to use at least 12 bytes as tag length (see, for instance,
-// NIST SP 800-38D).
-//
-// Only to be used for compatibility with existing cryptosystems with
-// non-standard parameters. For all other cases, prefer NewEAX.
-func NewEAXWithNonceAndTagSize(
- block cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) {
- if nonceSize < 1 {
- return nil, eaxError("Cannot initialize EAX with nonceSize = 0")
- }
- if tagSize > block.BlockSize() {
- return nil, eaxError("Custom tag length exceeds blocksize")
- }
- return &eax{
- block: block,
- tagSize: tagSize,
- nonceSize: nonceSize,
- }, nil
-}
-
-func (e *eax) Seal(dst, nonce, plaintext, adata []byte) []byte {
- if len(nonce) > e.nonceSize {
- panic("crypto/eax: Nonce too long for this instance")
- }
- ret, out := byteutil.SliceForAppend(dst, len(plaintext)+e.tagSize)
- omacNonce := e.omacT(0, nonce)
- omacAdata := e.omacT(1, adata)
-
- // Encrypt message using CTR mode and omacNonce as IV
- ctr := cipher.NewCTR(e.block, omacNonce)
- ciphertextData := out[:len(plaintext)]
- ctr.XORKeyStream(ciphertextData, plaintext)
-
- omacCiphertext := e.omacT(2, ciphertextData)
-
- tag := out[len(plaintext):]
- for i := 0; i < e.tagSize; i++ {
- tag[i] = omacCiphertext[i] ^ omacNonce[i] ^ omacAdata[i]
- }
- return ret
-}
-
-func (e *eax) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) {
- if len(nonce) > e.nonceSize {
- panic("crypto/eax: Nonce too long for this instance")
- }
- if len(ciphertext) < e.tagSize {
- return nil, eaxError("Ciphertext shorter than tag length")
- }
- sep := len(ciphertext) - e.tagSize
-
- // Compute tag
- omacNonce := e.omacT(0, nonce)
- omacAdata := e.omacT(1, adata)
- omacCiphertext := e.omacT(2, ciphertext[:sep])
-
- tag := make([]byte, e.tagSize)
- for i := 0; i < e.tagSize; i++ {
- tag[i] = omacCiphertext[i] ^ omacNonce[i] ^ omacAdata[i]
- }
-
- // Compare tags
- if subtle.ConstantTimeCompare(ciphertext[sep:], tag) != 1 {
- return nil, eaxError("Tag authentication failed")
- }
-
- // Decrypt ciphertext
- ret, out := byteutil.SliceForAppend(dst, len(ciphertext))
- ctr := cipher.NewCTR(e.block, omacNonce)
- ctr.XORKeyStream(out, ciphertext[:sep])
-
- return ret[:sep], nil
-}
-
-// Tweakable OMAC - Calls OMAC_K([t]_n || plaintext)
-func (e *eax) omacT(t byte, plaintext []byte) []byte {
- blockSize := e.block.BlockSize()
- byteT := make([]byte, blockSize)
- byteT[blockSize-1] = t
- concat := append(byteT, plaintext...)
- return e.omac(concat)
-}
-
-func (e *eax) omac(plaintext []byte) []byte {
- blockSize := e.block.BlockSize()
- // L ← E_K(0^n); B ← 2L; P ← 4L
- L := make([]byte, blockSize)
- e.block.Encrypt(L, L)
- B := byteutil.GfnDouble(L)
- P := byteutil.GfnDouble(B)
-
- // CBC with IV = 0
- cbc := cipher.NewCBCEncrypter(e.block, make([]byte, blockSize))
- padded := e.pad(plaintext, B, P)
- cbcCiphertext := make([]byte, len(padded))
- cbc.CryptBlocks(cbcCiphertext, padded)
-
- return cbcCiphertext[len(cbcCiphertext)-blockSize:]
-}
-
-func (e *eax) pad(plaintext, B, P []byte) []byte {
- // if |M| in {n, 2n, 3n, ...}
- blockSize := e.block.BlockSize()
- if len(plaintext) != 0 && len(plaintext)%blockSize == 0 {
- return byteutil.RightXor(plaintext, B)
- }
-
- // else return (M || 1 || 0^(n−1−(|M| % n))) xor→ P
- ending := make([]byte, blockSize-len(plaintext)%blockSize)
- ending[0] = 0x80
- padded := append(plaintext, ending...)
- return byteutil.RightXor(padded, P)
-}
-
-func eaxError(err string) error {
- return errors.New("crypto/eax: " + err)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go b/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go
deleted file mode 100644
index ddb53d07..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package eax
-
-// Test vectors from
-// https://web.cs.ucdavis.edu/~rogaway/papers/eax.pdf
-var testVectors = []struct {
- msg, key, nonce, header, ciphertext string
-}{
- {"",
- "233952DEE4D5ED5F9B9C6D6FF80FF478",
- "62EC67F9C3A4A407FCB2A8C49031A8B3",
- "6BFB914FD07EAE6B",
- "E037830E8389F27B025A2D6527E79D01"},
- {"F7FB",
- "91945D3F4DCBEE0BF45EF52255F095A4",
- "BECAF043B0A23D843194BA972C66DEBD",
- "FA3BFD4806EB53FA",
- "19DD5C4C9331049D0BDAB0277408F67967E5"},
- {"1A47CB4933",
- "01F74AD64077F2E704C0F60ADA3DD523",
- "70C3DB4F0D26368400A10ED05D2BFF5E",
- "234A3463C1264AC6",
- "D851D5BAE03A59F238A23E39199DC9266626C40F80"},
- {"481C9E39B1",
- "D07CF6CBB7F313BDDE66B727AFD3C5E8",
- "8408DFFF3C1A2B1292DC199E46B7D617",
- "33CCE2EABFF5A79D",
- "632A9D131AD4C168A4225D8E1FF755939974A7BEDE"},
- {"40D0C07DA5E4",
- "35B6D0580005BBC12B0587124557D2C2",
- "FDB6B06676EEDC5C61D74276E1F8E816",
- "AEB96EAEBE2970E9",
- "071DFE16C675CB0677E536F73AFE6A14B74EE49844DD"},
- {"4DE3B35C3FC039245BD1FB7D",
- "BD8E6E11475E60B268784C38C62FEB22",
- "6EAC5C93072D8E8513F750935E46DA1B",
- "D4482D1CA78DCE0F",
- "835BB4F15D743E350E728414ABB8644FD6CCB86947C5E10590210A4F"},
- {"8B0A79306C9CE7ED99DAE4F87F8DD61636",
- "7C77D6E813BED5AC98BAA417477A2E7D",
- "1A8C98DCD73D38393B2BF1569DEEFC19",
- "65D2017990D62528",
- "02083E3979DA014812F59F11D52630DA30137327D10649B0AA6E1C181DB617D7F2"},
- {"1BDA122BCE8A8DBAF1877D962B8592DD2D56",
- "5FFF20CAFAB119CA2FC73549E20F5B0D",
- "DDE59B97D722156D4D9AFF2BC7559826",
- "54B9F04E6A09189A",
- "2EC47B2C4954A489AFC7BA4897EDCDAE8CC33B60450599BD02C96382902AEF7F832A"},
- {"6CF36720872B8513F6EAB1A8A44438D5EF11",
- "A4A4782BCFFD3EC5E7EF6D8C34A56123",
- "B781FCF2F75FA5A8DE97A9CA48E522EC",
- "899A175897561D7E",
- "0DE18FD0FDD91E7AF19F1D8EE8733938B1E8E7F6D2231618102FDB7FE55FF1991700"},
- {"CA40D7446E545FFAED3BD12A740A659FFBBB3CEAB7",
- "8395FCF1E95BEBD697BD010BC766AAC3",
- "22E7ADD93CFC6393C57EC0B3C17D6B44",
- "126735FCC320D25A",
- "CB8920F87A6C75CFF39627B56E3ED197C552D295A7CFC46AFC253B4652B1AF3795B124AB6E"},
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go b/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go
deleted file mode 100644
index 4eb19f28..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// These vectors include key length in {128, 192, 256}, tag size 128, and
-// random nonce, header, and plaintext lengths.
-
-// This file was automatically generated.
-
-package eax
-
-var randomVectors = []struct {
- key, nonce, header, plaintext, ciphertext string
-}{
- {"DFDE093F36B0356E5A81F609786982E3",
- "1D8AC604419001816905BA72B14CED7E",
- "152A1517A998D7A24163FCDD146DE81AC347C8B97088F502093C1ABB8F6E33D9A219C34D7603A18B1F5ABE02E56661B7D7F67E81EC08C1302EF38D80A859486D450E94A4F26AD9E68EEBBC0C857A0FC5CF9E641D63D565A7E361BC8908F5A8DC8FD6",
- "1C8EAAB71077FE18B39730A3156ADE29C5EE824C7EE86ED2A253B775603FB237116E654F6FEC588DD27F523A0E01246FE73FE348491F2A8E9ABC6CA58D663F71CDBCF4AD798BE46C42AE6EE8B599DB44A1A48D7BBBBA0F7D2750181E1C5E66967F7D57CBD30AFBDA5727",
- "79E7E150934BBEBF7013F61C60462A14D8B15AF7A248AFB8A344EF021C1500E16666891D6E973D8BB56B71A371F12CA34660C4410C016982B20F547E3762A58B7BF4F20236CADCF559E2BE7D783B13723B2741FC7CDC8997D839E39A3DDD2BADB96743DD7049F1BDB0516A262869915B3F70498AFB7B191BF960"},
- {"F10619EF02E5D94D7550EB84ED364A21",
- "8DC0D4F2F745BBAE835CC5574B942D20",
- "FE561358F2E8DF7E1024FF1AE9A8D36EBD01352214505CB99D644777A8A1F6027FA2BDBFC529A9B91136D5F2416CFC5F0F4EC3A1AFD32BDDA23CA504C5A5CB451785FABF4DFE4CD50D817491991A60615B30286361C100A95D1712F2A45F8E374461F4CA2B",
- "D7B5A971FC219631D30EFC3664AE3127D9CF3097DAD9C24AC7905D15E8D9B25B026B31D68CAE00975CDB81EB1FD96FD5E1A12E2BB83FA25F1B1D91363457657FC03875C27F2946C5",
- "2F336ED42D3CC38FC61660C4CD60BA4BD438B05F5965D8B7B399D2E7167F5D34F792D318F94DB15D67463AC449E13D568CC09BFCE32A35EE3EE96A041927680AE329811811E27F2D1E8E657707AF99BA96D13A478D695D59"},
- {"429F514EFC64D98A698A9247274CFF45",
- "976AA5EB072F912D126ACEBC954FEC38",
- "A71D89DC5B6CEDBB7451A27C3C2CAE09126DB4C421",
- "5632FE62AB1DC549D54D3BC3FC868ACCEDEFD9ECF5E9F8",
- "848AE4306CA8C7F416F8707625B7F55881C0AB430353A5C967CDA2DA787F581A70E34DBEBB2385"},
- {"398138F309085F47F8457CDF53895A63",
- "F8A8A7F2D28E5FFF7BBC2F24353F7A36",
- "5D633C21BA7764B8855CAB586F3746E236AD486039C83C6B56EFA9C651D38A41D6B20DAEE3418BFEA44B8BD6",
- "A3BBAA91920AF5E10659818B1B3B300AC79BFC129C8329E75251F73A66D3AE0128EB91D5031E0A65C329DB7D1E9C0493E268",
- "D078097267606E5FB07CFB7E2B4B718172A82C6A4CEE65D549A4DFB9838003BD2FBF64A7A66988AC1A632FD88F9E9FBB57C5A78AD2E086EACBA3DB68511D81C2970A"},
- {"7A4151EBD3901B42CBA45DAFB2E931BA",
- "0FC88ACEE74DD538040321C330974EB8",
- "250464FB04733BAB934C59E6AD2D6AE8D662CBCFEFBE61E5A308D4211E58C4C25935B72C69107722E946BFCBF416796600542D76AEB73F2B25BF53BAF97BDEB36ED3A7A51C31E7F170EB897457E7C17571D1BA0A908954E9",
- "88C41F3EBEC23FAB8A362D969CAC810FAD4F7CA6A7F7D0D44F060F92E37E1183768DD4A8C733F71C96058D362A39876D183B86C103DE",
- "74A25B2182C51096D48A870D80F18E1CE15867778E34FCBA6BD7BFB3739FDCD42AD0F2D9F4EBA29085285C6048C15BCE5E5166F1F962D3337AA88E6062F05523029D0A7F0BF9"},
- {"BFB147E1CD5459424F8C0271FC0E0DC5",
- "EABCC126442BF373969EA3015988CC45",
- "4C0880E1D71AA2C7",
- "BE1B5EC78FBF73E7A6682B21BA7E0E5D2D1C7ABE",
- "5660D7C1380E2F306895B1402CB2D6C37876504276B414D120F4CF92FDDDBB293A238EA0"},
- {"595DD6F52D18BC2CA8EB4EDAA18D9FA3",
- "0F84B5D36CF4BC3B863313AF3B4D2E97",
- "30AE6CC5F99580F12A779D98BD379A60948020C0B6FBD5746B30BA3A15C6CD33DAF376C70A9F15B6C0EB410A93161F7958AE23",
- "8EF3687A1642B070970B0B91462229D1D76ABC154D18211F7152AA9FF368",
- "317C1DDB11417E5A9CC4DDE7FDFF6659A5AC4B31DE025212580A05CDAC6024D3E4AE7C2966E52B9129E9ECDBED86"},
- {"44E6F2DC8FDC778AD007137D11410F50",
- "270A237AD977F7187AA6C158A0BAB24F",
- "509B0F0EB12E2AA5C5BA2DE553C07FAF4CE0C9E926531AA709A3D6224FCB783ACCF1559E10B1123EBB7D52E8AB54E6B5352A9ED0D04124BF0E9D9BACFD7E32B817B2E625F5EE94A64EDE9E470DE7FE6886C19B294F9F828209FE257A78",
- "8B3D7815DF25618A5D0C55A601711881483878F113A12EC36CF64900549A3199555528559DC118F789788A55FAFD944E6E99A9CA3F72F238CD3F4D88223F7A745992B3FAED1848",
- "1CC00D79F7AD82FDA71B58D286E5F34D0CC4CEF30704E771CC1E50746BDF83E182B078DB27149A42BAE619DF0F85B0B1090AD55D3B4471B0D6F6ECCD09C8F876B30081F0E7537A9624F8AAF29DA85E324122EFB4D68A56"},
- {"BB7BC352A03044B4428D8DBB4B0701FDEC4649FD17B81452",
- "8B4BBE26CCD9859DCD84884159D6B0A4",
- "2212BEB0E78E0F044A86944CF33C8D5C80D9DBE1034BF3BCF73611835C7D3A52F5BD2D81B68FD681B68540A496EE5DA16FD8AC8824E60E1EC2042BE28FB0BFAD4E4B03596446BDD8C37D936D9B3D5295BE19F19CF5ACE1D33A46C952CE4DE5C12F92C1DD051E04AEED",
- "9037234CC44FFF828FABED3A7084AF40FA7ABFF8E0C0EFB57A1CC361E18FC4FAC1AB54F3ABFE9FF77263ACE16C3A",
- "A9391B805CCD956081E0B63D282BEA46E7025126F1C1631239C33E92AA6F92CD56E5A4C56F00FF9658E93D48AF4EF0EF81628E34AD4DB0CDAEDCD2A17EE7"},
- {"99C0AD703196D2F60A74E6B378B838B31F82EA861F06FC4E",
- "92745C018AA708ECFEB1667E9F3F1B01",
- "828C69F376C0C0EC651C67749C69577D589EE39E51404D80EBF70C8660A8F5FD375473F4A7C611D59CB546A605D67446CE2AA844135FCD78BB5FBC90222A00D42920BB1D7EEDFB0C4672554F583EF23184F89063CDECBE482367B5F9AF3ACBC3AF61392BD94CBCD9B64677",
- "A879214658FD0A5B0E09836639BF82E05EC7A5EF71D4701934BDA228435C68AC3D5CEB54997878B06A655EEACEFB1345C15867E7FE6C6423660C8B88DF128EBD6BCD85118DBAE16E9252FFB204324E5C8F38CA97759BDBF3CB0083",
- "51FE87996F194A2585E438B023B345439EA60D1AEBED4650CDAF48A4D4EEC4FC77DC71CC4B09D3BEEF8B7B7AF716CE2B4EFFB3AC9E6323C18AC35E0AA6E2BBBC8889490EB6226C896B0D105EAB42BFE7053CCF00ED66BA94C1BA09A792AA873F0C3B26C5C5F9A936E57B25"},
- {"7086816D00D648FB8304AA8C9E552E1B69A9955FB59B25D1",
- "0F45CF7F0BF31CCEB85D9DA10F4D749F",
- "93F27C60A417D9F0669E86ACC784FC8917B502DAF30A6338F11B30B94D74FEFE2F8BE1BBE2EAD10FAB7EED3C6F72B7C3ECEE1937C32ED4970A6404E139209C05",
- "877F046601F3CBE4FB1491943FA29487E738F94B99AF206262A1D6FF856C9AA0B8D4D08A54370C98F8E88FA3DCC2B14C1F76D71B2A4C7963AEE8AF960464C5BEC8357AD00DC8",
- "FE96906B895CE6A8E72BC72344E2C8BB3C63113D70EAFA26C299BAFE77A8A6568172EB447FB3E86648A0AF3512DEB1AAC0819F3EC553903BF28A9FB0F43411237A774BF9EE03E445D280FBB9CD12B9BAAB6EF5E52691"},
- {"062F65A896D5BF1401BADFF70E91B458E1F9BD4888CB2E4D",
- "5B11EA1D6008EBB41CF892FCA5B943D1",
- "BAF4FF5C8242",
- "A8870E091238355984EB2F7D61A865B9170F440BFF999A5993DD41A10F4440D21FF948DDA2BF663B2E03AC3324492DC5E40262ECC6A65C07672353BE23E7FB3A9D79FF6AA38D97960905A38DECC312CB6A59E5467ECF06C311CD43ADC0B543EDF34FE8BE611F176460D5627CA51F8F8D9FED71F55C",
- "B10E127A632172CF8AA7539B140D2C9C2590E6F28C3CB892FC498FCE56A34F732FBFF32E79C7B9747D9094E8635A0C084D6F0247F9768FB5FF83493799A9BEC6C39572120C40E9292C8C947AE8573462A9108C36D9D7112E6995AE5867E6C8BB387D1C5D4BEF524F391B9FD9F0A3B4BFA079E915BCD920185CFD38D114C558928BD7D47877"},
- {"38A8E45D6D705A11AF58AED5A1344896998EACF359F2E26A",
- "FD82B5B31804FF47D44199B533D0CF84",
- "DE454D4E62FE879F2050EE3E25853623D3E9AC52EEC1A1779A48CFAF5ECA0BFDE44749391866D1",
- "B804",
- "164BB965C05EBE0931A1A63293EDF9C38C27"},
- {"34C33C97C6D7A0850DA94D78A58DC61EC717CD7574833068",
- "343BE00DA9483F05C14F2E9EB8EA6AE8",
- "78312A43EFDE3CAE34A65796FF059A3FE15304EEA5CF1D9306949FE5BF3349D4977D4EBE76C040FE894C5949E4E4D6681153DA87FB9AC5062063CA2EA183566343362370944CE0362D25FC195E124FD60E8682E665D13F2229DDA3E4B2CB1DCA",
- "CC11BB284B1153578E4A5ED9D937B869DAF00F5B1960C23455CA9CC43F486A3BE0B66254F1041F04FDF459C8640465B6E1D2CF899A381451E8E7FCB50CF87823BE77E24B132BBEEDC72E53369B275E1D8F49ECE59F4F215230AC4FE133FC80E4F634EE80BA4682B62C86",
- "E7F703DC31A95E3A4919FF957836CB76C063D81702AEA4703E1C2BF30831E58C4609D626EC6810E12EAA5B930F049FF9EFC22C3E3F1EBD4A1FB285CB02A1AC5AD46B425199FC0A85670A5C4E3DAA9636C8F64C199F42F18AAC8EA7457FD377F322DD7752D7D01B946C8F0A97E6113F0D50106F319AFD291AAACE"},
- {"C6ECF7F053573E403E61B83052A343D93CBCC179D1E835BE",
- "E280E13D7367042E3AA09A80111B6184",
- "21486C9D7A9647",
- "5F2639AFA6F17931853791CD8C92382BBB677FD72D0AB1A080D0E49BFAA21810E963E4FACD422E92F65CBFAD5884A60CD94740DF31AF02F95AA57DA0C4401B0ED906",
- "5C51DB20755302070C45F52E50128A67C8B2E4ED0EACB7E29998CCE2E8C289DD5655913EC1A51CC3AABE5CDC2402B2BE7D6D4BF6945F266FBD70BA9F37109067157AE7530678B45F64475D4EBFCB5FFF46A5"},
- {"5EC6CF7401BC57B18EF154E8C38ACCA8959E57D2F3975FF5",
- "656B41CB3F9CF8C08BAD7EBFC80BD225",
- "6B817C2906E2AF425861A7EF59BA5801F143EE2A139EE72697CDE168B4",
- "2C0E1DDC9B1E5389BA63845B18B1F8A1DB062037151BCC56EF7C21C0BB4DAE366636BBA975685D7CC5A94AFBE89C769016388C56FB7B57CE750A12B718A8BDCF70E80E8659A8330EFC8F86640F21735E8C80E23FE43ABF23507CE3F964AE4EC99D",
- "ED780CF911E6D1AA8C979B889B0B9DC1ABE261832980BDBFB576901D9EF5AB8048998E31A15BE54B3E5845A4D136AD24D0BDA1C3006168DF2F8AC06729CB0818867398150020131D8F04EDF1923758C9EABB5F735DE5EA1758D4BC0ACFCA98AFD202E9839B8720253693B874C65586C6F0"},
- {"C92F678EB2208662F5BCF3403EC05F5961E957908A3E79421E1D25FC19054153",
- "DA0F3A40983D92F2D4C01FED33C7A192",
- "2B6E9D26DB406A0FAB47608657AA10EFC2B4AA5F459B29FF85AC9A40BFFE7AEB04F77E9A11FAAA116D7F6D4DA417671A9AB02C588E0EF59CB1BFB4B1CC931B63A3B3A159FCEC97A04D1E6F0C7E6A9CEF6B0ABB04758A69F1FE754DF4C2610E8C46B6CF413BDB31351D55BEDCB7B4A13A1C98E10984475E0F2F957853",
- "F37326A80E08",
- "83519E53E321D334F7C10B568183775C0E9AAE55F806"},
- {"6847E0491BE57E72995D186D50094B0B3593957A5146798FCE68B287B2FB37B5",
- "3EE1182AEBB19A02B128F28E1D5F7F99",
- "D9F35ABB16D776CE",
- "DB7566ED8EA95BDF837F23DB277BAFBC5E70D1105ADFD0D9EF15475051B1EF94709C67DCA9F8D5",
- "2CDCED0C9EBD6E2A508822A685F7DCD1CDD99E7A5FCA786C234E7F7F1D27EC49751AD5DCFA30C5EDA87C43CAE3B919B6BBCFE34C8EDA59"},
- {"82B019673642C08388D3E42075A4D5D587558C229E4AB8F660E37650C4C41A0A",
- "336F5D681E0410FAE7B607246092C6DC",
- "D430CBD8FE435B64214E9E9CDC5DE99D31CFCFB8C10AA0587A49DF276611",
- "998404153AD77003E1737EDE93ED79859EE6DCCA93CB40C4363AA817ABF2DBBD46E42A14A7183B6CC01E12A577888141363D0AE011EB6E8D28C0B235",
- "9BEF69EEB60BD3D6065707B7557F25292A8872857CFBD24F2F3C088E4450995333088DA50FD9121221C504DF1D0CD5EFE6A12666C5D5BB12282CF4C19906E9CFAB97E9BDF7F49DC17CFC384B"},
- {"747B2E269B1859F0622C15C8BAD6A725028B1F94B8DB7326948D1E6ED663A8BC",
- "AB91F7245DDCE3F1C747872D47BE0A8A",
- "3B03F786EF1DDD76E1D42646DA4CD2A5165DC5383CE86D1A0B5F13F910DC278A4E451EE0192CBA178E13B3BA27FDC7840DF73D2E104B",
- "6B803F4701114F3E5FE21718845F8416F70F626303F545BE197189E0A2BA396F37CE06D389EB2658BC7D56D67868708F6D0D32",
- "1570DDB0BCE75AA25D1957A287A2C36B1A5F2270186DA81BA6112B7F43B0F3D1D0ED072591DCF1F1C99BBB25621FC39B896FF9BD9413A2845363A9DCD310C32CF98E57"},
- {"02E59853FB29AEDA0FE1C5F19180AD99A12FF2F144670BB2B8BADF09AD812E0A",
- "C691294EF67CD04D1B9242AF83DD1421",
- "879334DAE3",
- "1E17F46A98FEF5CBB40759D95354",
- "FED8C3FF27DDF6313AED444A2985B36CBA268AAD6AAC563C0BA28F6DB5DB"},
- {"F6C1FB9B4188F2288FF03BD716023198C3582CF2A037FC2F29760916C2B7FCDB",
- "4228DA0678CA3534588859E77DFF014C",
- "D8153CAF35539A61DD8D05B3C9B44F01E564FB9348BCD09A1C23B84195171308861058F0A3CD2A55B912A3AAEE06FF4D356C77275828F2157C2FC7C115DA39E443210CCC56BEDB0CC99BBFB227ABD5CC454F4E7F547C7378A659EEB6A7E809101A84F866503CB18D4484E1FA09B3EC7FC75EB2E35270800AA7",
- "23B660A779AD285704B12EC1C580387A47BEC7B00D452C6570",
- "5AA642BBABA8E49849002A2FAF31DB8FC7773EFDD656E469CEC19B3206D4174C9A263D0A05484261F6"},
- {"8FF6086F1FADB9A3FBE245EAC52640C43B39D43F89526BB5A6EBA47710931446",
- "943188480C99437495958B0AE4831AA9",
- "AD5CD0BDA426F6EBA23C8EB23DC73FF9FEC173355EDBD6C9344C4C4383F211888F7CE6B29899A6801DF6B38651A7C77150941A",
- "80CD5EA8D7F81DDF5070B934937912E8F541A5301877528EB41AB60C020968D459960ED8FB73083329841A",
- "ABAE8EB7F36FCA2362551E72DAC890BA1BB6794797E0FC3B67426EC9372726ED4725D379EA0AC9147E48DCD0005C502863C2C5358A38817C8264B5"},
- {"A083B54E6B1FE01B65D42FCD248F97BB477A41462BBFE6FD591006C022C8FD84",
- "B0490F5BD68A52459556B3749ACDF40E",
- "8892E047DA5CFBBDF7F3CFCBD1BD21C6D4C80774B1826999234394BD3E513CC7C222BB40E1E3140A152F19B3802F0D036C24A590512AD0E8",
- "D7B15752789DC94ED0F36778A5C7BBB207BEC32BAC66E702B39966F06E381E090C6757653C3D26A81EC6AD6C364D66867A334C91BB0B8A8A4B6EACDF0783D09010AEBA2DD2062308FE99CC1F",
- "C071280A732ADC93DF272BF1E613B2BB7D46FC6665EF2DC1671F3E211D6BDE1D6ADDD28DF3AA2E47053FC8BB8AE9271EC8BC8B2CFFA320D225B451685B6D23ACEFDD241FE284F8ADC8DB07F456985B14330BBB66E0FB212213E05B3E"},
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go
deleted file mode 100644
index affb74a7..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-// This file contains necessary tools for the aex and ocb packages.
-//
-// These functions SHOULD NOT be used elsewhere, since they are optimized for
-// specific input nature in the EAX and OCB modes of operation.
-
-package byteutil
-
-// GfnDouble computes 2 * input in the field of 2^n elements.
-// The irreducible polynomial in the finite field for n=128 is
-// x^128 + x^7 + x^2 + x + 1 (equals 0x87)
-// Constant-time execution in order to avoid side-channel attacks
-func GfnDouble(input []byte) []byte {
- if len(input) != 16 {
- panic("Doubling in GFn only implemented for n = 128")
- }
- // If the first bit is zero, return 2L = L << 1
- // Else return (L << 1) xor 0^120 10000111
- shifted := ShiftBytesLeft(input)
- shifted[15] ^= ((input[0] >> 7) * 0x87)
- return shifted
-}
-
-// ShiftBytesLeft outputs the byte array corresponding to x << 1 in binary.
-func ShiftBytesLeft(x []byte) []byte {
- l := len(x)
- dst := make([]byte, l)
- for i := 0; i < l-1; i++ {
- dst[i] = (x[i] << 1) | (x[i+1] >> 7)
- }
- dst[l-1] = x[l-1] << 1
- return dst
-}
-
-// ShiftNBytesLeft puts in dst the byte array corresponding to x << n in binary.
-func ShiftNBytesLeft(dst, x []byte, n int) {
- // Erase first n / 8 bytes
- copy(dst, x[n/8:])
-
- // Shift the remaining n % 8 bits
- bits := uint(n % 8)
- l := len(dst)
- for i := 0; i < l-1; i++ {
- dst[i] = (dst[i] << bits) | (dst[i+1] >> uint(8-bits))
- }
- dst[l-1] = dst[l-1] << bits
-
- // Append trailing zeroes
- dst = append(dst, make([]byte, n/8)...)
-}
-
-// XorBytesMut assumes equal input length, replaces X with X XOR Y
-func XorBytesMut(X, Y []byte) {
- for i := 0; i < len(X); i++ {
- X[i] ^= Y[i]
- }
-}
-
-// XorBytes assumes equal input length, puts X XOR Y into Z
-func XorBytes(Z, X, Y []byte) {
- for i := 0; i < len(X); i++ {
- Z[i] = X[i] ^ Y[i]
- }
-}
-
-// RightXor XORs smaller input (assumed Y) at the right of the larger input (assumed X)
-func RightXor(X, Y []byte) []byte {
- offset := len(X) - len(Y)
- xored := make([]byte, len(X))
- copy(xored, X)
- for i := 0; i < len(Y); i++ {
- xored[offset+i] ^= Y[i]
- }
- return xored
-}
-
-// SliceForAppend takes a slice and a requested number of bytes. It returns a
-// slice with the contents of the given slice followed by that many bytes and a
-// second slice that aliases into it and contains only the extra bytes. If the
-// original slice has sufficient capacity then no allocation is performed.
-func SliceForAppend(in []byte, n int) (head, tail []byte) {
- if total := len(in) + n; cap(in) >= total {
- head = in[:total]
- } else {
- head = make([]byte, total)
- copy(head, in)
- }
- tail = head[len(in):]
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go
deleted file mode 100644
index 5022285b..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-
-// Package ocb provides an implementation of the OCB (offset codebook) mode of
-// operation, as described in RFC-7253 of the IRTF and in Rogaway, Bellare,
-// Black and Krovetz - OCB: A BLOCK-CIPHER MODE OF OPERATION FOR EFFICIENT
-// AUTHENTICATED ENCRYPTION (2003).
-// Security considerations (from RFC-7253): A private key MUST NOT be used to
-// encrypt more than 2^48 blocks. Tag length should be at least 12 bytes (a
-// brute-force forging adversary succeeds after 2^{tag length} attempts). A
-// single key SHOULD NOT be used to decrypt ciphertext with different tag
-// lengths. Nonces need not be secret, but MUST NOT be reused.
-// This package only supports underlying block ciphers with 128-bit blocks,
-// such as AES-{128, 192, 256}, but may be extended to other sizes.
-package ocb
-
-import (
- "bytes"
- "crypto/cipher"
- "crypto/subtle"
- "errors"
- "math/bits"
-
- "github.com/ProtonMail/go-crypto/internal/byteutil"
-)
-
-type ocb struct {
- block cipher.Block
- tagSize int
- nonceSize int
- mask mask
- // Optimized en/decrypt: For each nonce N used to en/decrypt, the 'Ktop'
- // internal variable can be reused for en/decrypting with nonces sharing
- // all but the last 6 bits with N. The prefix of the first nonce used to
- // compute the new Ktop, and the Ktop value itself, are stored in
- // reusableKtop. If using incremental nonces, this saves one block cipher
- // call every 63 out of 64 OCB encryptions, and stores one nonce and one
- // output of the block cipher in memory only.
- reusableKtop reusableKtop
-}
-
-type mask struct {
- // L_*, L_$, (L_i)_{i ∈ N}
- lAst []byte
- lDol []byte
- L [][]byte
-}
-
-type reusableKtop struct {
- noncePrefix []byte
- Ktop []byte
-}
-
-const (
- defaultTagSize = 16
- defaultNonceSize = 15
-)
-
-const (
- enc = iota
- dec
-)
-
-func (o *ocb) NonceSize() int {
- return o.nonceSize
-}
-
-func (o *ocb) Overhead() int {
- return o.tagSize
-}
-
-// NewOCB returns an OCB instance with the given block cipher and default
-// tag and nonce sizes.
-func NewOCB(block cipher.Block) (cipher.AEAD, error) {
- return NewOCBWithNonceAndTagSize(block, defaultNonceSize, defaultTagSize)
-}
-
-// NewOCBWithNonceAndTagSize returns an OCB instance with the given block
-// cipher, nonce length, and tag length. Panics on zero nonceSize and
-// exceedingly long tag size.
-//
-// It is recommended to use at least 12 bytes as tag length.
-func NewOCBWithNonceAndTagSize(
- block cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) {
- if block.BlockSize() != 16 {
- return nil, ocbError("Block cipher must have 128-bit blocks")
- }
- if nonceSize < 1 {
- return nil, ocbError("Incorrect nonce length")
- }
- if nonceSize >= block.BlockSize() {
- return nil, ocbError("Nonce length exceeds blocksize - 1")
- }
- if tagSize > block.BlockSize() {
- return nil, ocbError("Custom tag length exceeds blocksize")
- }
- return &ocb{
- block: block,
- tagSize: tagSize,
- nonceSize: nonceSize,
- mask: initializeMaskTable(block),
- reusableKtop: reusableKtop{
- noncePrefix: nil,
- Ktop: nil,
- },
- }, nil
-}
-
-func (o *ocb) Seal(dst, nonce, plaintext, adata []byte) []byte {
- if len(nonce) > o.nonceSize {
- panic("crypto/ocb: Incorrect nonce length given to OCB")
- }
- ret, out := byteutil.SliceForAppend(dst, len(plaintext)+o.tagSize)
- o.crypt(enc, out, nonce, adata, plaintext)
- return ret
-}
-
-func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) {
- if len(nonce) > o.nonceSize {
- panic("Nonce too long for this instance")
- }
- if len(ciphertext) < o.tagSize {
- return nil, ocbError("Ciphertext shorter than tag length")
- }
- sep := len(ciphertext) - o.tagSize
- ret, out := byteutil.SliceForAppend(dst, len(ciphertext))
- ciphertextData := ciphertext[:sep]
- tag := ciphertext[sep:]
- o.crypt(dec, out, nonce, adata, ciphertextData)
- if subtle.ConstantTimeCompare(ret[sep:], tag) == 1 {
- ret = ret[:sep]
- return ret, nil
- }
- for i := range out {
- out[i] = 0
- }
- return nil, ocbError("Tag authentication failed")
-}
-
-// On instruction enc (resp. dec), crypt is the encrypt (resp. decrypt)
-// function. It returns the resulting plain/ciphertext with the tag appended.
-func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte {
- //
- // Consider X as a sequence of 128-bit blocks
- //
- // Note: For encryption (resp. decryption), X is the plaintext (resp., the
- // ciphertext without the tag).
- blockSize := o.block.BlockSize()
-
- //
- // Nonce-dependent and per-encryption variables
- //
- // Zero out the last 6 bits of the nonce into truncatedNonce to see if Ktop
- // is already computed.
- truncatedNonce := make([]byte, len(nonce))
- copy(truncatedNonce, nonce)
- truncatedNonce[len(truncatedNonce)-1] &= 192
- var Ktop []byte
- if bytes.Equal(truncatedNonce, o.reusableKtop.noncePrefix) {
- Ktop = o.reusableKtop.Ktop
- } else {
- // Nonce = num2str(TAGLEN mod 128, 7) || zeros(120 - bitlen(N)) || 1 || N
- paddedNonce := append(make([]byte, blockSize-1-len(nonce)), 1)
- paddedNonce = append(paddedNonce, truncatedNonce...)
- paddedNonce[0] |= byte(((8 * o.tagSize) % (8 * blockSize)) << 1)
- // Last 6 bits of paddedNonce are already zero. Encrypt into Ktop
- paddedNonce[blockSize-1] &= 192
- Ktop = paddedNonce
- o.block.Encrypt(Ktop, Ktop)
- o.reusableKtop.noncePrefix = truncatedNonce
- o.reusableKtop.Ktop = Ktop
- }
-
- // Stretch = Ktop || ((lower half of Ktop) XOR (lower half of Ktop << 8))
- xorHalves := make([]byte, blockSize/2)
- byteutil.XorBytes(xorHalves, Ktop[:blockSize/2], Ktop[1:1+blockSize/2])
- stretch := append(Ktop, xorHalves...)
- bottom := int(nonce[len(nonce)-1] & 63)
- offset := make([]byte, len(stretch))
- byteutil.ShiftNBytesLeft(offset, stretch, bottom)
- offset = offset[:blockSize]
-
- //
- // Process any whole blocks
- //
- // Note: For encryption Y is ciphertext || tag, for decryption Y is
- // plaintext || tag.
- checksum := make([]byte, blockSize)
- m := len(X) / blockSize
- for i := 0; i < m; i++ {
- index := bits.TrailingZeros(uint(i + 1))
- if len(o.mask.L)-1 < index {
- o.mask.extendTable(index)
- }
- byteutil.XorBytesMut(offset, o.mask.L[bits.TrailingZeros(uint(i+1))])
- blockX := X[i*blockSize : (i+1)*blockSize]
- blockY := Y[i*blockSize : (i+1)*blockSize]
- byteutil.XorBytes(blockY, blockX, offset)
- switch instruction {
- case enc:
- o.block.Encrypt(blockY, blockY)
- byteutil.XorBytesMut(blockY, offset)
- byteutil.XorBytesMut(checksum, blockX)
- case dec:
- o.block.Decrypt(blockY, blockY)
- byteutil.XorBytesMut(blockY, offset)
- byteutil.XorBytesMut(checksum, blockY)
- }
- }
- //
- // Process any final partial block and compute raw tag
- //
- tag := make([]byte, blockSize)
- if len(X)%blockSize != 0 {
- byteutil.XorBytesMut(offset, o.mask.lAst)
- pad := make([]byte, blockSize)
- o.block.Encrypt(pad, offset)
- chunkX := X[blockSize*m:]
- chunkY := Y[blockSize*m : len(X)]
- byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)])
- // P_* || bit(1) || zeroes(127) - len(P_*)
- switch instruction {
- case enc:
- paddedY := append(chunkX, byte(128))
- paddedY = append(paddedY, make([]byte, blockSize-len(chunkX)-1)...)
- byteutil.XorBytesMut(checksum, paddedY)
- case dec:
- paddedX := append(chunkY, byte(128))
- paddedX = append(paddedX, make([]byte, blockSize-len(chunkY)-1)...)
- byteutil.XorBytesMut(checksum, paddedX)
- }
- byteutil.XorBytes(tag, checksum, offset)
- byteutil.XorBytesMut(tag, o.mask.lDol)
- o.block.Encrypt(tag, tag)
- byteutil.XorBytesMut(tag, o.hash(adata))
- copy(Y[blockSize*m+len(chunkY):], tag[:o.tagSize])
- } else {
- byteutil.XorBytes(tag, checksum, offset)
- byteutil.XorBytesMut(tag, o.mask.lDol)
- o.block.Encrypt(tag, tag)
- byteutil.XorBytesMut(tag, o.hash(adata))
- copy(Y[blockSize*m:], tag[:o.tagSize])
- }
- return Y
-}
-
-// This hash function is used to compute the tag. Per design, on empty input it
-// returns a slice of zeros, of the same length as the underlying block cipher
-// block size.
-func (o *ocb) hash(adata []byte) []byte {
- //
- // Consider A as a sequence of 128-bit blocks
- //
- A := make([]byte, len(adata))
- copy(A, adata)
- blockSize := o.block.BlockSize()
-
- //
- // Process any whole blocks
- //
- sum := make([]byte, blockSize)
- offset := make([]byte, blockSize)
- m := len(A) / blockSize
- for i := 0; i < m; i++ {
- chunk := A[blockSize*i : blockSize*(i+1)]
- index := bits.TrailingZeros(uint(i + 1))
- // If the mask table is too short
- if len(o.mask.L)-1 < index {
- o.mask.extendTable(index)
- }
- byteutil.XorBytesMut(offset, o.mask.L[index])
- byteutil.XorBytesMut(chunk, offset)
- o.block.Encrypt(chunk, chunk)
- byteutil.XorBytesMut(sum, chunk)
- }
-
- //
- // Process any final partial block; compute final hash value
- //
- if len(A)%blockSize != 0 {
- byteutil.XorBytesMut(offset, o.mask.lAst)
- // Pad block with 1 || 0 ^ 127 - bitlength(a)
- ending := make([]byte, blockSize-len(A)%blockSize)
- ending[0] = 0x80
- encrypted := append(A[blockSize*m:], ending...)
- byteutil.XorBytesMut(encrypted, offset)
- o.block.Encrypt(encrypted, encrypted)
- byteutil.XorBytesMut(sum, encrypted)
- }
- return sum
-}
-
-func initializeMaskTable(block cipher.Block) mask {
- //
- // Key-dependent variables
- //
- lAst := make([]byte, block.BlockSize())
- block.Encrypt(lAst, lAst)
- lDol := byteutil.GfnDouble(lAst)
- L := make([][]byte, 1)
- L[0] = byteutil.GfnDouble(lDol)
-
- return mask{
- lAst: lAst,
- lDol: lDol,
- L: L,
- }
-}
-
-// Extends the L array of mask m up to L[limit], with L[i] = GfnDouble(L[i-1])
-func (m *mask) extendTable(limit int) {
- for i := len(m.L); i <= limit; i++ {
- m.L = append(m.L, byteutil.GfnDouble(m.L[i-1]))
- }
-}
-
-func ocbError(err string) error {
- return errors.New("crypto/ocb: " + err)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go b/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go
deleted file mode 100644
index 0efaf344..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go
+++ /dev/null
@@ -1,136 +0,0 @@
-// In the test vectors provided by RFC 7253, the "bottom"
-// internal variable, which defines "offset" for the first time, does not
-// exceed 15. However, it can attain values up to 63.
-
-// These vectors include key length in {128, 192, 256}, tag size 128, and
-// random nonce, header, and plaintext lengths.
-
-// This file was automatically generated.
-
-package ocb
-
-var randomVectors = []struct {
- key, nonce, header, plaintext, ciphertext string
-}{
-
- {"9438C5D599308EAF13F800D2D31EA7F0",
- "C38EE4801BEBFFA1CD8635BE",
- "0E507B7DADD8A98CDFE272D3CB6B3E8332B56AE583FB049C0874D4200BED16BD1A044182434E9DA0E841F182DFD5B3016B34641CED0784F1745F63AB3D0DA22D3351C9EF9A658B8081E24498EBF61FCE40DA6D8E184536",
- "962D227786FB8913A8BAD5DC3250",
- "EEDEF5FFA5986D1E3BF86DDD33EF9ADC79DCA06E215FA772CCBA814F63AD"},
- {"BA7DE631C7D6712167C6724F5B9A2B1D",
- "35263EBDA05765DC0E71F1F5",
- "0103257B4224507C0242FEFE821EA7FA42E0A82863E5F8B68F7D881B4B44FA428A2B6B21D2F591260802D8AB6D83",
- "9D6D1FC93AE8A64E7889B7B2E3521EFA9B920A8DDB692E6F833DDC4A38AFA535E5E2A3ED82CB7E26404AB86C54D01C4668F28398C2DF33D5D561CBA1C8DCFA7A912F5048E545B59483C0E3221F54B14DAA2E4EB657B3BEF9554F34CAD69B2724AE962D3D8A",
- "E93852D1985C5E775655E937FA79CE5BF28A585F2AF53A5018853B9634BE3C84499AC0081918FDCE0624494D60E25F76ACD6853AC7576E3C350F332249BFCABD4E73CEABC36BE4EDDA40914E598AE74174A0D7442149B26990899491BDDFE8FC54D6C18E83AE9E9A6FFBF5D376565633862EEAD88D"},
- {"2E74B25289F6FD3E578C24866E9C72A5",
- "FD912F15025AF8414642BA1D1D",
- "FB5FB8C26F365EEDAB5FE260C6E3CCD27806729C8335F146063A7F9EA93290E56CF84576EB446350D22AD730547C267B1F0BBB97EB34E1E2C41A",
- "6C092EBF78F76EE8C1C6E592277D9545BA16EDB67BC7D8480B9827702DC2F8A129E2B08A2CE710CA7E1DA45CE162BB6CD4B512E632116E2211D3C90871EFB06B8D4B902681C7FB",
- "6AC0A77F26531BF4F354A1737F99E49BE32ECD909A7A71AD69352906F54B08A9CE9B8CA5D724CBFFC5673437F23F630697F3B84117A1431D6FA8CC13A974FB4AD360300522E09511B99E71065D5AC4BBCB1D791E864EF4"},
- {"E7EC507C802528F790AFF5303A017B17",
- "4B97A7A568940A9E3CE7A99E93031E",
- "28349BDC5A09390C480F9B8AA3EDEA3DDB8B9D64BCA322C570B8225DF0E31190DAB25A4014BA39519E02ABFB12B89AA28BBFD29E486E7FB28734258C817B63CED9912DBAFEBB93E2798AB2890DE3B0ACFCFF906AB15563EF7823CE83D27CDB251195E22BD1337BCBDE65E7C2C427321C463C2777BFE5AEAA",
- "9455B3EA706B74",
- "7F33BA3EA848D48A96B9530E26888F43EBD4463C9399B6"},
- {"6C928AA3224736F28EE7378DE0090191",
- "8936138E2E4C6A13280017A1622D",
- "6202717F2631565BDCDC57C6584543E72A7C8BD444D0D108ED35069819633C",
- "DA0691439E5F035F3E455269D14FE5C201C8C9B0A3FE2D3F86BCC59387C868FE65733D388360B31E3CE28B4BF6A8BE636706B536D5720DB66B47CF1C7A5AFD6F61E0EF90F1726D6B0E169F9A768B2B7AE4EE00A17F630AC905FCAAA1B707FFF25B3A1AAE83B504837C64A5639B2A34002B300EC035C9B43654DA55",
- "B8804D182AB0F0EEB464FA7BD1329AD6154F982013F3765FEDFE09E26DAC078C9C1439BFC1159D6C02A25E3FF83EF852570117B315852AD5EE20E0FA3AA0A626B0E43BC0CEA38B44579DD36803455FB46989B90E6D229F513FD727AF8372517E9488384C515D6067704119C931299A0982EDDFB9C2E86A90C450C077EB222511EC9CCABC9FCFDB19F70088"},
- {"ECEA315CA4B3F425B0C9957A17805EA4",
- "664CDAE18403F4F9BA13015A44FC",
- "642AFB090D6C6DB46783F08B01A3EF2A8FEB5736B531EAC226E7888FCC8505F396818F83105065FACB3267485B9E5E4A0261F621041C08FCCB2A809A49AB5252A91D0971BCC620B9D614BD77E57A0EED2FA5",
- "6852C31F8083E20E364CEA21BB7854D67CEE812FE1C9ED2425C0932A90D3780728D1BB",
- "2ECEF962A9695A463ADABB275BDA9FF8B2BA57AEC2F52EFFB700CD9271A74D2A011C24AEA946051BD6291776429B7E681BA33E"},
- {"4EE616C4A58AAA380878F71A373461F6",
- "91B8C9C176D9C385E9C47E52",
- "CDA440B7F9762C572A718AC754EDEECC119E5EE0CCB9FEA4FFB22EEE75087C032EBF3DA9CDD8A28CC010B99ED45143B41A4BA50EA2A005473F89639237838867A57F23B0F0ED3BF22490E4501DAC9C658A9B9F",
- "D6E645FA9AE410D15B8123FD757FA356A8DBE9258DDB5BE88832E615910993F497EC",
- "B70ED7BF959FB2AAED4F36174A2A99BFB16992C8CDF369C782C4DB9C73DE78C5DB8E0615F647243B97ACDB24503BC9CADC48"},
- {"DCD475773136C830D5E3D0C5FE05B7FF",
- "BB8E1FBB483BE7616A922C4A",
- "36FEF2E1CB29E76A6EA663FC3AF66ECD7404F466382F7B040AABED62293302B56E8783EF7EBC21B4A16C3E78A7483A0A403F253A2CDC5BBF79DC3DAE6C73F39A961D8FBBE8D41B",
- "441E886EA38322B2437ECA7DEB5282518865A66780A454E510878E61BFEC3106A3CD93D2A02052E6F9E1832F9791053E3B76BF4C07EFDD6D4106E3027FABB752E60C1AA425416A87D53938163817A1051EBA1D1DEEB4B9B25C7E97368B52E5911A31810B0EC5AF547559B6142D9F4C4A6EF24A4CF75271BF9D48F62B",
- "1BE4DD2F4E25A6512C2CC71D24BBB07368589A94C2714962CD0ACE5605688F06342587521E75F0ACAFFD86212FB5C34327D238DB36CF2B787794B9A4412E7CD1410EA5DDD2450C265F29CF96013CD213FD2880657694D718558964BC189B4A84AFCF47EB012935483052399DBA5B088B0A0477F20DFE0E85DCB735E21F22A439FB837DD365A93116D063E607"},
- {"3FBA2B3D30177FFE15C1C59ED2148BB2C091F5615FBA7C07",
- "FACF804A4BEBF998505FF9DE",
- "8213B9263B2971A5BDA18DBD02208EE1",
- "15B323926993B326EA19F892D704439FC478828322AF72118748284A1FD8A6D814E641F70512FD706980337379F31DC63355974738D7FEA87AD2858C0C2EBBFBE74371C21450072373C7B651B334D7C4D43260B9D7CCD3AF9EDB",
- "6D35DC1469B26E6AAB26272A41B46916397C24C485B61162E640A062D9275BC33DDCFD3D9E1A53B6C8F51AC89B66A41D59B3574197A40D9B6DCF8A4E2A001409C8112F16B9C389E0096179DB914E05D6D11ED0005AD17E1CE105A2F0BAB8F6B1540DEB968B7A5428FF44"},
- {"53B52B8D4D748BCDF1DDE68857832FA46227FA6E2F32EFA1",
- "0B0EF53D4606B28D1398355F",
- "F23882436349094AF98BCACA8218E81581A043B19009E28EFBF2DE37883E04864148CC01D240552CA8844EC1456F42034653067DA67E80F87105FD06E14FF771246C9612867BE4D215F6D761",
- "F15030679BD4088D42CAC9BF2E9606EAD4798782FA3ED8C57EBE7F84A53236F51B25967C6489D0CD20C9EEA752F9BC",
- "67B96E2D67C3729C96DAEAEDF821D61C17E648643A2134C5621FEC621186915AD80864BFD1EB5B238BF526A679385E012A457F583AFA78134242E9D9C1B4E4"},
- {"0272DD80F23399F49BFC320381A5CD8225867245A49A7D41",
- "5C83F4896D0738E1366B1836",
- "69B0337289B19F73A12BAEEA857CCAF396C11113715D9500CCCF48BA08CFF12BC8B4BADB3084E63B85719DB5058FA7C2C11DEB096D7943CFA7CAF5",
- "C01AD10FC8B562CD17C7BC2FAB3E26CBDFF8D7F4DEA816794BBCC12336991712972F52816AABAB244EB43B0137E2BAC1DD413CE79531E78BEF782E6B439612BB3AEF154DE3502784F287958EBC159419F9EBA27916A28D6307324129F506B1DE80C1755A929F87",
- "FEFE52DD7159C8DD6E8EC2D3D3C0F37AB6CB471A75A071D17EC4ACDD8F3AA4D7D4F7BB559F3C09099E3D9003E5E8AA1F556B79CECDE66F85B08FA5955E6976BF2695EA076388A62D2AD5BAB7CBF1A7F3F4C8D5CDF37CDE99BD3E30B685D9E5EEE48C7C89118EF4878EB89747F28271FA2CC45F8E9E7601"},
- {"3EEAED04A455D6E5E5AB53CFD5AFD2F2BC625C7BF4BE49A5",
- "36B88F63ADBB5668588181D774",
- "D367E3CB3703E762D23C6533188EF7028EFF9D935A3977150361997EC9DEAF1E4794BDE26AA8B53C124980B1362EC86FCDDFC7A90073171C1BAEE351A53234B86C66E8AB92FAE99EC6967A6D3428892D80",
- "573454C719A9A55E04437BF7CBAAF27563CCCD92ADD5E515CD63305DFF0687E5EEF790C5DCA5C0033E9AB129505E2775438D92B38F08F3B0356BA142C6F694",
- "E9F79A5B432D9E682C9AAA5661CFC2E49A0FCB81A431E54B42EB73DD3BED3F377FEC556ABA81624BA64A5D739AD41467460088F8D4F442180A9382CA635745473794C382FCDDC49BA4EB6D8A44AE3C"},
- {"B695C691538F8CBD60F039D0E28894E3693CC7C36D92D79D",
- "BC099AEB637361BAC536B57618",
- "BFFF1A65AE38D1DC142C71637319F5F6508E2CB33C9DCB94202B359ED5A5ED8042E7F4F09231D32A7242976677E6F4C549BF65FADC99E5AF43F7A46FD95E16C2",
- "081DF3FD85B415D803F0BE5AC58CFF0023FDDED99788296C3731D8",
- "E50C64E3614D94FE69C47092E46ACC9957C6FEA2CCBF96BC62FBABE7424753C75F9C147C42AE26FE171531"},
- {"C9ACBD2718F0689A1BE9802A551B6B8D9CF5614DAF5E65ED",
- "B1B0AAF373B8B026EB80422051D8",
- "6648C0E61AC733C76119D23FB24548D637751387AA2EAE9D80E912B7BD486CAAD9EAF4D7A5FE2B54AAD481E8EC94BB4D558000896E2010462B70C9FED1E7273080D1",
- "189F591F6CB6D59AFEDD14C341741A8F1037DC0DF00FC57CE65C30F49E860255CEA5DC6019380CC0FE8880BC1A9E685F41C239C38F36E3F2A1388865C5C311059C0A",
- "922A5E949B61D03BE34AB5F4E58607D4504EA14017BB363DAE3C873059EA7A1C77A746FB78981671D26C2CF6D9F24952D510044CE02A10177E9DB42D0145211DFE6E84369C5E3BC2669EAB4147B2822895F9"},
- {"7A832BD2CF5BF4919F353CE2A8C86A5E406DA2D52BE16A72",
- "2F2F17CECF7E5A756D10785A3CB9DB",
- "61DA05E3788CC2D8405DBA70C7A28E5AF699863C9F72E6C6770126929F5D6FA267F005EBCF49495CB46400958A3AE80D1289D1C671",
- "44E91121195A41AF14E8CFDBD39A4B517BE0DF1A72977ED8A3EEF8EEDA1166B2EB6DB2C4AE2E74FA0F0C74537F659BFBD141E5DDEC67E64EDA85AABD3F52C85A785B9FB3CECD70E7DF",
- "BEDF596EA21288D2B84901E188F6EE1468B14D5161D3802DBFE00D60203A24E2AB62714BF272A45551489838C3A7FEAADC177B591836E73684867CCF4E12901DCF2064058726BBA554E84ADC5136F507E961188D4AF06943D3"},
- {"1508E8AE9079AA15F1CEC4F776B4D11BCCB061B58AA56C18",
- "BCA625674F41D1E3AB47672DC0C3",
- "8B12CF84F16360F0EAD2A41BC021530FFCEC7F3579CAE658E10E2D3D81870F65AFCED0C77C6C4C6E6BA424FF23088C796BA6195ABA35094BF1829E089662E7A95FC90750AE16D0C8AFA55DAC789D7735B970B58D4BE7CEC7341DA82A0179A01929C27A59C5063215B859EA43",
- "E525422519ECE070E82C",
- "B47BC07C3ED1C0A43BA52C43CBACBCDBB29CAF1001E09FDF7107"},
- {"7550C2761644E911FE9ADD119BAC07376BEA442845FEAD876D7E7AC1B713E464",
- "36D2EC25ADD33CDEDF495205BBC923",
- "7FCFE81A3790DE97FFC3DE160C470847EA7E841177C2F759571CBD837EA004A6CA8C6F4AEBFF2E9FD552D73EB8A30705D58D70C0B67AEEA280CBBF0A477358ACEF1E7508F2735CD9A0E4F9AC92B8C008F575D3B6278F1C18BD01227E3502E5255F3AB1893632AD00C717C588EF652A51A43209E7EE90",
- "2B1A62F8FDFAA3C16470A21AD307C9A7D03ADE8EF72C69B06F8D738CDE578D7AEFD0D40BD9C022FB9F580DF5394C998ACCCEFC5471A3996FB8F1045A81FDC6F32D13502EA65A211390C8D882B8E0BEFD8DD8CBEF51D1597B124E9F7F",
- "C873E02A22DB89EB0787DB6A60B99F7E4A0A085D5C4232A81ADCE2D60AA36F92DDC33F93DD8640AC0E08416B187FB382B3EC3EE85A64B0E6EE41C1366A5AD2A282F66605E87031CCBA2FA7B2DA201D975994AADE3DD1EE122AE09604AD489B84BF0C1AB7129EE16C6934850E"},
- {"A51300285E554FDBDE7F771A9A9A80955639DD87129FAEF74987C91FB9687C71",
- "81691D5D20EC818FCFF24B33DECC",
- "C948093218AA9EB2A8E44A87EEA73FC8B6B75A196819A14BD83709EA323E8DF8B491045220E1D88729A38DBCFFB60D3056DAD4564498FD6574F74512945DEB34B69329ACED9FFC05D5D59DFCD5B973E2ACAFE6AD1EF8BBBC49351A2DD12508ED89ED",
- "EB861165DAF7625F827C6B574ED703F03215",
- "C6CD1CE76D2B3679C1B5AA1CFD67CCB55444B6BFD3E22C81CBC9BB738796B83E54E3"},
- {"8CE0156D26FAEB7E0B9B800BBB2E9D4075B5EAC5C62358B0E7F6FCE610223282",
- "D2A7B94DD12CDACA909D3AD7",
- "E021A78F374FC271389AB9A3E97077D755",
- "7C26000B58929F5095E1CEE154F76C2A299248E299F9B5ADE6C403AA1FD4A67FD4E0232F214CE7B919EE7A1027D2B76C57475715CD078461",
- "C556FB38DF069B56F337B5FF5775CE6EAA16824DFA754F20B78819028EA635C3BB7AA731DE8776B2DCB67DCA2D33EEDF3C7E52EA450013722A41755A0752433ED17BDD5991AAE77A"},
- {"1E8000A2CE00A561C9920A30BF0D7B983FEF8A1014C8F04C35CA6970E6BA02BD",
- "65ED3D63F79F90BBFD19775E",
- "336A8C0B7243582A46B221AA677647FCAE91",
- "134A8B34824A290E7B",
- "914FBEF80D0E6E17F8BDBB6097EBF5FBB0554952DC2B9E5151"},
- {"53D5607BBE690B6E8D8F6D97F3DF2BA853B682597A214B8AA0EA6E598650AF15",
- "C391A856B9FE234E14BA1AC7BB40FF",
- "479682BC21349C4BE1641D5E78FE2C79EC1B9CF5470936DCAD9967A4DCD7C4EFADA593BC9EDE71E6A08829B8580901B61E274227E9D918502DE3",
- "EAD154DC09C5E26C5D26FF33ED148B27120C7F2C23225CC0D0631B03E1F6C6D96FEB88C1A4052ACB4CE746B884B6502931F407021126C6AAB8C514C077A5A38438AE88EE",
- "938821286EBB671D999B87C032E1D6055392EB564E57970D55E545FC5E8BAB90E6E3E3C0913F6320995FC636D72CD9919657CC38BD51552F4A502D8D1FE56DB33EBAC5092630E69EBB986F0E15CEE9FC8C052501"},
- {"294362FCC984F440CEA3E9F7D2C06AF20C53AAC1B3738CA2186C914A6E193ABB",
- "B15B61C8BB39261A8F55AB178EC3",
- "D0729B6B75BB",
- "2BD089ADCE9F334BAE3B065996C7D616DD0C27DF4218DCEEA0FBCA0F968837CE26B0876083327E25681FDDD620A32EC0DA12F73FAE826CC94BFF2B90A54D2651",
- "AC94B25E4E21DE2437B806966CCD5D9385EF0CD4A51AB9FA6DE675C7B8952D67802E9FEC1FDE9F5D1EAB06057498BC0EEA454804FC9D2068982A3E24182D9AC2E7AB9994DDC899A604264583F63D066B"},
- {"959DBFEB039B1A5B8CE6A44649B602AAA5F98A906DB96143D202CD2024F749D9",
- "01D7BDB1133E9C347486C1EFA6",
- "F3843955BD741F379DD750585EDC55E2CDA05CCBA8C1F4622AC2FE35214BC3A019B8BD12C4CC42D9213D1E1556941E8D8450830287FFB3B763A13722DD4140ED9846FB5FFF745D7B0B967D810A068222E10B259AF1D392035B0D83DC1498A6830B11B2418A840212599171E0258A1C203B05362978",
- "A21811232C950FA8B12237C2EBD6A7CD2C3A155905E9E0C7C120",
- "63C1CE397B22F1A03F1FA549B43178BC405B152D3C95E977426D519B3DFCA28498823240592B6EEE7A14"},
- {"096AE499F5294173F34FF2B375F0E5D5AB79D0D03B33B1A74D7D576826345DF4",
- "0C52B3D11D636E5910A4DD76D32C",
- "229E9ECA3053789E937447BC719467075B6138A142DA528DA8F0CF8DDF022FD9AF8E74779BA3AC306609",
- "8B7A00038783E8BAF6EDEAE0C4EAB48FC8FD501A588C7E4A4DB71E3604F2155A97687D3D2FFF8569261375A513CF4398CE0F87CA1658A1050F6EF6C4EA3E25",
- "C20B6CF8D3C8241825FD90B2EDAC7593600646E579A8D8DAAE9E2E40C3835FE801B2BE4379131452BC5182C90307B176DFBE2049544222FE7783147B690774F6D9D7CEF52A91E61E298E9AA15464AC"},
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go
deleted file mode 100644
index 330309ff..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package ocb
-
-import (
- "encoding/hex"
-)
-
-// Test vectors from https://tools.ietf.org/html/rfc7253. Note that key is
-// shared across tests.
-var testKey, _ = hex.DecodeString("000102030405060708090A0B0C0D0E0F")
-
-var rfc7253testVectors = []struct {
- nonce, header, plaintext, ciphertext string
-}{
- {"BBAA99887766554433221100",
- "",
- "",
- "785407BFFFC8AD9EDCC5520AC9111EE6"},
- {"BBAA99887766554433221101",
- "0001020304050607",
- "0001020304050607",
- "6820B3657B6F615A5725BDA0D3B4EB3A257C9AF1F8F03009"},
- {"BBAA99887766554433221102",
- "0001020304050607",
- "",
- "81017F8203F081277152FADE694A0A00"},
- {"BBAA99887766554433221103",
- "",
- "0001020304050607",
- "45DD69F8F5AAE72414054CD1F35D82760B2CD00D2F99BFA9"},
- {"BBAA99887766554433221104",
- "000102030405060708090A0B0C0D0E0F",
- "000102030405060708090A0B0C0D0E0F",
- "571D535B60B277188BE5147170A9A22C3AD7A4FF3835B8C5701C1CCEC8FC3358"},
- {"BBAA99887766554433221105",
- "000102030405060708090A0B0C0D0E0F",
- "",
- "8CF761B6902EF764462AD86498CA6B97"},
- {"BBAA99887766554433221106",
- "",
- "000102030405060708090A0B0C0D0E0F",
- "5CE88EC2E0692706A915C00AEB8B2396F40E1C743F52436BDF06D8FA1ECA343D"},
- {"BBAA99887766554433221107",
- "000102030405060708090A0B0C0D0E0F1011121314151617",
- "000102030405060708090A0B0C0D0E0F1011121314151617",
- "1CA2207308C87C010756104D8840CE1952F09673A448A122C92C62241051F57356D7F3C90BB0E07F"},
- {"BBAA99887766554433221108",
- "000102030405060708090A0B0C0D0E0F1011121314151617",
- "",
- "6DC225A071FC1B9F7C69F93B0F1E10DE"},
- {"BBAA99887766554433221109",
- "",
- "000102030405060708090A0B0C0D0E0F1011121314151617",
- "221BD0DE7FA6FE993ECCD769460A0AF2D6CDED0C395B1C3CE725F32494B9F914D85C0B1EB38357FF"},
- {"BBAA9988776655443322110A",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F",
- "BD6F6C496201C69296C11EFD138A467ABD3C707924B964DEAFFC40319AF5A48540FBBA186C5553C68AD9F592A79A4240"},
- {"BBAA9988776655443322110B",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F",
- "",
- "FE80690BEE8A485D11F32965BC9D2A32"},
- {"BBAA9988776655443322110C",
- "",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F",
- "2942BFC773BDA23CABC6ACFD9BFD5835BD300F0973792EF46040C53F1432BCDFB5E1DDE3BC18A5F840B52E653444D5DF"},
- {"BBAA9988776655443322110D",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627",
- "D5CA91748410C1751FF8A2F618255B68A0A12E093FF454606E59F9C1D0DDC54B65E8628E568BAD7AED07BA06A4A69483A7035490C5769E60"},
- {"BBAA9988776655443322110E",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627",
- "",
- "C5CD9D1850C141E358649994EE701B68"},
- {"BBAA9988776655443322110F",
- "",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627",
- "4412923493C57D5DE0D700F753CCE0D1D2D95060122E9F15A5DDBFC5787E50B5CC55EE507BCB084E479AD363AC366B95A98CA5F3000B1479"},
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go
deleted file mode 100644
index 14a3c336..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package ocb
-
-// Second set of test vectors from https://tools.ietf.org/html/rfc7253
-var rfc7253TestVectorTaglen96 = struct {
- key, nonce, header, plaintext, ciphertext string
-}{"0F0E0D0C0B0A09080706050403020100",
- "BBAA9988776655443322110D",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627",
- "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627",
- "1792A4E31E0755FB03E31B22116E6C2DDF9EFD6E33D536F1A0124B0A55BAE884ED93481529C76B6AD0C515F4D1CDD4FDAC4F02AA"}
-
-var rfc7253AlgorithmTest = []struct {
- KEYLEN, TAGLEN int
- OUTPUT string
-}{
- {128, 128, "67E944D23256C5E0B6C61FA22FDF1EA2"},
- {192, 128, "F673F2C3E7174AAE7BAE986CA9F29E17"},
- {256, 128, "D90EB8E9C977C88B79DD793D7FFA161C"},
- {128, 96, "77A3D8E73589158D25D01209"},
- {192, 96, "05D56EAD2752C86BE6932C5E"},
- {256, 96, "5458359AC23B0CBA9E6330DD"},
- {128, 64, "192C9B7BD90BA06A"},
- {192, 64, "0066BC6E0EF34E24"},
- {256, 64, "7D4EA5D445501CBE"},
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go
deleted file mode 100644
index 3c6251d1..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2014 Matthew Endsley
-// All rights reserved
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted providing that the following conditions
-// are met:
-// 1. Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// 2. Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
-// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
-// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-
-// Package keywrap is an implementation of the RFC 3394 AES key wrapping
-// algorithm. This is used in OpenPGP with elliptic curve keys.
-package keywrap
-
-import (
- "crypto/aes"
- "encoding/binary"
- "errors"
-)
-
-var (
- // ErrWrapPlaintext is returned if the plaintext is not a multiple
- // of 64 bits.
- ErrWrapPlaintext = errors.New("keywrap: plainText must be a multiple of 64 bits")
-
- // ErrUnwrapCiphertext is returned if the ciphertext is not a
- // multiple of 64 bits.
- ErrUnwrapCiphertext = errors.New("keywrap: cipherText must by a multiple of 64 bits")
-
- // ErrUnwrapFailed is returned if unwrapping a key fails.
- ErrUnwrapFailed = errors.New("keywrap: failed to unwrap key")
-
- // NB: the AES NewCipher call only fails if the key is an invalid length.
-
- // ErrInvalidKey is returned when the AES key is invalid.
- ErrInvalidKey = errors.New("keywrap: invalid AES key")
-)
-
-// Wrap a key using the RFC 3394 AES Key Wrap Algorithm.
-func Wrap(key, plainText []byte) ([]byte, error) {
- if len(plainText)%8 != 0 {
- return nil, ErrWrapPlaintext
- }
-
- c, err := aes.NewCipher(key)
- if err != nil {
- return nil, ErrInvalidKey
- }
-
- nblocks := len(plainText) / 8
-
- // 1) Initialize variables.
- var block [aes.BlockSize]byte
- // - Set A = IV, an initial value (see 2.2.3)
- for ii := 0; ii < 8; ii++ {
- block[ii] = 0xA6
- }
-
- // - For i = 1 to n
- // - Set R[i] = P[i]
- intermediate := make([]byte, len(plainText))
- copy(intermediate, plainText)
-
- // 2) Calculate intermediate values.
- for ii := 0; ii < 6; ii++ {
- for jj := 0; jj < nblocks; jj++ {
- // - B = AES(K, A | R[i])
- copy(block[8:], intermediate[jj*8:jj*8+8])
- c.Encrypt(block[:], block[:])
-
- // - A = MSB(64, B) ^ t where t = (n*j)+1
- t := uint64(ii*nblocks + jj + 1)
- val := binary.BigEndian.Uint64(block[:8]) ^ t
- binary.BigEndian.PutUint64(block[:8], val)
-
- // - R[i] = LSB(64, B)
- copy(intermediate[jj*8:jj*8+8], block[8:])
- }
- }
-
- // 3) Output results.
- // - Set C[0] = A
- // - For i = 1 to n
- // - C[i] = R[i]
- return append(block[:8], intermediate...), nil
-}
-
-// Unwrap a key using the RFC 3394 AES Key Wrap Algorithm.
-func Unwrap(key, cipherText []byte) ([]byte, error) {
- if len(cipherText)%8 != 0 {
- return nil, ErrUnwrapCiphertext
- }
-
- c, err := aes.NewCipher(key)
- if err != nil {
- return nil, ErrInvalidKey
- }
-
- nblocks := len(cipherText)/8 - 1
-
- // 1) Initialize variables.
- var block [aes.BlockSize]byte
- // - Set A = C[0]
- copy(block[:8], cipherText[:8])
-
- // - For i = 1 to n
- // - Set R[i] = C[i]
- intermediate := make([]byte, len(cipherText)-8)
- copy(intermediate, cipherText[8:])
-
- // 2) Compute intermediate values.
- for jj := 5; jj >= 0; jj-- {
- for ii := nblocks - 1; ii >= 0; ii-- {
- // - B = AES-1(K, (A ^ t) | R[i]) where t = n*j+1
- // - A = MSB(64, B)
- t := uint64(jj*nblocks + ii + 1)
- val := binary.BigEndian.Uint64(block[:8]) ^ t
- binary.BigEndian.PutUint64(block[:8], val)
-
- copy(block[8:], intermediate[ii*8:ii*8+8])
- c.Decrypt(block[:], block[:])
-
- // - R[i] = LSB(B, 64)
- copy(intermediate[ii*8:ii*8+8], block[8:])
- }
- }
-
- // 3) Output results.
- // - If A is an appropriate initial value (see 2.2.3),
- for ii := 0; ii < 8; ii++ {
- if block[ii] != 0xA6 {
- return nil, ErrUnwrapFailed
- }
- }
-
- // - For i = 1 to n
- // - P[i] = R[i]
- return intermediate, nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go
deleted file mode 100644
index e0a677f2..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is
-// very similar to PEM except that it has an additional CRC checksum.
-package armor // import "github.com/ProtonMail/go-crypto/openpgp/armor"
-
-import (
- "bufio"
- "bytes"
- "encoding/base64"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-// A Block represents an OpenPGP armored structure.
-//
-// The encoded form is:
-//
-// -----BEGIN Type-----
-// Headers
-//
-// base64-encoded Bytes
-// '=' base64 encoded checksum (optional) not checked anymore
-// -----END Type-----
-//
-// where Headers is a possibly empty sequence of Key: Value lines.
-//
-// Since the armored data can be very large, this package presents a streaming
-// interface.
-type Block struct {
- Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE").
- Header map[string]string // Optional headers.
- Body io.Reader // A Reader from which the contents can be read
- lReader lineReader
- oReader openpgpReader
-}
-
-var ArmorCorrupt error = errors.StructuralError("armor invalid")
-
-var armorStart = []byte("-----BEGIN ")
-var armorEnd = []byte("-----END ")
-var armorEndOfLine = []byte("-----")
-
-// lineReader wraps a line based reader. It watches for the end of an armor block
-type lineReader struct {
- in *bufio.Reader
- buf []byte
- eof bool
-}
-
-func (l *lineReader) Read(p []byte) (n int, err error) {
- if l.eof {
- return 0, io.EOF
- }
-
- if len(l.buf) > 0 {
- n = copy(p, l.buf)
- l.buf = l.buf[n:]
- return
- }
-
- line, isPrefix, err := l.in.ReadLine()
- if err != nil {
- return
- }
- if isPrefix {
- return 0, ArmorCorrupt
- }
-
- if bytes.HasPrefix(line, armorEnd) {
- l.eof = true
- return 0, io.EOF
- }
-
- if len(line) == 5 && line[0] == '=' {
- // This is the checksum line
- // Don't check the checksum
-
- l.eof = true
- return 0, io.EOF
- }
-
- if len(line) > 96 {
- return 0, ArmorCorrupt
- }
-
- n = copy(p, line)
- bytesToSave := len(line) - n
- if bytesToSave > 0 {
- if cap(l.buf) < bytesToSave {
- l.buf = make([]byte, 0, bytesToSave)
- }
- l.buf = l.buf[0:bytesToSave]
- copy(l.buf, line[n:])
- }
-
- return
-}
-
-// openpgpReader passes Read calls to the underlying base64 decoder.
-type openpgpReader struct {
- lReader *lineReader
- b64Reader io.Reader
-}
-
-func (r *openpgpReader) Read(p []byte) (n int, err error) {
- n, err = r.b64Reader.Read(p)
- return
-}
-
-// Decode reads a PGP armored block from the given Reader. It will ignore
-// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The
-// given Reader is not usable after calling this function: an arbitrary amount
-// of data may have been read past the end of the block.
-func Decode(in io.Reader) (p *Block, err error) {
- r := bufio.NewReaderSize(in, 100)
- var line []byte
- ignoreNext := false
-
-TryNextBlock:
- p = nil
-
- // Skip leading garbage
- for {
- ignoreThis := ignoreNext
- line, ignoreNext, err = r.ReadLine()
- if err != nil {
- return
- }
- if ignoreNext || ignoreThis {
- continue
- }
- line = bytes.TrimSpace(line)
- if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) {
- break
- }
- }
-
- p = new(Block)
- p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)])
- p.Header = make(map[string]string)
- nextIsContinuation := false
- var lastKey string
-
- // Read headers
- for {
- isContinuation := nextIsContinuation
- line, nextIsContinuation, err = r.ReadLine()
- if err != nil {
- p = nil
- return
- }
- if isContinuation {
- p.Header[lastKey] += string(line)
- continue
- }
- line = bytes.TrimSpace(line)
- if len(line) == 0 {
- break
- }
-
- i := bytes.Index(line, []byte(":"))
- if i == -1 {
- goto TryNextBlock
- }
- lastKey = string(line[:i])
- var value string
- if len(line) > i+2 {
- value = string(line[i+2:])
- }
- p.Header[lastKey] = value
- }
-
- p.lReader.in = r
- p.oReader.lReader = &p.lReader
- p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader)
- p.Body = &p.oReader
-
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go
deleted file mode 100644
index 112f98b8..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package armor
-
-import (
- "encoding/base64"
- "io"
-)
-
-var armorHeaderSep = []byte(": ")
-var blockEnd = []byte("\n=")
-var newline = []byte("\n")
-var armorEndOfLineOut = []byte("-----\n")
-
-const crc24Init = 0xb704ce
-const crc24Poly = 0x1864cfb
-
-// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1
-func crc24(crc uint32, d []byte) uint32 {
- for _, b := range d {
- crc ^= uint32(b) << 16
- for i := 0; i < 8; i++ {
- crc <<= 1
- if crc&0x1000000 != 0 {
- crc ^= crc24Poly
- }
- }
- }
- return crc
-}
-
-// writeSlices writes its arguments to the given Writer.
-func writeSlices(out io.Writer, slices ...[]byte) (err error) {
- for _, s := range slices {
- _, err = out.Write(s)
- if err != nil {
- return err
- }
- }
- return
-}
-
-// lineBreaker breaks data across several lines, all of the same byte length
-// (except possibly the last). Lines are broken with a single '\n'.
-type lineBreaker struct {
- lineLength int
- line []byte
- used int
- out io.Writer
- haveWritten bool
-}
-
-func newLineBreaker(out io.Writer, lineLength int) *lineBreaker {
- return &lineBreaker{
- lineLength: lineLength,
- line: make([]byte, lineLength),
- used: 0,
- out: out,
- }
-}
-
-func (l *lineBreaker) Write(b []byte) (n int, err error) {
- n = len(b)
-
- if n == 0 {
- return
- }
-
- if l.used == 0 && l.haveWritten {
- _, err = l.out.Write([]byte{'\n'})
- if err != nil {
- return
- }
- }
-
- if l.used+len(b) < l.lineLength {
- l.used += copy(l.line[l.used:], b)
- return
- }
-
- l.haveWritten = true
- _, err = l.out.Write(l.line[0:l.used])
- if err != nil {
- return
- }
- excess := l.lineLength - l.used
- l.used = 0
-
- _, err = l.out.Write(b[0:excess])
- if err != nil {
- return
- }
-
- _, err = l.Write(b[excess:])
- return
-}
-
-func (l *lineBreaker) Close() (err error) {
- if l.used > 0 {
- _, err = l.out.Write(l.line[0:l.used])
- if err != nil {
- return
- }
- }
-
- return
-}
-
-// encoding keeps track of a running CRC24 over the data which has been written
-// to it and outputs a OpenPGP checksum when closed, followed by an armor
-// trailer.
-//
-// It's built into a stack of io.Writers:
-//
-// encoding -> base64 encoder -> lineBreaker -> out
-type encoding struct {
- out io.Writer
- breaker *lineBreaker
- b64 io.WriteCloser
- crc uint32
- crcEnabled bool
- blockType []byte
-}
-
-func (e *encoding) Write(data []byte) (n int, err error) {
- if e.crcEnabled {
- e.crc = crc24(e.crc, data)
- }
- return e.b64.Write(data)
-}
-
-func (e *encoding) Close() (err error) {
- err = e.b64.Close()
- if err != nil {
- return
- }
- e.breaker.Close()
-
- if e.crcEnabled {
- var checksumBytes [3]byte
- checksumBytes[0] = byte(e.crc >> 16)
- checksumBytes[1] = byte(e.crc >> 8)
- checksumBytes[2] = byte(e.crc)
-
- var b64ChecksumBytes [4]byte
- base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:])
-
- return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine)
- }
- return writeSlices(e.out, newline, armorEnd, e.blockType, armorEndOfLine)
-}
-
-func encode(out io.Writer, blockType string, headers map[string]string, checksum bool) (w io.WriteCloser, err error) {
- bType := []byte(blockType)
- err = writeSlices(out, armorStart, bType, armorEndOfLineOut)
- if err != nil {
- return
- }
-
- for k, v := range headers {
- err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline)
- if err != nil {
- return
- }
- }
-
- _, err = out.Write(newline)
- if err != nil {
- return
- }
-
- e := &encoding{
- out: out,
- breaker: newLineBreaker(out, 64),
- blockType: bType,
- crc: crc24Init,
- crcEnabled: checksum,
- }
- e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker)
- return e, nil
-}
-
-// Encode returns a WriteCloser which will encode the data written to it in
-// OpenPGP armor.
-func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) {
- return encode(out, blockType, headers, true)
-}
-
-// EncodeWithChecksumOption returns a WriteCloser which will encode the data written to it in
-// OpenPGP armor and provides the option to include a checksum.
-// When forming ASCII Armor, the CRC24 footer SHOULD NOT be generated,
-// unless interoperability with implementations that require the CRC24 footer
-// to be present is a concern.
-func EncodeWithChecksumOption(out io.Writer, blockType string, headers map[string]string, doChecksum bool) (w io.WriteCloser, err error) {
- return encode(out, blockType, headers, doChecksum)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go
deleted file mode 100644
index 5b40e137..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package openpgp
-
-import (
- "hash"
- "io"
-)
-
-// NewCanonicalTextHash reformats text written to it into the canonical
-// form and then applies the hash h. See RFC 4880, section 5.2.1.
-func NewCanonicalTextHash(h hash.Hash) hash.Hash {
- return &canonicalTextHash{h, 0}
-}
-
-type canonicalTextHash struct {
- h hash.Hash
- s int
-}
-
-var newline = []byte{'\r', '\n'}
-
-func writeCanonical(cw io.Writer, buf []byte, s *int) (int, error) {
- start := 0
- for i, c := range buf {
- switch *s {
- case 0:
- if c == '\r' {
- *s = 1
- } else if c == '\n' {
- if _, err := cw.Write(buf[start:i]); err != nil {
- return 0, err
- }
- if _, err := cw.Write(newline); err != nil {
- return 0, err
- }
- start = i + 1
- }
- case 1:
- *s = 0
- }
- }
-
- if _, err := cw.Write(buf[start:]); err != nil {
- return 0, err
- }
- return len(buf), nil
-}
-
-func (cth *canonicalTextHash) Write(buf []byte) (int, error) {
- return writeCanonical(cth.h, buf, &cth.s)
-}
-
-func (cth *canonicalTextHash) Sum(in []byte) []byte {
- return cth.h.Sum(in)
-}
-
-func (cth *canonicalTextHash) Reset() {
- cth.h.Reset()
- cth.s = 0
-}
-
-func (cth *canonicalTextHash) Size() int {
- return cth.h.Size()
-}
-
-func (cth *canonicalTextHash) BlockSize() int {
- return cth.h.BlockSize()
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go
deleted file mode 100644
index 1cc7e67e..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go
+++ /dev/null
@@ -1,285 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ecdh implements ECDH encryption, suitable for OpenPGP,
-// as specified in RFC 6637, section 8.
-package ecdh
-
-import (
- "bytes"
- "errors"
- "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap"
- pgperrors "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
- "github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519"
- "io"
-)
-
-const (
- KDFVersion1 = 1
- KDFVersionForwarding = 255
-)
-
-type KDF struct {
- Version int // Defaults to v1; 255 for forwarding
- Hash algorithm.Hash
- Cipher algorithm.Cipher
- ReplacementFingerprint []byte // (forwarding only) fingerprint to use instead of recipient's (20 octets)
-}
-
-func (kdf *KDF) Serialize(w io.Writer) (err error) {
- switch kdf.Version {
- case 0, KDFVersion1: // Default to v1 if unspecified
- return kdf.serializeForHash(w)
- case KDFVersionForwarding:
- // Length || Version || Hash || Cipher || Replacement Fingerprint
- length := byte(3 + len(kdf.ReplacementFingerprint))
- if _, err := w.Write([]byte{length, KDFVersionForwarding, kdf.Hash.Id(), kdf.Cipher.Id()}); err != nil {
- return err
- }
- if _, err := w.Write(kdf.ReplacementFingerprint); err != nil {
- return err
- }
-
- return nil
- default:
- return errors.New("ecdh: invalid KDF version")
- }
-}
-
-func (kdf *KDF) serializeForHash(w io.Writer) (err error) {
- // Length || Version || Hash || Cipher
- if _, err := w.Write([]byte{3, KDFVersion1, kdf.Hash.Id(), kdf.Cipher.Id()}); err != nil {
- return err
- }
- return nil
-}
-
-type PublicKey struct {
- curve ecc.ECDHCurve
- Point []byte
- KDF
-}
-
-type PrivateKey struct {
- PublicKey
- D []byte
-}
-
-func NewPublicKey(curve ecc.ECDHCurve, kdf KDF) *PublicKey {
- return &PublicKey{
- curve: curve,
- KDF: kdf,
- }
-}
-
-func NewPrivateKey(key PublicKey) *PrivateKey {
- return &PrivateKey{
- PublicKey: key,
- }
-}
-
-func (pk *PublicKey) GetCurve() ecc.ECDHCurve {
- return pk.curve
-}
-
-func (pk *PublicKey) MarshalPoint() []byte {
- return pk.curve.MarshalBytePoint(pk.Point)
-}
-
-func (pk *PublicKey) UnmarshalPoint(p []byte) error {
- pk.Point = pk.curve.UnmarshalBytePoint(p)
- if pk.Point == nil {
- return errors.New("ecdh: failed to parse EC point")
- }
- return nil
-}
-
-func (sk *PrivateKey) MarshalByteSecret() []byte {
- return sk.curve.MarshalByteSecret(sk.D)
-}
-
-func (sk *PrivateKey) UnmarshalByteSecret(d []byte) error {
- sk.D = sk.curve.UnmarshalByteSecret(d)
-
- if sk.D == nil {
- return errors.New("ecdh: failed to parse scalar")
- }
- return nil
-}
-
-func GenerateKey(rand io.Reader, c ecc.ECDHCurve, kdf KDF) (priv *PrivateKey, err error) {
- priv = new(PrivateKey)
- priv.PublicKey.curve = c
- priv.PublicKey.KDF = kdf
- priv.PublicKey.Point, priv.D, err = c.GenerateECDH(rand)
- return
-}
-
-func Encrypt(random io.Reader, pub *PublicKey, msg, curveOID, fingerprint []byte) (vsG, c []byte, err error) {
- if len(msg) > 40 {
- return nil, nil, errors.New("ecdh: message too long")
- }
- // the sender MAY use 21, 13, and 5 bytes of padding for AES-128,
- // AES-192, and AES-256, respectively, to provide the same number of
- // octets, 40 total, as an input to the key wrapping method.
- padding := make([]byte, 40-len(msg))
- for i := range padding {
- padding[i] = byte(40 - len(msg))
- }
- m := append(msg, padding...)
-
- ephemeral, zb, err := pub.curve.Encaps(random, pub.Point)
- if err != nil {
- return nil, nil, err
- }
-
- vsG = pub.curve.MarshalBytePoint(ephemeral)
-
- z, err := buildKey(pub, zb, curveOID, fingerprint, false, false)
- if err != nil {
- return nil, nil, err
- }
-
- if c, err = keywrap.Wrap(z, m); err != nil {
- return nil, nil, err
- }
-
- return vsG, c, nil
-
-}
-
-func Decrypt(priv *PrivateKey, vsG, c, curveOID, fingerprint []byte) (msg []byte, err error) {
- var m []byte
- zb, err := priv.PublicKey.curve.Decaps(priv.curve.UnmarshalBytePoint(vsG), priv.D)
-
- // Try buildKey three times to workaround an old bug, see comments in buildKey.
- for i := 0; i < 3; i++ {
- var z []byte
- // RFC6637 §8: "Compute Z = KDF( S, Z_len, Param );"
- z, err = buildKey(&priv.PublicKey, zb, curveOID, fingerprint, i == 1, i == 2)
- if err != nil {
- return nil, err
- }
-
- // RFC6637 §8: "Compute C = AESKeyWrap( Z, c ) as per [RFC3394]"
- m, err = keywrap.Unwrap(z, c)
- if err == nil {
- break
- }
- }
-
- // Only return an error after we've tried all (required) variants of buildKey.
- if err != nil {
- return nil, err
- }
-
- // RFC6637 §8: "m = symm_alg_ID || session key || checksum || pkcs5_padding"
- // The last byte should be the length of the padding, as per PKCS5; strip it off.
- return m[:len(m)-int(m[len(m)-1])], nil
-}
-
-func buildKey(pub *PublicKey, zb []byte, curveOID, fingerprint []byte, stripLeading, stripTrailing bool) ([]byte, error) {
- // Param = curve_OID_len || curve_OID || public_key_alg_ID
- // || KDF_params for AESKeyWrap
- // || "Anonymous Sender " || recipient_fingerprint;
- param := new(bytes.Buffer)
- if _, err := param.Write(curveOID); err != nil {
- return nil, err
- }
- algo := []byte{18}
- if _, err := param.Write(algo); err != nil {
- return nil, err
- }
-
- if err := pub.KDF.serializeForHash(param); err != nil {
- return nil, err
- }
-
- if _, err := param.Write([]byte("Anonymous Sender ")); err != nil {
- return nil, err
- }
- if pub.KDF.ReplacementFingerprint != nil {
- fingerprint = pub.KDF.ReplacementFingerprint
- }
-
- if _, err := param.Write(fingerprint); err != nil {
- return nil, err
- }
-
- // MB = Hash ( 00 || 00 || 00 || 01 || ZB || Param );
- h := pub.KDF.Hash.New()
- if _, err := h.Write([]byte{0x0, 0x0, 0x0, 0x1}); err != nil {
- return nil, err
- }
- zbLen := len(zb)
- i := 0
- j := zbLen - 1
- if stripLeading {
- // Work around old go crypto bug where the leading zeros are missing.
- for i < zbLen && zb[i] == 0 {
- i++
- }
- }
- if stripTrailing {
- // Work around old OpenPGP.js bug where insignificant trailing zeros in
- // this little-endian number are missing.
- // (See https://github.com/openpgpjs/openpgpjs/pull/853.)
- for j >= 0 && zb[j] == 0 {
- j--
- }
- }
- if _, err := h.Write(zb[i : j+1]); err != nil {
- return nil, err
- }
- if _, err := h.Write(param.Bytes()); err != nil {
- return nil, err
- }
- mb := h.Sum(nil)
-
- return mb[:pub.KDF.Cipher.KeySize()], nil // return oBits leftmost bits of MB.
-
-}
-
-func Validate(priv *PrivateKey) error {
- return priv.curve.ValidateECDH(priv.Point, priv.D)
-}
-
-func DeriveProxyParam(recipientKey, forwardeeKey *PrivateKey) (proxyParam []byte, err error) {
- if recipientKey.GetCurve().GetCurveName() != "curve25519" {
- return nil, pgperrors.InvalidArgumentError("recipient subkey is not curve25519")
- }
-
- if forwardeeKey.GetCurve().GetCurveName() != "curve25519" {
- return nil, pgperrors.InvalidArgumentError("forwardee subkey is not curve25519")
- }
-
- c := ecc.NewCurve25519()
-
- // Clamp and reverse two secrets
- proxyParam, err = curve25519.DeriveProxyParam(c.MarshalByteSecret(recipientKey.D), c.MarshalByteSecret(forwardeeKey.D))
-
- return proxyParam, err
-}
-
-func ProxyTransform(ephemeral, proxyParam []byte) ([]byte, error) {
- c := ecc.NewCurve25519()
-
- parsedEphemeral := c.UnmarshalBytePoint(ephemeral)
- if parsedEphemeral == nil {
- return nil, pgperrors.InvalidArgumentError("invalid ephemeral")
- }
-
- if len(proxyParam) != curve25519.ParamSize {
- return nil, pgperrors.InvalidArgumentError("invalid proxy parameter")
- }
-
- transformed, err := curve25519.ProxyTransform(parsedEphemeral, proxyParam)
- if err != nil {
- return nil, err
- }
-
- return c.MarshalBytePoint(transformed), nil
-}
\ No newline at end of file
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go
deleted file mode 100644
index f94ae1b2..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdsa/ecdsa.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Package ecdsa implements ECDSA signature, suitable for OpenPGP,
-// as specified in RFC 6637, section 5.
-package ecdsa
-
-import (
- "errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
- "io"
- "math/big"
-)
-
-type PublicKey struct {
- X, Y *big.Int
- curve ecc.ECDSACurve
-}
-
-type PrivateKey struct {
- PublicKey
- D *big.Int
-}
-
-func NewPublicKey(curve ecc.ECDSACurve) *PublicKey {
- return &PublicKey{
- curve: curve,
- }
-}
-
-func NewPrivateKey(key PublicKey) *PrivateKey {
- return &PrivateKey{
- PublicKey: key,
- }
-}
-
-func (pk *PublicKey) GetCurve() ecc.ECDSACurve {
- return pk.curve
-}
-
-func (pk *PublicKey) MarshalPoint() []byte {
- return pk.curve.MarshalIntegerPoint(pk.X, pk.Y)
-}
-
-func (pk *PublicKey) UnmarshalPoint(p []byte) error {
- pk.X, pk.Y = pk.curve.UnmarshalIntegerPoint(p)
- if pk.X == nil {
- return errors.New("ecdsa: failed to parse EC point")
- }
- return nil
-}
-
-func (sk *PrivateKey) MarshalIntegerSecret() []byte {
- return sk.curve.MarshalIntegerSecret(sk.D)
-}
-
-func (sk *PrivateKey) UnmarshalIntegerSecret(d []byte) error {
- sk.D = sk.curve.UnmarshalIntegerSecret(d)
-
- if sk.D == nil {
- return errors.New("ecdsa: failed to parse scalar")
- }
- return nil
-}
-
-func GenerateKey(rand io.Reader, c ecc.ECDSACurve) (priv *PrivateKey, err error) {
- priv = new(PrivateKey)
- priv.PublicKey.curve = c
- priv.PublicKey.X, priv.PublicKey.Y, priv.D, err = c.GenerateECDSA(rand)
- return
-}
-
-func Sign(rand io.Reader, priv *PrivateKey, hash []byte) (r, s *big.Int, err error) {
- return priv.PublicKey.curve.Sign(rand, priv.X, priv.Y, priv.D, hash)
-}
-
-func Verify(pub *PublicKey, hash []byte, r, s *big.Int) bool {
- return pub.curve.Verify(pub.X, pub.Y, hash, r, s)
-}
-
-func Validate(priv *PrivateKey) error {
- return priv.curve.ValidateECDSA(priv.X, priv.Y, priv.D.Bytes())
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ed25519/ed25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ed25519/ed25519.go
deleted file mode 100644
index 6abdf7c4..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/ed25519/ed25519.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Package ed25519 implements the ed25519 signature algorithm for OpenPGP
-// as defined in the Open PGP crypto refresh.
-package ed25519
-
-import (
- "crypto/subtle"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- ed25519lib "github.com/cloudflare/circl/sign/ed25519"
-)
-
-const (
- // PublicKeySize is the size, in bytes, of public keys in this package.
- PublicKeySize = ed25519lib.PublicKeySize
- // SeedSize is the size, in bytes, of private key seeds.
- // The private key representation used by RFC 8032.
- SeedSize = ed25519lib.SeedSize
- // SignatureSize is the size, in bytes, of signatures generated and verified by this package.
- SignatureSize = ed25519lib.SignatureSize
-)
-
-type PublicKey struct {
- // Point represents the elliptic curve point of the public key.
- Point []byte
-}
-
-type PrivateKey struct {
- PublicKey
- // Key the private key representation by RFC 8032,
- // encoded as seed | pub key point.
- Key []byte
-}
-
-// NewPublicKey creates a new empty ed25519 public key.
-func NewPublicKey() *PublicKey {
- return &PublicKey{}
-}
-
-// NewPrivateKey creates a new empty private key referencing the public key.
-func NewPrivateKey(key PublicKey) *PrivateKey {
- return &PrivateKey{
- PublicKey: key,
- }
-}
-
-// Seed returns the ed25519 private key secret seed.
-// The private key representation by RFC 8032.
-func (pk *PrivateKey) Seed() []byte {
- return pk.Key[:SeedSize]
-}
-
-// MarshalByteSecret returns the underlying 32 byte seed of the private key.
-func (pk *PrivateKey) MarshalByteSecret() []byte {
- return pk.Seed()
-}
-
-// UnmarshalByteSecret computes the private key from the secret seed
-// and stores it in the private key object.
-func (sk *PrivateKey) UnmarshalByteSecret(seed []byte) error {
- sk.Key = ed25519lib.NewKeyFromSeed(seed)
- return nil
-}
-
-// GenerateKey generates a fresh private key with the provided randomness source.
-func GenerateKey(rand io.Reader) (*PrivateKey, error) {
- publicKey, privateKey, err := ed25519lib.GenerateKey(rand)
- if err != nil {
- return nil, err
- }
- privateKeyOut := new(PrivateKey)
- privateKeyOut.PublicKey.Point = publicKey[:]
- privateKeyOut.Key = privateKey[:]
- return privateKeyOut, nil
-}
-
-// Sign signs a message with the ed25519 algorithm.
-// priv MUST be a valid key! Check this with Validate() before use.
-func Sign(priv *PrivateKey, message []byte) ([]byte, error) {
- return ed25519lib.Sign(priv.Key, message), nil
-}
-
-// Verify verifies an ed25519 signature.
-func Verify(pub *PublicKey, message []byte, signature []byte) bool {
- return ed25519lib.Verify(pub.Point, message, signature)
-}
-
-// Validate checks if the ed25519 private key is valid.
-func Validate(priv *PrivateKey) error {
- expectedPrivateKey := ed25519lib.NewKeyFromSeed(priv.Seed())
- if subtle.ConstantTimeCompare(priv.Key, expectedPrivateKey) == 0 {
- return errors.KeyInvalidError("ed25519: invalid ed25519 secret")
- }
- if subtle.ConstantTimeCompare(priv.PublicKey.Point, expectedPrivateKey[SeedSize:]) == 0 {
- return errors.KeyInvalidError("ed25519: invalid ed25519 public key")
- }
- return nil
-}
-
-// ENCODING/DECODING signature:
-
-// WriteSignature encodes and writes an ed25519 signature to writer.
-func WriteSignature(writer io.Writer, signature []byte) error {
- _, err := writer.Write(signature)
- return err
-}
-
-// ReadSignature decodes an ed25519 signature from a reader.
-func ReadSignature(reader io.Reader) ([]byte, error) {
- signature := make([]byte, SignatureSize)
- if _, err := io.ReadFull(reader, signature); err != nil {
- return nil, err
- }
- return signature, nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ed448/ed448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ed448/ed448.go
deleted file mode 100644
index b11fb4fb..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/ed448/ed448.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Package ed448 implements the ed448 signature algorithm for OpenPGP
-// as defined in the Open PGP crypto refresh.
-package ed448
-
-import (
- "crypto/subtle"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- ed448lib "github.com/cloudflare/circl/sign/ed448"
-)
-
-const (
- // PublicKeySize is the size, in bytes, of public keys in this package.
- PublicKeySize = ed448lib.PublicKeySize
- // SeedSize is the size, in bytes, of private key seeds.
- // The private key representation used by RFC 8032.
- SeedSize = ed448lib.SeedSize
- // SignatureSize is the size, in bytes, of signatures generated and verified by this package.
- SignatureSize = ed448lib.SignatureSize
-)
-
-type PublicKey struct {
- // Point represents the elliptic curve point of the public key.
- Point []byte
-}
-
-type PrivateKey struct {
- PublicKey
- // Key the private key representation by RFC 8032,
- // encoded as seed | public key point.
- Key []byte
-}
-
-// NewPublicKey creates a new empty ed448 public key.
-func NewPublicKey() *PublicKey {
- return &PublicKey{}
-}
-
-// NewPrivateKey creates a new empty private key referencing the public key.
-func NewPrivateKey(key PublicKey) *PrivateKey {
- return &PrivateKey{
- PublicKey: key,
- }
-}
-
-// Seed returns the ed448 private key secret seed.
-// The private key representation by RFC 8032.
-func (pk *PrivateKey) Seed() []byte {
- return pk.Key[:SeedSize]
-}
-
-// MarshalByteSecret returns the underlying seed of the private key.
-func (pk *PrivateKey) MarshalByteSecret() []byte {
- return pk.Seed()
-}
-
-// UnmarshalByteSecret computes the private key from the secret seed
-// and stores it in the private key object.
-func (sk *PrivateKey) UnmarshalByteSecret(seed []byte) error {
- sk.Key = ed448lib.NewKeyFromSeed(seed)
- return nil
-}
-
-// GenerateKey generates a fresh private key with the provided randomness source.
-func GenerateKey(rand io.Reader) (*PrivateKey, error) {
- publicKey, privateKey, err := ed448lib.GenerateKey(rand)
- if err != nil {
- return nil, err
- }
- privateKeyOut := new(PrivateKey)
- privateKeyOut.PublicKey.Point = publicKey[:]
- privateKeyOut.Key = privateKey[:]
- return privateKeyOut, nil
-}
-
-// Sign signs a message with the ed448 algorithm.
-// priv MUST be a valid key! Check this with Validate() before use.
-func Sign(priv *PrivateKey, message []byte) ([]byte, error) {
- // Ed448 is used with the empty string as a context string.
- // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-08#section-13.7
- return ed448lib.Sign(priv.Key, message, ""), nil
-}
-
-// Verify verifies a ed448 signature
-func Verify(pub *PublicKey, message []byte, signature []byte) bool {
- // Ed448 is used with the empty string as a context string.
- // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-08#section-13.7
- return ed448lib.Verify(pub.Point, message, signature, "")
-}
-
-// Validate checks if the ed448 private key is valid
-func Validate(priv *PrivateKey) error {
- expectedPrivateKey := ed448lib.NewKeyFromSeed(priv.Seed())
- if subtle.ConstantTimeCompare(priv.Key, expectedPrivateKey) == 0 {
- return errors.KeyInvalidError("ed448: invalid ed448 secret")
- }
- if subtle.ConstantTimeCompare(priv.PublicKey.Point, expectedPrivateKey[SeedSize:]) == 0 {
- return errors.KeyInvalidError("ed448: invalid ed448 public key")
- }
- return nil
-}
-
-// ENCODING/DECODING signature:
-
-// WriteSignature encodes and writes an ed448 signature to writer.
-func WriteSignature(writer io.Writer, signature []byte) error {
- _, err := writer.Write(signature)
- return err
-}
-
-// ReadSignature decodes an ed448 signature from a reader.
-func ReadSignature(reader io.Reader) ([]byte, error) {
- signature := make([]byte, SignatureSize)
- if _, err := io.ReadFull(reader, signature); err != nil {
- return nil, err
- }
- return signature, nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go
deleted file mode 100644
index 99ecfc7f..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/eddsa/eddsa.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Package eddsa implements EdDSA signature, suitable for OpenPGP, as specified in
-// https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7
-package eddsa
-
-import (
- "errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
- "io"
-)
-
-type PublicKey struct {
- X []byte
- curve ecc.EdDSACurve
-}
-
-type PrivateKey struct {
- PublicKey
- D []byte
-}
-
-func NewPublicKey(curve ecc.EdDSACurve) *PublicKey {
- return &PublicKey{
- curve: curve,
- }
-}
-
-func NewPrivateKey(key PublicKey) *PrivateKey {
- return &PrivateKey{
- PublicKey: key,
- }
-}
-
-func (pk *PublicKey) GetCurve() ecc.EdDSACurve {
- return pk.curve
-}
-
-func (pk *PublicKey) MarshalPoint() []byte {
- return pk.curve.MarshalBytePoint(pk.X)
-}
-
-func (pk *PublicKey) UnmarshalPoint(x []byte) error {
- pk.X = pk.curve.UnmarshalBytePoint(x)
-
- if pk.X == nil {
- return errors.New("eddsa: failed to parse EC point")
- }
- return nil
-}
-
-func (sk *PrivateKey) MarshalByteSecret() []byte {
- return sk.curve.MarshalByteSecret(sk.D)
-}
-
-func (sk *PrivateKey) UnmarshalByteSecret(d []byte) error {
- sk.D = sk.curve.UnmarshalByteSecret(d)
-
- if sk.D == nil {
- return errors.New("eddsa: failed to parse scalar")
- }
- return nil
-}
-
-func GenerateKey(rand io.Reader, c ecc.EdDSACurve) (priv *PrivateKey, err error) {
- priv = new(PrivateKey)
- priv.PublicKey.curve = c
- priv.PublicKey.X, priv.D, err = c.GenerateEdDSA(rand)
- return
-}
-
-func Sign(priv *PrivateKey, message []byte) (r, s []byte, err error) {
- sig, err := priv.PublicKey.curve.Sign(priv.PublicKey.X, priv.D, message)
- if err != nil {
- return nil, nil, err
- }
-
- r, s = priv.PublicKey.curve.MarshalSignature(sig)
- return
-}
-
-func Verify(pub *PublicKey, message, r, s []byte) bool {
- sig := pub.curve.UnmarshalSignature(r, s)
- if sig == nil {
- return false
- }
-
- return pub.curve.Verify(pub.X, message, sig)
-}
-
-func Validate(priv *PrivateKey) error {
- return priv.curve.ValidateEdDSA(priv.PublicKey.X, priv.D)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go
deleted file mode 100644
index bad27743..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package elgamal implements ElGamal encryption, suitable for OpenPGP,
-// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on
-// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31,
-// n. 4, 1985, pp. 469-472.
-//
-// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it
-// unsuitable for other protocols. RSA should be used in preference in any
-// case.
-package elgamal // import "github.com/ProtonMail/go-crypto/openpgp/elgamal"
-
-import (
- "crypto/rand"
- "crypto/subtle"
- "errors"
- "io"
- "math/big"
-)
-
-// PublicKey represents an ElGamal public key.
-type PublicKey struct {
- G, P, Y *big.Int
-}
-
-// PrivateKey represents an ElGamal private key.
-type PrivateKey struct {
- PublicKey
- X *big.Int
-}
-
-// Encrypt encrypts the given message to the given public key. The result is a
-// pair of integers. Errors can result from reading random, or because msg is
-// too large to be encrypted to the public key.
-func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) {
- pLen := (pub.P.BitLen() + 7) / 8
- if len(msg) > pLen-11 {
- err = errors.New("elgamal: message too long")
- return
- }
-
- // EM = 0x02 || PS || 0x00 || M
- em := make([]byte, pLen-1)
- em[0] = 2
- ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):]
- err = nonZeroRandomBytes(ps, random)
- if err != nil {
- return
- }
- em[len(em)-len(msg)-1] = 0
- copy(mm, msg)
-
- m := new(big.Int).SetBytes(em)
-
- k, err := rand.Int(random, pub.P)
- if err != nil {
- return
- }
-
- c1 = new(big.Int).Exp(pub.G, k, pub.P)
- s := new(big.Int).Exp(pub.Y, k, pub.P)
- c2 = s.Mul(s, m)
- c2.Mod(c2, pub.P)
-
- return
-}
-
-// Decrypt takes two integers, resulting from an ElGamal encryption, and
-// returns the plaintext of the message. An error can result only if the
-// ciphertext is invalid. Users should keep in mind that this is a padding
-// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can
-// be used to break the cryptosystem. See “Chosen Ciphertext Attacks
-// Against Protocols Based on the RSA Encryption Standard PKCS #1”, Daniel
-// Bleichenbacher, Advances in Cryptology (Crypto '98),
-func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) {
- s := new(big.Int).Exp(c1, priv.X, priv.P)
- if s.ModInverse(s, priv.P) == nil {
- return nil, errors.New("elgamal: invalid private key")
- }
- s.Mul(s, c2)
- s.Mod(s, priv.P)
- em := s.Bytes()
-
- firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2)
-
- // The remainder of the plaintext must be a string of non-zero random
- // octets, followed by a 0, followed by the message.
- // lookingForIndex: 1 iff we are still looking for the zero.
- // index: the offset of the first zero byte.
- var lookingForIndex, index int
- lookingForIndex = 1
-
- for i := 1; i < len(em); i++ {
- equals0 := subtle.ConstantTimeByteEq(em[i], 0)
- index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index)
- lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex)
- }
-
- if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 {
- return nil, errors.New("elgamal: decryption error")
- }
- return em[index+1:], nil
-}
-
-// nonZeroRandomBytes fills the given slice with non-zero random octets.
-func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) {
- _, err = io.ReadFull(rand, s)
- if err != nil {
- return
- }
-
- for i := 0; i < len(s); i++ {
- for s[i] == 0 {
- _, err = io.ReadFull(rand, s[i:i+1])
- if err != nil {
- return
- }
- }
- }
-
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go
deleted file mode 100644
index 4bcdeddf..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package errors contains common error types for the OpenPGP packages.
-package errors // import "github.com/ProtonMail/go-crypto/v2/openpgp/errors"
-
-import (
- "strconv"
-)
-
-// A StructuralError is returned when OpenPGP data is found to be syntactically
-// invalid.
-type StructuralError string
-
-func (s StructuralError) Error() string {
- return "openpgp: invalid data: " + string(s)
-}
-
-// UnsupportedError indicates that, although the OpenPGP data is valid, it
-// makes use of currently unimplemented features.
-type UnsupportedError string
-
-func (s UnsupportedError) Error() string {
- return "openpgp: unsupported feature: " + string(s)
-}
-
-// InvalidArgumentError indicates that the caller is in error and passed an
-// incorrect value.
-type InvalidArgumentError string
-
-func (i InvalidArgumentError) Error() string {
- return "openpgp: invalid argument: " + string(i)
-}
-
-var InvalidForwardeeKeyError = InvalidArgumentError("invalid forwardee key")
-
-// SignatureError indicates that a syntactically valid signature failed to
-// validate.
-type SignatureError string
-
-func (b SignatureError) Error() string {
- return "openpgp: invalid signature: " + string(b)
-}
-
-var ErrMDCHashMismatch error = SignatureError("MDC hash mismatch")
-var ErrMDCMissing error = SignatureError("MDC packet not found")
-
-type signatureExpiredError int
-
-func (se signatureExpiredError) Error() string {
- return "openpgp: signature expired"
-}
-
-var ErrSignatureExpired error = signatureExpiredError(0)
-
-type keyExpiredError int
-
-func (ke keyExpiredError) Error() string {
- return "openpgp: key expired"
-}
-
-var ErrSignatureOlderThanKey error = signatureOlderThanKeyError(0)
-
-type signatureOlderThanKeyError int
-
-func (ske signatureOlderThanKeyError) Error() string {
- return "openpgp: signature is older than the key"
-}
-
-var ErrKeyExpired error = keyExpiredError(0)
-
-type keyIncorrectError int
-
-func (ki keyIncorrectError) Error() string {
- return "openpgp: incorrect key"
-}
-
-var ErrKeyIncorrect error = keyIncorrectError(0)
-
-// KeyInvalidError indicates that the public key parameters are invalid
-// as they do not match the private ones
-type KeyInvalidError string
-
-func (e KeyInvalidError) Error() string {
- return "openpgp: invalid key: " + string(e)
-}
-
-type unknownIssuerError int
-
-func (unknownIssuerError) Error() string {
- return "openpgp: signature made by unknown entity"
-}
-
-var ErrUnknownIssuer error = unknownIssuerError(0)
-
-type keyRevokedError int
-
-func (keyRevokedError) Error() string {
- return "openpgp: signature made by revoked key"
-}
-
-var ErrKeyRevoked error = keyRevokedError(0)
-
-type WeakAlgorithmError string
-
-func (e WeakAlgorithmError) Error() string {
- return "openpgp: weak algorithms are rejected: " + string(e)
-}
-
-type UnknownPacketTypeError uint8
-
-func (upte UnknownPacketTypeError) Error() string {
- return "openpgp: unknown packet type: " + strconv.Itoa(int(upte))
-}
-
-type CriticalUnknownPacketTypeError uint8
-
-func (upte CriticalUnknownPacketTypeError) Error() string {
- return "openpgp: unknown critical packet type: " + strconv.Itoa(int(upte))
-}
-
-// AEADError indicates that there is a problem when initializing or using a
-// AEAD instance, configuration struct, nonces or index values.
-type AEADError string
-
-func (ae AEADError) Error() string {
- return "openpgp: aead error: " + string(ae)
-}
-
-// ErrDummyPrivateKey results when operations are attempted on a private key
-// that is just a dummy key. See
-// https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109
-type ErrDummyPrivateKey string
-
-func (dke ErrDummyPrivateKey) Error() string {
- return "openpgp: s2k GNU dummy key: " + string(dke)
-}
-
-// ErrMalformedMessage results when the packet sequence is incorrect
-type ErrMalformedMessage string
-
-func (dke ErrMalformedMessage) Error() string {
- return "openpgp: malformed message " + string(dke)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/forwarding.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/forwarding.go
deleted file mode 100644
index ae45c3c2..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/forwarding.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package openpgp
-
-import (
- goerrors "errors"
-
- "github.com/ProtonMail/go-crypto/openpgp/ecdh"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/packet"
-)
-
-// NewForwardingEntity generates a new forwardee key and derives the proxy parameters from the entity e.
-// If strict, it will return an error if encryption-capable non-revoked subkeys with a wrong algorithm are found,
-// instead of ignoring them
-func (e *Entity) NewForwardingEntity(
- name, comment, email string, config *packet.Config, strict bool,
-) (
- forwardeeKey *Entity, instances []packet.ForwardingInstance, err error,
-) {
- if e.PrimaryKey.Version != 4 {
- return nil, nil, errors.InvalidArgumentError("unsupported key version")
- }
-
- now := config.Now()
- i := e.PrimaryIdentity()
- if e.PrimaryKey.KeyExpired(i.SelfSignature, now) || // primary key has expired
- i.SelfSignature.SigExpired(now) || // user ID self-signature has expired
- e.Revoked(now) || // primary key has been revoked
- i.Revoked(now) { // user ID has been revoked
- return nil, nil, errors.InvalidArgumentError("primary key is expired")
- }
-
- // Generate a new Primary key for the forwardee
- config.Algorithm = packet.PubKeyAlgoEdDSA
- config.Curve = packet.Curve25519
- keyLifetimeSecs := config.KeyLifetime()
-
- forwardeePrimaryPrivRaw, err := newSigner(config)
- if err != nil {
- return nil, nil, err
- }
-
- primary := packet.NewSignerPrivateKey(now, forwardeePrimaryPrivRaw)
-
- forwardeeKey = &Entity{
- PrimaryKey: &primary.PublicKey,
- PrivateKey: primary,
- Identities: make(map[string]*Identity),
- Subkeys: []Subkey{},
- }
-
- err = forwardeeKey.addUserId(name, comment, email, config, now, keyLifetimeSecs, true)
- if err != nil {
- return nil, nil, err
- }
-
- // Init empty instances
- instances = []packet.ForwardingInstance{}
-
- // Handle all forwarder subkeys
- for _, forwarderSubKey := range e.Subkeys {
- // Filter flags
- if !forwarderSubKey.PublicKey.PubKeyAlgo.CanEncrypt() {
- continue
- }
-
- // Filter expiration & revokal
- if forwarderSubKey.PublicKey.KeyExpired(forwarderSubKey.Sig, now) ||
- forwarderSubKey.Sig.SigExpired(now) ||
- forwarderSubKey.Revoked(now) {
- continue
- }
-
- if forwarderSubKey.PublicKey.PubKeyAlgo != packet.PubKeyAlgoECDH {
- if strict {
- return nil, nil, errors.InvalidArgumentError("encryption subkey is not algorithm 18 (ECDH)")
- } else {
- continue
- }
- }
-
- forwarderEcdhKey, ok := forwarderSubKey.PrivateKey.PrivateKey.(*ecdh.PrivateKey)
- if !ok {
- return nil, nil, errors.InvalidArgumentError("malformed key")
- }
-
- err = forwardeeKey.addEncryptionSubkey(config, now, 0)
- if err != nil {
- return nil, nil, err
- }
-
- forwardeeSubKey := forwardeeKey.Subkeys[len(forwardeeKey.Subkeys)-1]
-
- forwardeeEcdhKey, ok := forwardeeSubKey.PrivateKey.PrivateKey.(*ecdh.PrivateKey)
- if !ok {
- return nil, nil, goerrors.New("wrong forwarding sub key generation")
- }
-
- instance := packet.ForwardingInstance{
- KeyVersion: 4,
- ForwarderFingerprint: forwarderSubKey.PublicKey.Fingerprint,
- }
-
- instance.ProxyParameter, err = ecdh.DeriveProxyParam(forwarderEcdhKey, forwardeeEcdhKey)
- if err != nil {
- return nil, nil, err
- }
-
- kdf := ecdh.KDF{
- Version: ecdh.KDFVersionForwarding,
- Hash: forwarderEcdhKey.KDF.Hash,
- Cipher: forwarderEcdhKey.KDF.Cipher,
- }
-
- // If deriving a forwarding key from a forwarding key
- if forwarderSubKey.Sig.FlagForward {
- if forwarderEcdhKey.KDF.Version != ecdh.KDFVersionForwarding {
- return nil, nil, goerrors.New("malformed forwarder key")
- }
- kdf.ReplacementFingerprint = forwarderEcdhKey.KDF.ReplacementFingerprint
- } else {
- kdf.ReplacementFingerprint = forwarderSubKey.PublicKey.Fingerprint
- }
-
- err = forwardeeSubKey.PublicKey.ReplaceKDF(kdf)
- if err != nil {
- return nil, nil, err
- }
-
- // Extract fingerprint after changing the KDF
- instance.ForwardeeFingerprint = forwardeeSubKey.PublicKey.Fingerprint
-
- // 0x04 - This key may be used to encrypt communications.
- forwardeeSubKey.Sig.FlagEncryptCommunications = false
-
- // 0x08 - This key may be used to encrypt storage.
- forwardeeSubKey.Sig.FlagEncryptStorage = false
-
- // 0x10 - The private component of this key may have been split by a secret-sharing mechanism.
- forwardeeSubKey.Sig.FlagSplitKey = true
-
- // 0x40 - This key may be used for forwarded communications.
- forwardeeSubKey.Sig.FlagForward = true
-
- // Re-sign subkey binding signature
- err = forwardeeSubKey.Sig.SignKey(forwardeeSubKey.PublicKey, forwardeeKey.PrivateKey, config)
- if err != nil {
- return nil, nil, err
- }
-
- // Append each valid instance to the list
- instances = append(instances, instance)
- }
-
- if len(instances) == 0 {
- return nil, nil, errors.InvalidArgumentError("no valid subkey found")
- }
-
- return forwardeeKey, instances, nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/hash.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/hash.go
deleted file mode 100644
index 526bd777..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/hash.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package openpgp
-
-import (
- "crypto"
-
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
-)
-
-// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP
-// hash id.
-func HashIdToHash(id byte) (h crypto.Hash, ok bool) {
- return algorithm.HashIdToHash(id)
-}
-
-// HashIdToString returns the name of the hash function corresponding to the
-// given OpenPGP hash id.
-func HashIdToString(id byte) (name string, ok bool) {
- return algorithm.HashIdToString(id)
-}
-
-// HashToHashId returns an OpenPGP hash id which corresponds the given Hash.
-func HashToHashId(h crypto.Hash) (id byte, ok bool) {
- return algorithm.HashToHashId(h)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go
deleted file mode 100644
index d0670651..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-
-package algorithm
-
-import (
- "crypto/cipher"
- "github.com/ProtonMail/go-crypto/eax"
- "github.com/ProtonMail/go-crypto/ocb"
-)
-
-// AEADMode defines the Authenticated Encryption with Associated Data mode of
-// operation.
-type AEADMode uint8
-
-// Supported modes of operation (see RFC4880bis [EAX] and RFC7253)
-const (
- AEADModeEAX = AEADMode(1)
- AEADModeOCB = AEADMode(2)
- AEADModeGCM = AEADMode(3)
-)
-
-// TagLength returns the length in bytes of authentication tags.
-func (mode AEADMode) TagLength() int {
- switch mode {
- case AEADModeEAX:
- return 16
- case AEADModeOCB:
- return 16
- case AEADModeGCM:
- return 16
- default:
- return 0
- }
-}
-
-// NonceLength returns the length in bytes of nonces.
-func (mode AEADMode) NonceLength() int {
- switch mode {
- case AEADModeEAX:
- return 16
- case AEADModeOCB:
- return 15
- case AEADModeGCM:
- return 12
- default:
- return 0
- }
-}
-
-// New returns a fresh instance of the given mode
-func (mode AEADMode) New(block cipher.Block) (alg cipher.AEAD) {
- var err error
- switch mode {
- case AEADModeEAX:
- alg, err = eax.NewEAX(block)
- case AEADModeOCB:
- alg, err = ocb.NewOCB(block)
- case AEADModeGCM:
- alg, err = cipher.NewGCM(block)
- }
- if err != nil {
- panic(err.Error())
- }
- return alg
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go
deleted file mode 100644
index c76a75bc..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package algorithm
-
-import (
- "crypto/aes"
- "crypto/cipher"
- "crypto/des"
-
- "golang.org/x/crypto/cast5"
-)
-
-// Cipher is an official symmetric key cipher algorithm. See RFC 4880,
-// section 9.2.
-type Cipher interface {
- // Id returns the algorithm ID, as a byte, of the cipher.
- Id() uint8
- // KeySize returns the key size, in bytes, of the cipher.
- KeySize() int
- // BlockSize returns the block size, in bytes, of the cipher.
- BlockSize() int
- // New returns a fresh instance of the given cipher.
- New(key []byte) cipher.Block
-}
-
-// The following constants mirror the OpenPGP standard (RFC 4880).
-const (
- TripleDES = CipherFunction(2)
- CAST5 = CipherFunction(3)
- AES128 = CipherFunction(7)
- AES192 = CipherFunction(8)
- AES256 = CipherFunction(9)
-)
-
-// CipherById represents the different block ciphers specified for OpenPGP. See
-// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13
-var CipherById = map[uint8]Cipher{
- TripleDES.Id(): TripleDES,
- CAST5.Id(): CAST5,
- AES128.Id(): AES128,
- AES192.Id(): AES192,
- AES256.Id(): AES256,
-}
-
-type CipherFunction uint8
-
-// ID returns the algorithm Id, as a byte, of cipher.
-func (sk CipherFunction) Id() uint8 {
- return uint8(sk)
-}
-
-// KeySize returns the key size, in bytes, of cipher.
-func (cipher CipherFunction) KeySize() int {
- switch cipher {
- case CAST5:
- return cast5.KeySize
- case AES128:
- return 16
- case AES192, TripleDES:
- return 24
- case AES256:
- return 32
- }
- return 0
-}
-
-// BlockSize returns the block size, in bytes, of cipher.
-func (cipher CipherFunction) BlockSize() int {
- switch cipher {
- case TripleDES:
- return des.BlockSize
- case CAST5:
- return 8
- case AES128, AES192, AES256:
- return 16
- }
- return 0
-}
-
-// New returns a fresh instance of the given cipher.
-func (cipher CipherFunction) New(key []byte) (block cipher.Block) {
- var err error
- switch cipher {
- case TripleDES:
- block, err = des.NewTripleDESCipher(key)
- case CAST5:
- block, err = cast5.NewCipher(key)
- case AES128, AES192, AES256:
- block, err = aes.NewCipher(key)
- }
- if err != nil {
- panic(err.Error())
- }
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go
deleted file mode 100644
index d1a00fc7..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package algorithm
-
-import (
- "crypto"
- "fmt"
- "hash"
-)
-
-// Hash is an official hash function algorithm. See RFC 4880, section 9.4.
-type Hash interface {
- // Id returns the algorithm ID, as a byte, of Hash.
- Id() uint8
- // Available reports whether the given hash function is linked into the binary.
- Available() bool
- // HashFunc simply returns the value of h so that Hash implements SignerOpts.
- HashFunc() crypto.Hash
- // New returns a new hash.Hash calculating the given hash function. New
- // panics if the hash function is not linked into the binary.
- New() hash.Hash
- // Size returns the length, in bytes, of a digest resulting from the given
- // hash function. It doesn't require that the hash function in question be
- // linked into the program.
- Size() int
- // String is the name of the hash function corresponding to the given
- // OpenPGP hash id.
- String() string
-}
-
-// The following vars mirror the crypto/Hash supported hash functions.
-var (
- SHA1 Hash = cryptoHash{2, crypto.SHA1}
- SHA256 Hash = cryptoHash{8, crypto.SHA256}
- SHA384 Hash = cryptoHash{9, crypto.SHA384}
- SHA512 Hash = cryptoHash{10, crypto.SHA512}
- SHA224 Hash = cryptoHash{11, crypto.SHA224}
- SHA3_256 Hash = cryptoHash{12, crypto.SHA3_256}
- SHA3_512 Hash = cryptoHash{14, crypto.SHA3_512}
-)
-
-// HashById represents the different hash functions specified for OpenPGP. See
-// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-14
-var (
- HashById = map[uint8]Hash{
- SHA256.Id(): SHA256,
- SHA384.Id(): SHA384,
- SHA512.Id(): SHA512,
- SHA224.Id(): SHA224,
- SHA3_256.Id(): SHA3_256,
- SHA3_512.Id(): SHA3_512,
- }
-)
-
-// cryptoHash contains pairs relating OpenPGP's hash identifier with
-// Go's crypto.Hash type. See RFC 4880, section 9.4.
-type cryptoHash struct {
- id uint8
- crypto.Hash
-}
-
-// Id returns the algorithm ID, as a byte, of cryptoHash.
-func (h cryptoHash) Id() uint8 {
- return h.id
-}
-
-var hashNames = map[uint8]string{
- SHA256.Id(): "SHA256",
- SHA384.Id(): "SHA384",
- SHA512.Id(): "SHA512",
- SHA224.Id(): "SHA224",
- SHA3_256.Id(): "SHA3-256",
- SHA3_512.Id(): "SHA3-512",
-}
-
-func (h cryptoHash) String() string {
- s, ok := hashNames[h.id]
- if !ok {
- panic(fmt.Sprintf("Unsupported hash function %d", h.id))
- }
- return s
-}
-
-// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP
-// hash id.
-func HashIdToHash(id byte) (h crypto.Hash, ok bool) {
- if hash, ok := HashById[id]; ok {
- return hash.HashFunc(), true
- }
- return 0, false
-}
-
-// HashIdToHashWithSha1 returns a crypto.Hash which corresponds to the given OpenPGP
-// hash id, allowing sha1.
-func HashIdToHashWithSha1(id byte) (h crypto.Hash, ok bool) {
- if hash, ok := HashById[id]; ok {
- return hash.HashFunc(), true
- }
-
- if id == SHA1.Id() {
- return SHA1.HashFunc(), true
- }
-
- return 0, false
-}
-
-// HashIdToString returns the name of the hash function corresponding to the
-// given OpenPGP hash id.
-func HashIdToString(id byte) (name string, ok bool) {
- if hash, ok := HashById[id]; ok {
- return hash.String(), true
- }
- return "", false
-}
-
-// HashToHashId returns an OpenPGP hash id which corresponds the given Hash.
-func HashToHashId(h crypto.Hash) (id byte, ok bool) {
- for id, hash := range HashById {
- if hash.HashFunc() == h {
- return id, true
- }
- }
-
- return 0, false
-}
-
-// HashToHashIdWithSha1 returns an OpenPGP hash id which corresponds the given Hash,
-// allowing instances of SHA1
-func HashToHashIdWithSha1(h crypto.Hash) (id byte, ok bool) {
- for id, hash := range HashById {
- if hash.HashFunc() == h {
- return id, true
- }
- }
-
- if h == SHA1.HashFunc() {
- return SHA1.Id(), true
- }
-
- return 0, false
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go
deleted file mode 100644
index a6721ff9..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA.
-package ecc
-
-import (
- "crypto/subtle"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- x25519lib "github.com/cloudflare/circl/dh/x25519"
- "io"
-)
-
-type curve25519 struct{}
-
-func NewCurve25519() *curve25519 {
- return &curve25519{}
-}
-
-func (c *curve25519) GetCurveName() string {
- return "curve25519"
-}
-
-// MarshalBytePoint encodes the public point from native format, adding the prefix.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6
-func (c *curve25519) MarshalBytePoint(point []byte) []byte {
- return append([]byte{0x40}, point...)
-}
-
-// UnmarshalBytePoint decodes the public point to native format, removing the prefix.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6
-func (c *curve25519) UnmarshalBytePoint(point []byte) []byte {
- if len(point) != x25519lib.Size+1 {
- return nil
- }
-
- // Remove prefix
- return point[1:]
-}
-
-// MarshalByteSecret encodes the secret scalar from native format.
-// Note that the EC secret scalar differs from the definition of public keys in
-// [Curve25519] in two ways: (1) the byte-ordering is big-endian, which is
-// more uniform with how big integers are represented in OpenPGP, and (2) the
-// leading zeros are truncated.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.1
-// Note that leading zero bytes are stripped later when encoding as an MPI.
-func (c *curve25519) MarshalByteSecret(secret []byte) []byte {
- d := make([]byte, x25519lib.Size)
- copyReversed(d, secret)
-
- // The following ensures that the private key is a number of the form
- // 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of
- // the curve.
- //
- // This masking is done internally in the underlying lib and so is unnecessary
- // for security, but OpenPGP implementations require that private keys be
- // pre-masked.
- d[0] &= 127
- d[0] |= 64
- d[31] &= 248
-
- return d
-}
-
-// UnmarshalByteSecret decodes the secret scalar from native format.
-// Note that the EC secret scalar differs from the definition of public keys in
-// [Curve25519] in two ways: (1) the byte-ordering is big-endian, which is
-// more uniform with how big integers are represented in OpenPGP, and (2) the
-// leading zeros are truncated.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.1
-func (c *curve25519) UnmarshalByteSecret(d []byte) []byte {
- if len(d) > x25519lib.Size {
- return nil
- }
-
- // Ensure truncated leading bytes are re-added
- secret := make([]byte, x25519lib.Size)
- copyReversed(secret, d)
-
- return secret
-}
-
-// generateKeyPairBytes Generates a private-public key-pair.
-// 'priv' is a private key; a little-endian scalar belonging to the set
-// 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of the
-// curve. 'pub' is simply 'priv' * G where G is the base point.
-// See https://cr.yp.to/ecdh.html and RFC7748, sec 5.
-func (c *curve25519) generateKeyPairBytes(rand io.Reader) (priv, pub x25519lib.Key, err error) {
- _, err = io.ReadFull(rand, priv[:])
- if err != nil {
- return
- }
-
- x25519lib.KeyGen(&pub, &priv)
- return
-}
-
-func (c *curve25519) GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error) {
- priv, pub, err := c.generateKeyPairBytes(rand)
- if err != nil {
- return
- }
-
- return pub[:], priv[:], nil
-}
-
-func (c *genericCurve) MaskSecret(secret []byte) []byte {
- return secret
-}
-
-func (c *curve25519) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) {
- // RFC6637 §8: "Generate an ephemeral key pair {v, V=vG}"
- // ephemeralPrivate corresponds to `v`.
- // ephemeralPublic corresponds to `V`.
- ephemeralPrivate, ephemeralPublic, err := c.generateKeyPairBytes(rand)
- if err != nil {
- return nil, nil, err
- }
-
- // RFC6637 §8: "Obtain the authenticated recipient public key R"
- // pubKey corresponds to `R`.
- var pubKey x25519lib.Key
- copy(pubKey[:], point)
-
- // RFC6637 §8: "Compute the shared point S = vR"
- // "VB = convert point V to the octet string"
- // sharedPoint corresponds to `VB`.
- var sharedPoint x25519lib.Key
- x25519lib.Shared(&sharedPoint, &ephemeralPrivate, &pubKey)
-
- return ephemeralPublic[:], sharedPoint[:], nil
-}
-
-func (c *curve25519) Decaps(vsG, secret []byte) (sharedSecret []byte, err error) {
- var ephemeralPublic, decodedPrivate, sharedPoint x25519lib.Key
- // RFC6637 §8: "The decryption is the inverse of the method given."
- // All quoted descriptions in comments below describe encryption, and
- // the reverse is performed.
- // vsG corresponds to `VB` in RFC6637 §8 .
-
- // RFC6637 §8: "VB = convert point V to the octet string"
- copy(ephemeralPublic[:], vsG)
-
- // decodedPrivate corresponds to `r` in RFC6637 §8 .
- copy(decodedPrivate[:], secret)
-
- // RFC6637 §8: "Note that the recipient obtains the shared secret by calculating
- // S = rV = rvG, where (r,R) is the recipient's key pair."
- // sharedPoint corresponds to `S`.
- x25519lib.Shared(&sharedPoint, &decodedPrivate, &ephemeralPublic)
-
- return sharedPoint[:], nil
-}
-
-func (c *curve25519) ValidateECDH(point []byte, secret []byte) (err error) {
- var pk, sk x25519lib.Key
- copy(sk[:], secret)
- x25519lib.KeyGen(&pk, &sk)
-
- if subtle.ConstantTimeCompare(point, pk[:]) == 0 {
- return errors.KeyInvalidError("ecc: invalid curve25519 public point")
- }
-
- return nil
-}
-
-func copyReversed(out []byte, in []byte) {
- l := len(in)
- for i := 0; i < l; i++ {
- out[i] = in[l-i-1]
- }
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/curve25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/curve25519.go
deleted file mode 100644
index 21670a82..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/curve25519.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Package curve25519 implements custom field operations without clamping for forwarding.
-package curve25519
-
-import (
- "crypto/subtle"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field"
- x25519lib "github.com/cloudflare/circl/dh/x25519"
- "math/big"
-)
-
-var curveGroupByte = [x25519lib.Size]byte{
- 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x14, 0xde, 0xf9, 0xde, 0xa2, 0xf7, 0x9c, 0xd6, 0x58, 0x12, 0x63, 0x1a, 0x5c, 0xf5, 0xd3, 0xed,
-}
-
-const ParamSize = x25519lib.Size
-
-func DeriveProxyParam(recipientSecretByte, forwardeeSecretByte []byte) (proxyParam []byte, err error) {
- curveGroup := new(big.Int).SetBytes(curveGroupByte[:])
- recipientSecret := new(big.Int).SetBytes(recipientSecretByte)
- forwardeeSecret := new(big.Int).SetBytes(forwardeeSecretByte)
-
- proxyTransform := new(big.Int).Mod(
- new(big.Int).Mul(
- new(big.Int).ModInverse(forwardeeSecret, curveGroup),
- recipientSecret,
- ),
- curveGroup,
- )
-
- rawProxyParam := proxyTransform.Bytes()
-
- // pad and convert to small endian
- proxyParam = make([]byte, x25519lib.Size)
- l := len(rawProxyParam)
- for i := 0; i < l; i++ {
- proxyParam[i] = rawProxyParam[l-i-1]
- }
-
- return proxyParam, nil
-}
-
-func ProxyTransform(ephemeral, proxyParam []byte) ([]byte, error) {
- var transformed, safetyCheck [x25519lib.Size]byte
-
- var scalarEight = make([]byte, x25519lib.Size)
- scalarEight[0] = 0x08
- err := ScalarMult(&safetyCheck, scalarEight, ephemeral)
- if err != nil {
- return nil, err
- }
-
- err = ScalarMult(&transformed, proxyParam, ephemeral)
- if err != nil {
- return nil, err
- }
-
- return transformed[:], nil
-}
-
-func ScalarMult(dst *[32]byte, scalar, point []byte) error {
- var in, base, zero [32]byte
- copy(in[:], scalar)
- copy(base[:], point)
-
- scalarMult(dst, &in, &base)
- if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 {
- return errors.InvalidArgumentError("invalid ephemeral: low order point")
- }
-
- return nil
-}
-
-func scalarMult(dst, scalar, point *[32]byte) {
- var e [32]byte
-
- copy(e[:], scalar[:])
-
- var x1, x2, z2, x3, z3, tmp0, tmp1 field.Element
- x1.SetBytes(point[:])
- x2.One()
- x3.Set(&x1)
- z3.One()
-
- swap := 0
- for pos := 254; pos >= 0; pos-- {
- b := e[pos/8] >> uint(pos&7)
- b &= 1
- swap ^= int(b)
- x2.Swap(&x3, swap)
- z2.Swap(&z3, swap)
- swap = int(b)
-
- tmp0.Subtract(&x3, &z3)
- tmp1.Subtract(&x2, &z2)
- x2.Add(&x2, &z2)
- z2.Add(&x3, &z3)
- z3.Multiply(&tmp0, &x2)
- z2.Multiply(&z2, &tmp1)
- tmp0.Square(&tmp1)
- tmp1.Square(&x2)
- x3.Add(&z3, &z2)
- z2.Subtract(&z3, &z2)
- x2.Multiply(&tmp1, &tmp0)
- tmp1.Subtract(&tmp1, &tmp0)
- z2.Square(&z2)
-
- z3.Mult32(&tmp1, 121666)
- x3.Square(&x3)
- tmp0.Add(&tmp0, &z3)
- z3.Multiply(&x1, &z2)
- z2.Multiply(&tmp1, &tmp0)
- }
-
- x2.Swap(&x3, swap)
- z2.Swap(&z3, swap)
-
- z2.Invert(&z2)
- x2.Multiply(&x2, &z2)
- copy(dst[:], x2.Bytes())
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe.go
deleted file mode 100644
index ca841ad9..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe.go
+++ /dev/null
@@ -1,416 +0,0 @@
-// Copyright (c) 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package field implements fast arithmetic modulo 2^255-19.
-package field
-
-import (
- "crypto/subtle"
- "encoding/binary"
- "math/bits"
-)
-
-// Element represents an element of the field GF(2^255-19). Note that this
-// is not a cryptographically secure group, and should only be used to interact
-// with edwards25519.Point coordinates.
-//
-// This type works similarly to math/big.Int, and all arguments and receivers
-// are allowed to alias.
-//
-// The zero value is a valid zero element.
-type Element struct {
- // An element t represents the integer
- // t.l0 + t.l1*2^51 + t.l2*2^102 + t.l3*2^153 + t.l4*2^204
- //
- // Between operations, all limbs are expected to be lower than 2^52.
- l0 uint64
- l1 uint64
- l2 uint64
- l3 uint64
- l4 uint64
-}
-
-const maskLow51Bits uint64 = (1 << 51) - 1
-
-var feZero = &Element{0, 0, 0, 0, 0}
-
-// Zero sets v = 0, and returns v.
-func (v *Element) Zero() *Element {
- *v = *feZero
- return v
-}
-
-var feOne = &Element{1, 0, 0, 0, 0}
-
-// One sets v = 1, and returns v.
-func (v *Element) One() *Element {
- *v = *feOne
- return v
-}
-
-// reduce reduces v modulo 2^255 - 19 and returns it.
-func (v *Element) reduce() *Element {
- v.carryPropagate()
-
- // After the light reduction we now have a field element representation
- // v < 2^255 + 2^13 * 19, but need v < 2^255 - 19.
-
- // If v >= 2^255 - 19, then v + 19 >= 2^255, which would overflow 2^255 - 1,
- // generating a carry. That is, c will be 0 if v < 2^255 - 19, and 1 otherwise.
- c := (v.l0 + 19) >> 51
- c = (v.l1 + c) >> 51
- c = (v.l2 + c) >> 51
- c = (v.l3 + c) >> 51
- c = (v.l4 + c) >> 51
-
- // If v < 2^255 - 19 and c = 0, this will be a no-op. Otherwise, it's
- // effectively applying the reduction identity to the carry.
- v.l0 += 19 * c
-
- v.l1 += v.l0 >> 51
- v.l0 = v.l0 & maskLow51Bits
- v.l2 += v.l1 >> 51
- v.l1 = v.l1 & maskLow51Bits
- v.l3 += v.l2 >> 51
- v.l2 = v.l2 & maskLow51Bits
- v.l4 += v.l3 >> 51
- v.l3 = v.l3 & maskLow51Bits
- // no additional carry
- v.l4 = v.l4 & maskLow51Bits
-
- return v
-}
-
-// Add sets v = a + b, and returns v.
-func (v *Element) Add(a, b *Element) *Element {
- v.l0 = a.l0 + b.l0
- v.l1 = a.l1 + b.l1
- v.l2 = a.l2 + b.l2
- v.l3 = a.l3 + b.l3
- v.l4 = a.l4 + b.l4
- // Using the generic implementation here is actually faster than the
- // assembly. Probably because the body of this function is so simple that
- // the compiler can figure out better optimizations by inlining the carry
- // propagation. TODO
- return v.carryPropagateGeneric()
-}
-
-// Subtract sets v = a - b, and returns v.
-func (v *Element) Subtract(a, b *Element) *Element {
- // We first add 2 * p, to guarantee the subtraction won't underflow, and
- // then subtract b (which can be up to 2^255 + 2^13 * 19).
- v.l0 = (a.l0 + 0xFFFFFFFFFFFDA) - b.l0
- v.l1 = (a.l1 + 0xFFFFFFFFFFFFE) - b.l1
- v.l2 = (a.l2 + 0xFFFFFFFFFFFFE) - b.l2
- v.l3 = (a.l3 + 0xFFFFFFFFFFFFE) - b.l3
- v.l4 = (a.l4 + 0xFFFFFFFFFFFFE) - b.l4
- return v.carryPropagate()
-}
-
-// Negate sets v = -a, and returns v.
-func (v *Element) Negate(a *Element) *Element {
- return v.Subtract(feZero, a)
-}
-
-// Invert sets v = 1/z mod p, and returns v.
-//
-// If z == 0, Invert returns v = 0.
-func (v *Element) Invert(z *Element) *Element {
- // Inversion is implemented as exponentiation with exponent p − 2. It uses the
- // same sequence of 255 squarings and 11 multiplications as [Curve25519].
- var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t Element
-
- z2.Square(z) // 2
- t.Square(&z2) // 4
- t.Square(&t) // 8
- z9.Multiply(&t, z) // 9
- z11.Multiply(&z9, &z2) // 11
- t.Square(&z11) // 22
- z2_5_0.Multiply(&t, &z9) // 31 = 2^5 - 2^0
-
- t.Square(&z2_5_0) // 2^6 - 2^1
- for i := 0; i < 4; i++ {
- t.Square(&t) // 2^10 - 2^5
- }
- z2_10_0.Multiply(&t, &z2_5_0) // 2^10 - 2^0
-
- t.Square(&z2_10_0) // 2^11 - 2^1
- for i := 0; i < 9; i++ {
- t.Square(&t) // 2^20 - 2^10
- }
- z2_20_0.Multiply(&t, &z2_10_0) // 2^20 - 2^0
-
- t.Square(&z2_20_0) // 2^21 - 2^1
- for i := 0; i < 19; i++ {
- t.Square(&t) // 2^40 - 2^20
- }
- t.Multiply(&t, &z2_20_0) // 2^40 - 2^0
-
- t.Square(&t) // 2^41 - 2^1
- for i := 0; i < 9; i++ {
- t.Square(&t) // 2^50 - 2^10
- }
- z2_50_0.Multiply(&t, &z2_10_0) // 2^50 - 2^0
-
- t.Square(&z2_50_0) // 2^51 - 2^1
- for i := 0; i < 49; i++ {
- t.Square(&t) // 2^100 - 2^50
- }
- z2_100_0.Multiply(&t, &z2_50_0) // 2^100 - 2^0
-
- t.Square(&z2_100_0) // 2^101 - 2^1
- for i := 0; i < 99; i++ {
- t.Square(&t) // 2^200 - 2^100
- }
- t.Multiply(&t, &z2_100_0) // 2^200 - 2^0
-
- t.Square(&t) // 2^201 - 2^1
- for i := 0; i < 49; i++ {
- t.Square(&t) // 2^250 - 2^50
- }
- t.Multiply(&t, &z2_50_0) // 2^250 - 2^0
-
- t.Square(&t) // 2^251 - 2^1
- t.Square(&t) // 2^252 - 2^2
- t.Square(&t) // 2^253 - 2^3
- t.Square(&t) // 2^254 - 2^4
- t.Square(&t) // 2^255 - 2^5
-
- return v.Multiply(&t, &z11) // 2^255 - 21
-}
-
-// Set sets v = a, and returns v.
-func (v *Element) Set(a *Element) *Element {
- *v = *a
- return v
-}
-
-// SetBytes sets v to x, which must be a 32-byte little-endian encoding.
-//
-// Consistent with RFC 7748, the most significant bit (the high bit of the
-// last byte) is ignored, and non-canonical values (2^255-19 through 2^255-1)
-// are accepted. Note that this is laxer than specified by RFC 8032.
-func (v *Element) SetBytes(x []byte) *Element {
- if len(x) != 32 {
- panic("edwards25519: invalid field element input size")
- }
-
- // Bits 0:51 (bytes 0:8, bits 0:64, shift 0, mask 51).
- v.l0 = binary.LittleEndian.Uint64(x[0:8])
- v.l0 &= maskLow51Bits
- // Bits 51:102 (bytes 6:14, bits 48:112, shift 3, mask 51).
- v.l1 = binary.LittleEndian.Uint64(x[6:14]) >> 3
- v.l1 &= maskLow51Bits
- // Bits 102:153 (bytes 12:20, bits 96:160, shift 6, mask 51).
- v.l2 = binary.LittleEndian.Uint64(x[12:20]) >> 6
- v.l2 &= maskLow51Bits
- // Bits 153:204 (bytes 19:27, bits 152:216, shift 1, mask 51).
- v.l3 = binary.LittleEndian.Uint64(x[19:27]) >> 1
- v.l3 &= maskLow51Bits
- // Bits 204:251 (bytes 24:32, bits 192:256, shift 12, mask 51).
- // Note: not bytes 25:33, shift 4, to avoid overread.
- v.l4 = binary.LittleEndian.Uint64(x[24:32]) >> 12
- v.l4 &= maskLow51Bits
-
- return v
-}
-
-// Bytes returns the canonical 32-byte little-endian encoding of v.
-func (v *Element) Bytes() []byte {
- // This function is outlined to make the allocations inline in the caller
- // rather than happen on the heap.
- var out [32]byte
- return v.bytes(&out)
-}
-
-func (v *Element) bytes(out *[32]byte) []byte {
- t := *v
- t.reduce()
-
- var buf [8]byte
- for i, l := range [5]uint64{t.l0, t.l1, t.l2, t.l3, t.l4} {
- bitsOffset := i * 51
-		binary.LittleEndian.PutUint64(buf[:], l<<uint(bitsOffset%8))
-		for i, bb := range buf {
-			off := bitsOffset/8 + i
-			if off >= len(out) {
- break
- }
- out[off] |= bb
- }
- }
-
- return out[:]
-}
-
-// Equal returns 1 if v and u are equal, and 0 otherwise.
-func (v *Element) Equal(u *Element) int {
- sa, sv := u.Bytes(), v.Bytes()
- return subtle.ConstantTimeCompare(sa, sv)
-}
-
-// mask64Bits returns 0xffffffff if cond is 1, and 0 otherwise.
-func mask64Bits(cond int) uint64 { return ^(uint64(cond) - 1) }
-
-// Select sets v to a if cond == 1, and to b if cond == 0.
-func (v *Element) Select(a, b *Element, cond int) *Element {
- m := mask64Bits(cond)
- v.l0 = (m & a.l0) | (^m & b.l0)
- v.l1 = (m & a.l1) | (^m & b.l1)
- v.l2 = (m & a.l2) | (^m & b.l2)
- v.l3 = (m & a.l3) | (^m & b.l3)
- v.l4 = (m & a.l4) | (^m & b.l4)
- return v
-}
-
-// Swap swaps v and u if cond == 1 or leaves them unchanged if cond == 0, and returns v.
-func (v *Element) Swap(u *Element, cond int) {
- m := mask64Bits(cond)
- t := m & (v.l0 ^ u.l0)
- v.l0 ^= t
- u.l0 ^= t
- t = m & (v.l1 ^ u.l1)
- v.l1 ^= t
- u.l1 ^= t
- t = m & (v.l2 ^ u.l2)
- v.l2 ^= t
- u.l2 ^= t
- t = m & (v.l3 ^ u.l3)
- v.l3 ^= t
- u.l3 ^= t
- t = m & (v.l4 ^ u.l4)
- v.l4 ^= t
- u.l4 ^= t
-}
-
-// IsNegative returns 1 if v is negative, and 0 otherwise.
-func (v *Element) IsNegative() int {
- return int(v.Bytes()[0] & 1)
-}
-
-// Absolute sets v to |u|, and returns v.
-func (v *Element) Absolute(u *Element) *Element {
- return v.Select(new(Element).Negate(u), u, u.IsNegative())
-}
-
-// Multiply sets v = x * y, and returns v.
-func (v *Element) Multiply(x, y *Element) *Element {
- feMul(v, x, y)
- return v
-}
-
-// Square sets v = x * x, and returns v.
-func (v *Element) Square(x *Element) *Element {
- feSquare(v, x)
- return v
-}
-
-// Mult32 sets v = x * y, and returns v.
-func (v *Element) Mult32(x *Element, y uint32) *Element {
- x0lo, x0hi := mul51(x.l0, y)
- x1lo, x1hi := mul51(x.l1, y)
- x2lo, x2hi := mul51(x.l2, y)
- x3lo, x3hi := mul51(x.l3, y)
- x4lo, x4hi := mul51(x.l4, y)
- v.l0 = x0lo + 19*x4hi // carried over per the reduction identity
- v.l1 = x1lo + x0hi
- v.l2 = x2lo + x1hi
- v.l3 = x3lo + x2hi
- v.l4 = x4lo + x3hi
- // The hi portions are going to be only 32 bits, plus any previous excess,
- // so we can skip the carry propagation.
- return v
-}
-
-// mul51 returns lo + hi * 2⁵¹ = a * b.
-func mul51(a uint64, b uint32) (lo uint64, hi uint64) {
- mh, ml := bits.Mul64(a, uint64(b))
- lo = ml & maskLow51Bits
- hi = (mh << 13) | (ml >> 51)
- return
-}
-
-// Pow22523 set v = x^((p-5)/8), and returns v. (p-5)/8 is 2^252-3.
-func (v *Element) Pow22523(x *Element) *Element {
- var t0, t1, t2 Element
-
- t0.Square(x) // x^2
- t1.Square(&t0) // x^4
- t1.Square(&t1) // x^8
- t1.Multiply(x, &t1) // x^9
- t0.Multiply(&t0, &t1) // x^11
- t0.Square(&t0) // x^22
- t0.Multiply(&t1, &t0) // x^31
- t1.Square(&t0) // x^62
- for i := 1; i < 5; i++ { // x^992
- t1.Square(&t1)
- }
- t0.Multiply(&t1, &t0) // x^1023 -> 1023 = 2^10 - 1
- t1.Square(&t0) // 2^11 - 2
- for i := 1; i < 10; i++ { // 2^20 - 2^10
- t1.Square(&t1)
- }
- t1.Multiply(&t1, &t0) // 2^20 - 1
- t2.Square(&t1) // 2^21 - 2
- for i := 1; i < 20; i++ { // 2^40 - 2^20
- t2.Square(&t2)
- }
- t1.Multiply(&t2, &t1) // 2^40 - 1
- t1.Square(&t1) // 2^41 - 2
- for i := 1; i < 10; i++ { // 2^50 - 2^10
- t1.Square(&t1)
- }
- t0.Multiply(&t1, &t0) // 2^50 - 1
- t1.Square(&t0) // 2^51 - 2
- for i := 1; i < 50; i++ { // 2^100 - 2^50
- t1.Square(&t1)
- }
- t1.Multiply(&t1, &t0) // 2^100 - 1
- t2.Square(&t1) // 2^101 - 2
- for i := 1; i < 100; i++ { // 2^200 - 2^100
- t2.Square(&t2)
- }
- t1.Multiply(&t2, &t1) // 2^200 - 1
- t1.Square(&t1) // 2^201 - 2
- for i := 1; i < 50; i++ { // 2^250 - 2^50
- t1.Square(&t1)
- }
- t0.Multiply(&t1, &t0) // 2^250 - 1
- t0.Square(&t0) // 2^251 - 2
- t0.Square(&t0) // 2^252 - 4
- return v.Multiply(&t0, x) // 2^252 - 3 -> x^(2^252-3)
-}
-
-// sqrtM1 is 2^((p-1)/4), which squared is equal to -1 by Euler's Criterion.
-var sqrtM1 = &Element{1718705420411056, 234908883556509,
- 2233514472574048, 2117202627021982, 765476049583133}
-
-// SqrtRatio sets r to the non-negative square root of the ratio of u and v.
-//
-// If u/v is square, SqrtRatio returns r and 1. If u/v is not square, SqrtRatio
-// sets r according to Section 4.3 of draft-irtf-cfrg-ristretto255-decaf448-00,
-// and returns r and 0.
-func (r *Element) SqrtRatio(u, v *Element) (rr *Element, wasSquare int) {
- var a, b Element
-
- // r = (u * v3) * (u * v7)^((p-5)/8)
- v2 := a.Square(v)
- uv3 := b.Multiply(u, b.Multiply(v2, v))
- uv7 := a.Multiply(uv3, a.Square(v2))
- r.Multiply(uv3, r.Pow22523(uv7))
-
- check := a.Multiply(v, a.Square(r)) // check = v * r^2
-
- uNeg := b.Negate(u)
- correctSignSqrt := check.Equal(u)
- flippedSignSqrt := check.Equal(uNeg)
- flippedSignSqrtI := check.Equal(uNeg.Multiply(uNeg, sqrtM1))
-
- rPrime := b.Multiply(r, sqrtM1) // r_prime = SQRT_M1 * r
- // r = CT_SELECT(r_prime IF flipped_sign_sqrt | flipped_sign_sqrt_i ELSE r)
- r.Select(rPrime, r, flippedSignSqrt|flippedSignSqrtI)
-
- r.Absolute(r) // Choose the nonnegative square root.
- return r, correctSignSqrt | flippedSignSqrt
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_amd64.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_amd64.go
deleted file mode 100644
index 44dc8e8c..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_amd64.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
-
-// +build amd64,gc,!purego
-
-package field
-
-// feMul sets out = a * b. It works like feMulGeneric.
-//go:noescape
-func feMul(out *Element, a *Element, b *Element)
-
-// feSquare sets out = a * a. It works like feSquareGeneric.
-//go:noescape
-func feSquare(out *Element, a *Element)
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_amd64.s b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_amd64.s
deleted file mode 100644
index 293f013c..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_amd64.s
+++ /dev/null
@@ -1,379 +0,0 @@
-// Code generated by command: go run fe_amd64_asm.go -out ../fe_amd64.s -stubs ../fe_amd64.go -pkg field. DO NOT EDIT.
-
-//go:build amd64 && gc && !purego
-// +build amd64,gc,!purego
-
-#include "textflag.h"
-
-// func feMul(out *Element, a *Element, b *Element)
-TEXT ·feMul(SB), NOSPLIT, $0-24
- MOVQ a+8(FP), CX
- MOVQ b+16(FP), BX
-
- // r0 = a0×b0
- MOVQ (CX), AX
- MULQ (BX)
- MOVQ AX, DI
- MOVQ DX, SI
-
- // r0 += 19×a1×b4
- MOVQ 8(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 32(BX)
- ADDQ AX, DI
- ADCQ DX, SI
-
- // r0 += 19×a2×b3
- MOVQ 16(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 24(BX)
- ADDQ AX, DI
- ADCQ DX, SI
-
- // r0 += 19×a3×b2
- MOVQ 24(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 16(BX)
- ADDQ AX, DI
- ADCQ DX, SI
-
- // r0 += 19×a4×b1
- MOVQ 32(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 8(BX)
- ADDQ AX, DI
- ADCQ DX, SI
-
- // r1 = a0×b1
- MOVQ (CX), AX
- MULQ 8(BX)
- MOVQ AX, R9
- MOVQ DX, R8
-
- // r1 += a1×b0
- MOVQ 8(CX), AX
- MULQ (BX)
- ADDQ AX, R9
- ADCQ DX, R8
-
- // r1 += 19×a2×b4
- MOVQ 16(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 32(BX)
- ADDQ AX, R9
- ADCQ DX, R8
-
- // r1 += 19×a3×b3
- MOVQ 24(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 24(BX)
- ADDQ AX, R9
- ADCQ DX, R8
-
- // r1 += 19×a4×b2
- MOVQ 32(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 16(BX)
- ADDQ AX, R9
- ADCQ DX, R8
-
- // r2 = a0×b2
- MOVQ (CX), AX
- MULQ 16(BX)
- MOVQ AX, R11
- MOVQ DX, R10
-
- // r2 += a1×b1
- MOVQ 8(CX), AX
- MULQ 8(BX)
- ADDQ AX, R11
- ADCQ DX, R10
-
- // r2 += a2×b0
- MOVQ 16(CX), AX
- MULQ (BX)
- ADDQ AX, R11
- ADCQ DX, R10
-
- // r2 += 19×a3×b4
- MOVQ 24(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 32(BX)
- ADDQ AX, R11
- ADCQ DX, R10
-
- // r2 += 19×a4×b3
- MOVQ 32(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 24(BX)
- ADDQ AX, R11
- ADCQ DX, R10
-
- // r3 = a0×b3
- MOVQ (CX), AX
- MULQ 24(BX)
- MOVQ AX, R13
- MOVQ DX, R12
-
- // r3 += a1×b2
- MOVQ 8(CX), AX
- MULQ 16(BX)
- ADDQ AX, R13
- ADCQ DX, R12
-
- // r3 += a2×b1
- MOVQ 16(CX), AX
- MULQ 8(BX)
- ADDQ AX, R13
- ADCQ DX, R12
-
- // r3 += a3×b0
- MOVQ 24(CX), AX
- MULQ (BX)
- ADDQ AX, R13
- ADCQ DX, R12
-
- // r3 += 19×a4×b4
- MOVQ 32(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 32(BX)
- ADDQ AX, R13
- ADCQ DX, R12
-
- // r4 = a0×b4
- MOVQ (CX), AX
- MULQ 32(BX)
- MOVQ AX, R15
- MOVQ DX, R14
-
- // r4 += a1×b3
- MOVQ 8(CX), AX
- MULQ 24(BX)
- ADDQ AX, R15
- ADCQ DX, R14
-
- // r4 += a2×b2
- MOVQ 16(CX), AX
- MULQ 16(BX)
- ADDQ AX, R15
- ADCQ DX, R14
-
- // r4 += a3×b1
- MOVQ 24(CX), AX
- MULQ 8(BX)
- ADDQ AX, R15
- ADCQ DX, R14
-
- // r4 += a4×b0
- MOVQ 32(CX), AX
- MULQ (BX)
- ADDQ AX, R15
- ADCQ DX, R14
-
- // First reduction chain
- MOVQ $0x0007ffffffffffff, AX
- SHLQ $0x0d, DI, SI
- SHLQ $0x0d, R9, R8
- SHLQ $0x0d, R11, R10
- SHLQ $0x0d, R13, R12
- SHLQ $0x0d, R15, R14
- ANDQ AX, DI
- IMUL3Q $0x13, R14, R14
- ADDQ R14, DI
- ANDQ AX, R9
- ADDQ SI, R9
- ANDQ AX, R11
- ADDQ R8, R11
- ANDQ AX, R13
- ADDQ R10, R13
- ANDQ AX, R15
- ADDQ R12, R15
-
- // Second reduction chain (carryPropagate)
- MOVQ DI, SI
- SHRQ $0x33, SI
- MOVQ R9, R8
- SHRQ $0x33, R8
- MOVQ R11, R10
- SHRQ $0x33, R10
- MOVQ R13, R12
- SHRQ $0x33, R12
- MOVQ R15, R14
- SHRQ $0x33, R14
- ANDQ AX, DI
- IMUL3Q $0x13, R14, R14
- ADDQ R14, DI
- ANDQ AX, R9
- ADDQ SI, R9
- ANDQ AX, R11
- ADDQ R8, R11
- ANDQ AX, R13
- ADDQ R10, R13
- ANDQ AX, R15
- ADDQ R12, R15
-
- // Store output
- MOVQ out+0(FP), AX
- MOVQ DI, (AX)
- MOVQ R9, 8(AX)
- MOVQ R11, 16(AX)
- MOVQ R13, 24(AX)
- MOVQ R15, 32(AX)
- RET
-
-// func feSquare(out *Element, a *Element)
-TEXT ·feSquare(SB), NOSPLIT, $0-16
- MOVQ a+8(FP), CX
-
- // r0 = l0×l0
- MOVQ (CX), AX
- MULQ (CX)
- MOVQ AX, SI
- MOVQ DX, BX
-
- // r0 += 38×l1×l4
- MOVQ 8(CX), AX
- IMUL3Q $0x26, AX, AX
- MULQ 32(CX)
- ADDQ AX, SI
- ADCQ DX, BX
-
- // r0 += 38×l2×l3
- MOVQ 16(CX), AX
- IMUL3Q $0x26, AX, AX
- MULQ 24(CX)
- ADDQ AX, SI
- ADCQ DX, BX
-
- // r1 = 2×l0×l1
- MOVQ (CX), AX
- SHLQ $0x01, AX
- MULQ 8(CX)
- MOVQ AX, R8
- MOVQ DX, DI
-
- // r1 += 38×l2×l4
- MOVQ 16(CX), AX
- IMUL3Q $0x26, AX, AX
- MULQ 32(CX)
- ADDQ AX, R8
- ADCQ DX, DI
-
- // r1 += 19×l3×l3
- MOVQ 24(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 24(CX)
- ADDQ AX, R8
- ADCQ DX, DI
-
- // r2 = 2×l0×l2
- MOVQ (CX), AX
- SHLQ $0x01, AX
- MULQ 16(CX)
- MOVQ AX, R10
- MOVQ DX, R9
-
- // r2 += l1×l1
- MOVQ 8(CX), AX
- MULQ 8(CX)
- ADDQ AX, R10
- ADCQ DX, R9
-
- // r2 += 38×l3×l4
- MOVQ 24(CX), AX
- IMUL3Q $0x26, AX, AX
- MULQ 32(CX)
- ADDQ AX, R10
- ADCQ DX, R9
-
- // r3 = 2×l0×l3
- MOVQ (CX), AX
- SHLQ $0x01, AX
- MULQ 24(CX)
- MOVQ AX, R12
- MOVQ DX, R11
-
- // r3 += 2×l1×l2
- MOVQ 8(CX), AX
- IMUL3Q $0x02, AX, AX
- MULQ 16(CX)
- ADDQ AX, R12
- ADCQ DX, R11
-
- // r3 += 19×l4×l4
- MOVQ 32(CX), AX
- IMUL3Q $0x13, AX, AX
- MULQ 32(CX)
- ADDQ AX, R12
- ADCQ DX, R11
-
- // r4 = 2×l0×l4
- MOVQ (CX), AX
- SHLQ $0x01, AX
- MULQ 32(CX)
- MOVQ AX, R14
- MOVQ DX, R13
-
- // r4 += 2×l1×l3
- MOVQ 8(CX), AX
- IMUL3Q $0x02, AX, AX
- MULQ 24(CX)
- ADDQ AX, R14
- ADCQ DX, R13
-
- // r4 += l2×l2
- MOVQ 16(CX), AX
- MULQ 16(CX)
- ADDQ AX, R14
- ADCQ DX, R13
-
- // First reduction chain
- MOVQ $0x0007ffffffffffff, AX
- SHLQ $0x0d, SI, BX
- SHLQ $0x0d, R8, DI
- SHLQ $0x0d, R10, R9
- SHLQ $0x0d, R12, R11
- SHLQ $0x0d, R14, R13
- ANDQ AX, SI
- IMUL3Q $0x13, R13, R13
- ADDQ R13, SI
- ANDQ AX, R8
- ADDQ BX, R8
- ANDQ AX, R10
- ADDQ DI, R10
- ANDQ AX, R12
- ADDQ R9, R12
- ANDQ AX, R14
- ADDQ R11, R14
-
- // Second reduction chain (carryPropagate)
- MOVQ SI, BX
- SHRQ $0x33, BX
- MOVQ R8, DI
- SHRQ $0x33, DI
- MOVQ R10, R9
- SHRQ $0x33, R9
- MOVQ R12, R11
- SHRQ $0x33, R11
- MOVQ R14, R13
- SHRQ $0x33, R13
- ANDQ AX, SI
- IMUL3Q $0x13, R13, R13
- ADDQ R13, SI
- ANDQ AX, R8
- ADDQ BX, R8
- ANDQ AX, R10
- ADDQ DI, R10
- ANDQ AX, R12
- ADDQ R9, R12
- ANDQ AX, R14
- ADDQ R11, R14
-
- // Store output
- MOVQ out+0(FP), AX
- MOVQ SI, (AX)
- MOVQ R8, 8(AX)
- MOVQ R10, 16(AX)
- MOVQ R12, 24(AX)
- MOVQ R14, 32(AX)
- RET
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_amd64_noasm.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_amd64_noasm.go
deleted file mode 100644
index ddb6c9b8..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_amd64_noasm.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright (c) 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !amd64 || !gc || purego
-// +build !amd64 !gc purego
-
-package field
-
-func feMul(v, x, y *Element) { feMulGeneric(v, x, y) }
-
-func feSquare(v, x *Element) { feSquareGeneric(v, x) }
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_arm64.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_arm64.go
deleted file mode 100644
index af459ef5..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_arm64.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright (c) 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build arm64 && gc && !purego
-// +build arm64,gc,!purego
-
-package field
-
-//go:noescape
-func carryPropagate(v *Element)
-
-func (v *Element) carryPropagate() *Element {
- carryPropagate(v)
- return v
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_arm64.s b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_arm64.s
deleted file mode 100644
index 5c91e458..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_arm64.s
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (c) 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build arm64 && gc && !purego
-// +build arm64,gc,!purego
-
-#include "textflag.h"
-
-// carryPropagate works exactly like carryPropagateGeneric and uses the
-// same AND, ADD, and LSR+MADD instructions emitted by the compiler, but
-// avoids loading R0-R4 twice and uses LDP and STP.
-//
-// See https://golang.org/issues/43145 for the main compiler issue.
-//
-// func carryPropagate(v *Element)
-TEXT ·carryPropagate(SB),NOFRAME|NOSPLIT,$0-8
- MOVD v+0(FP), R20
-
- LDP 0(R20), (R0, R1)
- LDP 16(R20), (R2, R3)
- MOVD 32(R20), R4
-
- AND $0x7ffffffffffff, R0, R10
- AND $0x7ffffffffffff, R1, R11
- AND $0x7ffffffffffff, R2, R12
- AND $0x7ffffffffffff, R3, R13
- AND $0x7ffffffffffff, R4, R14
-
- ADD R0>>51, R11, R11
- ADD R1>>51, R12, R12
- ADD R2>>51, R13, R13
- ADD R3>>51, R14, R14
- // R4>>51 * 19 + R10 -> R10
- LSR $51, R4, R21
- MOVD $19, R22
- MADD R22, R10, R21, R10
-
- STP (R10, R11), 0(R20)
- STP (R12, R13), 16(R20)
- MOVD R14, 32(R20)
-
- RET
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_arm64_noasm.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_arm64_noasm.go
deleted file mode 100644
index 234a5b2e..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_arm64_noasm.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright (c) 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !arm64 || !gc || purego
-// +build !arm64 !gc purego
-
-package field
-
-func (v *Element) carryPropagate() *Element {
- return v.carryPropagateGeneric()
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_generic.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_generic.go
deleted file mode 100644
index 7b5b78cb..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve25519/field/fe_generic.go
+++ /dev/null
@@ -1,264 +0,0 @@
-// Copyright (c) 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package field
-
-import "math/bits"
-
-// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
-// bits.Mul64 and bits.Add64 intrinsics.
-type uint128 struct {
- lo, hi uint64
-}
-
-// mul64 returns a * b.
-func mul64(a, b uint64) uint128 {
- hi, lo := bits.Mul64(a, b)
- return uint128{lo, hi}
-}
-
-// addMul64 returns v + a * b.
-func addMul64(v uint128, a, b uint64) uint128 {
- hi, lo := bits.Mul64(a, b)
- lo, c := bits.Add64(lo, v.lo, 0)
- hi, _ = bits.Add64(hi, v.hi, c)
- return uint128{lo, hi}
-}
-
-// shiftRightBy51 returns a >> 51. a is assumed to be at most 115 bits.
-func shiftRightBy51(a uint128) uint64 {
- return (a.hi << (64 - 51)) | (a.lo >> 51)
-}
-
-func feMulGeneric(v, a, b *Element) {
- a0 := a.l0
- a1 := a.l1
- a2 := a.l2
- a3 := a.l3
- a4 := a.l4
-
- b0 := b.l0
- b1 := b.l1
- b2 := b.l2
- b3 := b.l3
- b4 := b.l4
-
- // Limb multiplication works like pen-and-paper columnar multiplication, but
- // with 51-bit limbs instead of digits.
- //
- // a4 a3 a2 a1 a0 x
- // b4 b3 b2 b1 b0 =
- // ------------------------
- // a4b0 a3b0 a2b0 a1b0 a0b0 +
- // a4b1 a3b1 a2b1 a1b1 a0b1 +
- // a4b2 a3b2 a2b2 a1b2 a0b2 +
- // a4b3 a3b3 a2b3 a1b3 a0b3 +
- // a4b4 a3b4 a2b4 a1b4 a0b4 =
- // ----------------------------------------------
- // r8 r7 r6 r5 r4 r3 r2 r1 r0
- //
- // We can then use the reduction identity (a * 2²⁵⁵ + b = a * 19 + b) to
- // reduce the limbs that would overflow 255 bits. r5 * 2²⁵⁵ becomes 19 * r5,
- // r6 * 2³⁰⁶ becomes 19 * r6 * 2⁵¹, etc.
- //
- // Reduction can be carried out simultaneously to multiplication. For
- // example, we do not compute r5: whenever the result of a multiplication
- // belongs to r5, like a1b4, we multiply it by 19 and add the result to r0.
- //
- // a4b0 a3b0 a2b0 a1b0 a0b0 +
- // a3b1 a2b1 a1b1 a0b1 19×a4b1 +
- // a2b2 a1b2 a0b2 19×a4b2 19×a3b2 +
- // a1b3 a0b3 19×a4b3 19×a3b3 19×a2b3 +
- // a0b4 19×a4b4 19×a3b4 19×a2b4 19×a1b4 =
- // --------------------------------------
- // r4 r3 r2 r1 r0
- //
- // Finally we add up the columns into wide, overlapping limbs.
-
- a1_19 := a1 * 19
- a2_19 := a2 * 19
- a3_19 := a3 * 19
- a4_19 := a4 * 19
-
- // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
- r0 := mul64(a0, b0)
- r0 = addMul64(r0, a1_19, b4)
- r0 = addMul64(r0, a2_19, b3)
- r0 = addMul64(r0, a3_19, b2)
- r0 = addMul64(r0, a4_19, b1)
-
- // r1 = a0×b1 + a1×b0 + 19×(a2×b4 + a3×b3 + a4×b2)
- r1 := mul64(a0, b1)
- r1 = addMul64(r1, a1, b0)
- r1 = addMul64(r1, a2_19, b4)
- r1 = addMul64(r1, a3_19, b3)
- r1 = addMul64(r1, a4_19, b2)
-
- // r2 = a0×b2 + a1×b1 + a2×b0 + 19×(a3×b4 + a4×b3)
- r2 := mul64(a0, b2)
- r2 = addMul64(r2, a1, b1)
- r2 = addMul64(r2, a2, b0)
- r2 = addMul64(r2, a3_19, b4)
- r2 = addMul64(r2, a4_19, b3)
-
- // r3 = a0×b3 + a1×b2 + a2×b1 + a3×b0 + 19×a4×b4
- r3 := mul64(a0, b3)
- r3 = addMul64(r3, a1, b2)
- r3 = addMul64(r3, a2, b1)
- r3 = addMul64(r3, a3, b0)
- r3 = addMul64(r3, a4_19, b4)
-
- // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
- r4 := mul64(a0, b4)
- r4 = addMul64(r4, a1, b3)
- r4 = addMul64(r4, a2, b2)
- r4 = addMul64(r4, a3, b1)
- r4 = addMul64(r4, a4, b0)
-
- // After the multiplication, we need to reduce (carry) the five coefficients
- // to obtain a result with limbs that are at most slightly larger than 2⁵¹,
- // to respect the Element invariant.
- //
- // Overall, the reduction works the same as carryPropagate, except with
- // wider inputs: we take the carry for each coefficient by shifting it right
- // by 51, and add it to the limb above it. The top carry is multiplied by 19
- // according to the reduction identity and added to the lowest limb.
- //
- // The largest coefficient (r0) will be at most 111 bits, which guarantees
- // that all carries are at most 111 - 51 = 60 bits, which fits in a uint64.
- //
- // r0 = a0×b0 + 19×(a1×b4 + a2×b3 + a3×b2 + a4×b1)
- // r0 < 2⁵²×2⁵² + 19×(2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵² + 2⁵²×2⁵²)
- // r0 < (1 + 19 × 4) × 2⁵² × 2⁵²
- // r0 < 2⁷ × 2⁵² × 2⁵²
- // r0 < 2¹¹¹
- //
- // Moreover, the top coefficient (r4) is at most 107 bits, so c4 is at most
- // 56 bits, and c4 * 19 is at most 61 bits, which again fits in a uint64 and
- // allows us to easily apply the reduction identity.
- //
- // r4 = a0×b4 + a1×b3 + a2×b2 + a3×b1 + a4×b0
- // r4 < 5 × 2⁵² × 2⁵²
- // r4 < 2¹⁰⁷
- //
-
- c0 := shiftRightBy51(r0)
- c1 := shiftRightBy51(r1)
- c2 := shiftRightBy51(r2)
- c3 := shiftRightBy51(r3)
- c4 := shiftRightBy51(r4)
-
- rr0 := r0.lo&maskLow51Bits + c4*19
- rr1 := r1.lo&maskLow51Bits + c0
- rr2 := r2.lo&maskLow51Bits + c1
- rr3 := r3.lo&maskLow51Bits + c2
- rr4 := r4.lo&maskLow51Bits + c3
-
- // Now all coefficients fit into 64-bit registers but are still too large to
- // be passed around as a Element. We therefore do one last carry chain,
- // where the carries will be small enough to fit in the wiggle room above 2⁵¹.
- *v = Element{rr0, rr1, rr2, rr3, rr4}
- v.carryPropagate()
-}
-
-func feSquareGeneric(v, a *Element) {
- l0 := a.l0
- l1 := a.l1
- l2 := a.l2
- l3 := a.l3
- l4 := a.l4
-
- // Squaring works precisely like multiplication above, but thanks to its
- // symmetry we get to group a few terms together.
- //
- // l4 l3 l2 l1 l0 x
- // l4 l3 l2 l1 l0 =
- // ------------------------
- // l4l0 l3l0 l2l0 l1l0 l0l0 +
- // l4l1 l3l1 l2l1 l1l1 l0l1 +
- // l4l2 l3l2 l2l2 l1l2 l0l2 +
- // l4l3 l3l3 l2l3 l1l3 l0l3 +
- // l4l4 l3l4 l2l4 l1l4 l0l4 =
- // ----------------------------------------------
- // r8 r7 r6 r5 r4 r3 r2 r1 r0
- //
- // l4l0 l3l0 l2l0 l1l0 l0l0 +
- // l3l1 l2l1 l1l1 l0l1 19×l4l1 +
- // l2l2 l1l2 l0l2 19×l4l2 19×l3l2 +
- // l1l3 l0l3 19×l4l3 19×l3l3 19×l2l3 +
- // l0l4 19×l4l4 19×l3l4 19×l2l4 19×l1l4 =
- // --------------------------------------
- // r4 r3 r2 r1 r0
- //
- // With precomputed 2×, 19×, and 2×19× terms, we can compute each limb with
- // only three Mul64 and four Add64, instead of five and eight.
-
- l0_2 := l0 * 2
- l1_2 := l1 * 2
-
- l1_38 := l1 * 38
- l2_38 := l2 * 38
- l3_38 := l3 * 38
-
- l3_19 := l3 * 19
- l4_19 := l4 * 19
-
- // r0 = l0×l0 + 19×(l1×l4 + l2×l3 + l3×l2 + l4×l1) = l0×l0 + 19×2×(l1×l4 + l2×l3)
- r0 := mul64(l0, l0)
- r0 = addMul64(r0, l1_38, l4)
- r0 = addMul64(r0, l2_38, l3)
-
- // r1 = l0×l1 + l1×l0 + 19×(l2×l4 + l3×l3 + l4×l2) = 2×l0×l1 + 19×2×l2×l4 + 19×l3×l3
- r1 := mul64(l0_2, l1)
- r1 = addMul64(r1, l2_38, l4)
- r1 = addMul64(r1, l3_19, l3)
-
- // r2 = l0×l2 + l1×l1 + l2×l0 + 19×(l3×l4 + l4×l3) = 2×l0×l2 + l1×l1 + 19×2×l3×l4
- r2 := mul64(l0_2, l2)
- r2 = addMul64(r2, l1, l1)
- r2 = addMul64(r2, l3_38, l4)
-
- // r3 = l0×l3 + l1×l2 + l2×l1 + l3×l0 + 19×l4×l4 = 2×l0×l3 + 2×l1×l2 + 19×l4×l4
- r3 := mul64(l0_2, l3)
- r3 = addMul64(r3, l1_2, l2)
- r3 = addMul64(r3, l4_19, l4)
-
- // r4 = l0×l4 + l1×l3 + l2×l2 + l3×l1 + l4×l0 = 2×l0×l4 + 2×l1×l3 + l2×l2
- r4 := mul64(l0_2, l4)
- r4 = addMul64(r4, l1_2, l3)
- r4 = addMul64(r4, l2, l2)
-
- c0 := shiftRightBy51(r0)
- c1 := shiftRightBy51(r1)
- c2 := shiftRightBy51(r2)
- c3 := shiftRightBy51(r3)
- c4 := shiftRightBy51(r4)
-
- rr0 := r0.lo&maskLow51Bits + c4*19
- rr1 := r1.lo&maskLow51Bits + c0
- rr2 := r2.lo&maskLow51Bits + c1
- rr3 := r3.lo&maskLow51Bits + c2
- rr4 := r4.lo&maskLow51Bits + c3
-
- *v = Element{rr0, rr1, rr2, rr3, rr4}
- v.carryPropagate()
-}
-
-// carryPropagate brings the limbs below 52 bits by applying the reduction
-// identity (a * 2²⁵⁵ + b = a * 19 + b) to the l4 carry. TODO inline
-func (v *Element) carryPropagateGeneric() *Element {
- c0 := v.l0 >> 51
- c1 := v.l1 >> 51
- c2 := v.l2 >> 51
- c3 := v.l3 >> 51
- c4 := v.l4 >> 51
-
- v.l0 = v.l0&maskLow51Bits + c4*19
- v.l1 = v.l1&maskLow51Bits + c0
- v.l2 = v.l2&maskLow51Bits + c1
- v.l3 = v.l3&maskLow51Bits + c2
- v.l4 = v.l4&maskLow51Bits + c3
-
- return v
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go
deleted file mode 100644
index 97f891ff..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curve_info.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA.
-package ecc
-
-import (
- "bytes"
- "crypto/elliptic"
-
- "github.com/ProtonMail/go-crypto/bitcurves"
- "github.com/ProtonMail/go-crypto/brainpool"
- "github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
-)
-
-type CurveInfo struct {
- GenName string
- Oid *encoding.OID
- Curve Curve
-}
-
-var Curves = []CurveInfo{
- {
- // NIST P-256
- GenName: "P256",
- Oid: encoding.NewOID([]byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}),
- Curve: NewGenericCurve(elliptic.P256()),
- },
- {
- // NIST P-384
- GenName: "P384",
- Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x22}),
- Curve: NewGenericCurve(elliptic.P384()),
- },
- {
- // NIST P-521
- GenName: "P521",
- Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x23}),
- Curve: NewGenericCurve(elliptic.P521()),
- },
- {
- // SecP256k1
- GenName: "SecP256k1",
- Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x0A}),
- Curve: NewGenericCurve(bitcurves.S256()),
- },
- {
- // Curve25519
- GenName: "Curve25519",
- Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01}),
- Curve: NewCurve25519(),
- },
- {
- // x448
- GenName: "Curve448",
- Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x6F}),
- Curve: NewX448(),
- },
- {
- // Ed25519
- GenName: "Curve25519",
- Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01}),
- Curve: NewEd25519(),
- },
- {
- // Ed448
- GenName: "Curve448",
- Oid: encoding.NewOID([]byte{0x2B, 0x65, 0x71}),
- Curve: NewEd448(),
- },
- {
- // BrainpoolP256r1
- GenName: "BrainpoolP256",
- Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x07}),
- Curve: NewGenericCurve(brainpool.P256r1()),
- },
- {
- // BrainpoolP384r1
- GenName: "BrainpoolP384",
- Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0B}),
- Curve: NewGenericCurve(brainpool.P384r1()),
- },
- {
- // BrainpoolP512r1
- GenName: "BrainpoolP512",
- Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0D}),
- Curve: NewGenericCurve(brainpool.P512r1()),
- },
-}
-
-func FindByCurve(curve Curve) *CurveInfo {
- for _, curveInfo := range Curves {
- if curveInfo.Curve.GetCurveName() == curve.GetCurveName() {
- return &curveInfo
- }
- }
- return nil
-}
-
-func FindByOid(oid encoding.Field) *CurveInfo {
- var rawBytes = oid.Bytes()
- for _, curveInfo := range Curves {
- if bytes.Equal(curveInfo.Oid.Bytes(), rawBytes) {
- return &curveInfo
- }
- }
- return nil
-}
-
-func FindEdDSAByGenName(curveGenName string) EdDSACurve {
- for _, curveInfo := range Curves {
- if curveInfo.GenName == curveGenName {
- curve, ok := curveInfo.Curve.(EdDSACurve)
- if ok {
- return curve
- }
- }
- }
- return nil
-}
-
-func FindECDSAByGenName(curveGenName string) ECDSACurve {
- for _, curveInfo := range Curves {
- if curveInfo.GenName == curveGenName {
- curve, ok := curveInfo.Curve.(ECDSACurve)
- if ok {
- return curve
- }
- }
- }
- return nil
-}
-
-func FindECDHByGenName(curveGenName string) ECDHCurve {
- for _, curveInfo := range Curves {
- if curveInfo.GenName == curveGenName {
- curve, ok := curveInfo.Curve.(ECDHCurve)
- if ok {
- return curve
- }
- }
- }
- return nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go
deleted file mode 100644
index 5ed9c93b..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curves.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA.
-package ecc
-
-import (
- "io"
- "math/big"
-)
-
-type Curve interface {
- GetCurveName() string
-}
-
-type ECDSACurve interface {
- Curve
- MarshalIntegerPoint(x, y *big.Int) []byte
- UnmarshalIntegerPoint([]byte) (x, y *big.Int)
- MarshalIntegerSecret(d *big.Int) []byte
- UnmarshalIntegerSecret(d []byte) *big.Int
- GenerateECDSA(rand io.Reader) (x, y, secret *big.Int, err error)
- Sign(rand io.Reader, x, y, d *big.Int, hash []byte) (r, s *big.Int, err error)
- Verify(x, y *big.Int, hash []byte, r, s *big.Int) bool
- ValidateECDSA(x, y *big.Int, secret []byte) error
-}
-
-type EdDSACurve interface {
- Curve
- MarshalBytePoint(x []byte) []byte
- UnmarshalBytePoint([]byte) (x []byte)
- MarshalByteSecret(d []byte) []byte
- UnmarshalByteSecret(d []byte) []byte
- MarshalSignature(sig []byte) (r, s []byte)
- UnmarshalSignature(r, s []byte) (sig []byte)
- GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error)
- Sign(publicKey, privateKey, message []byte) (sig []byte, err error)
- Verify(publicKey, message, sig []byte) bool
- ValidateEdDSA(publicKey, privateKey []byte) (err error)
-}
-type ECDHCurve interface {
- Curve
- MarshalBytePoint([]byte) (encoded []byte)
- UnmarshalBytePoint(encoded []byte) []byte
- MarshalByteSecret(d []byte) []byte
- UnmarshalByteSecret(d []byte) []byte
- GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error)
- Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error)
- Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error)
- ValidateECDH(public []byte, secret []byte) error
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go
deleted file mode 100644
index 54a08a8a..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed25519.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA.
-package ecc
-
-import (
- "crypto/subtle"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- ed25519lib "github.com/cloudflare/circl/sign/ed25519"
-)
-
-const ed25519Size = 32
-
-type ed25519 struct{}
-
-func NewEd25519() *ed25519 {
- return &ed25519{}
-}
-
-func (c *ed25519) GetCurveName() string {
- return "ed25519"
-}
-
-// MarshalBytePoint encodes the public point from native format, adding the prefix.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed25519) MarshalBytePoint(x []byte) []byte {
- return append([]byte{0x40}, x...)
-}
-
-// UnmarshalBytePoint decodes a point from prefixed format to native.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed25519) UnmarshalBytePoint(point []byte) (x []byte) {
- if len(point) != ed25519lib.PublicKeySize+1 {
- return nil
- }
-
- // Return unprefixed
- return point[1:]
-}
-
-// MarshalByteSecret encodes a scalar in native format.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed25519) MarshalByteSecret(d []byte) []byte {
- return d
-}
-
-// UnmarshalByteSecret decodes a scalar in native format and re-adds the stripped leading zeroes
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed25519) UnmarshalByteSecret(s []byte) (d []byte) {
- if len(s) > ed25519lib.SeedSize {
- return nil
- }
-
- // Handle stripped leading zeroes
- d = make([]byte, ed25519lib.SeedSize)
- copy(d[ed25519lib.SeedSize-len(s):], s)
- return
-}
-
-// MarshalSignature splits a signature in R and S.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.1
-func (c *ed25519) MarshalSignature(sig []byte) (r, s []byte) {
- return sig[:ed25519Size], sig[ed25519Size:]
-}
-
-// UnmarshalSignature decodes R and S in the native format, re-adding the stripped leading zeroes
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.1
-func (c *ed25519) UnmarshalSignature(r, s []byte) (sig []byte) {
- // Check size
- if len(r) > 32 || len(s) > 32 {
- return nil
- }
-
- sig = make([]byte, ed25519lib.SignatureSize)
-
- // Handle stripped leading zeroes
- copy(sig[ed25519Size-len(r):ed25519Size], r)
- copy(sig[ed25519lib.SignatureSize-len(s):], s)
- return sig
-}
-
-func (c *ed25519) GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) {
- pk, sk, err := ed25519lib.GenerateKey(rand)
-
- if err != nil {
- return nil, nil, err
- }
-
- return pk, sk[:ed25519lib.SeedSize], nil
-}
-
-func getEd25519Sk(publicKey, privateKey []byte) ed25519lib.PrivateKey {
- return append(privateKey, publicKey...)
-}
-
-func (c *ed25519) Sign(publicKey, privateKey, message []byte) (sig []byte, err error) {
- sig = ed25519lib.Sign(getEd25519Sk(publicKey, privateKey), message)
- return sig, nil
-}
-
-func (c *ed25519) Verify(publicKey, message, sig []byte) bool {
- return ed25519lib.Verify(publicKey, message, sig)
-}
-
-func (c *ed25519) ValidateEdDSA(publicKey, privateKey []byte) (err error) {
- priv := getEd25519Sk(publicKey, privateKey)
- expectedPriv := ed25519lib.NewKeyFromSeed(priv.Seed())
- if subtle.ConstantTimeCompare(priv, expectedPriv) == 0 {
- return errors.KeyInvalidError("ecc: invalid ed25519 secret")
- }
- return nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go
deleted file mode 100644
index 18cd8043..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/ed448.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA.
-package ecc
-
-import (
- "crypto/subtle"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- ed448lib "github.com/cloudflare/circl/sign/ed448"
-)
-
-type ed448 struct{}
-
-func NewEd448() *ed448 {
- return &ed448{}
-}
-
-func (c *ed448) GetCurveName() string {
- return "ed448"
-}
-
-// MarshalBytePoint encodes the public point from native format, adding the prefix.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed448) MarshalBytePoint(x []byte) []byte {
- // Return prefixed
- return append([]byte{0x40}, x...)
-}
-
-// UnmarshalBytePoint decodes a point from prefixed format to native.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed448) UnmarshalBytePoint(point []byte) (x []byte) {
- if len(point) != ed448lib.PublicKeySize+1 {
- return nil
- }
-
- // Strip prefix
- return point[1:]
-}
-
-// MarshalByteSecret encoded a scalar from native format to prefixed.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed448) MarshalByteSecret(d []byte) []byte {
- // Return prefixed
- return append([]byte{0x40}, d...)
-}
-
-// UnmarshalByteSecret decodes a scalar from prefixed format to native.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.5
-func (c *ed448) UnmarshalByteSecret(s []byte) (d []byte) {
- // Check prefixed size
- if len(s) != ed448lib.SeedSize+1 {
- return nil
- }
-
- // Strip prefix
- return s[1:]
-}
-
-// MarshalSignature splits a signature in R and S, where R is in prefixed native format and
-// S is an MPI with value zero.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.2
-func (c *ed448) MarshalSignature(sig []byte) (r, s []byte) {
- return append([]byte{0x40}, sig...), []byte{}
-}
-
-// UnmarshalSignature decodes R and S in the native format. Only R is used, in prefixed native format.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.2.3.3.2
-func (c *ed448) UnmarshalSignature(r, s []byte) (sig []byte) {
- if len(r) != ed448lib.SignatureSize+1 {
- return nil
- }
-
- return r[1:]
-}
-
-func (c *ed448) GenerateEdDSA(rand io.Reader) (pub, priv []byte, err error) {
- pk, sk, err := ed448lib.GenerateKey(rand)
-
- if err != nil {
- return nil, nil, err
- }
-
- return pk, sk[:ed448lib.SeedSize], nil
-}
-
-func getEd448Sk(publicKey, privateKey []byte) ed448lib.PrivateKey {
- return append(privateKey, publicKey...)
-}
-
-func (c *ed448) Sign(publicKey, privateKey, message []byte) (sig []byte, err error) {
- // Ed448 is used with the empty string as a context string.
- // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7
- sig = ed448lib.Sign(getEd448Sk(publicKey, privateKey), message, "")
-
- return sig, nil
-}
-
-func (c *ed448) Verify(publicKey, message, sig []byte) bool {
- // Ed448 is used with the empty string as a context string.
- // See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-13.7
- return ed448lib.Verify(publicKey, message, sig, "")
-}
-
-func (c *ed448) ValidateEdDSA(publicKey, privateKey []byte) (err error) {
- priv := getEd448Sk(publicKey, privateKey)
- expectedPriv := ed448lib.NewKeyFromSeed(priv.Seed())
- if subtle.ConstantTimeCompare(priv, expectedPriv) == 0 {
- return errors.KeyInvalidError("ecc: invalid ed448 secret")
- }
- return nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go
deleted file mode 100644
index e28d7c71..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/generic.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA.
-package ecc
-
-import (
- "crypto/ecdsa"
- "crypto/elliptic"
- "fmt"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "io"
- "math/big"
-)
-
-type genericCurve struct {
- Curve elliptic.Curve
-}
-
-func NewGenericCurve(c elliptic.Curve) *genericCurve {
- return &genericCurve{
- Curve: c,
- }
-}
-
-func (c *genericCurve) GetCurveName() string {
- return c.Curve.Params().Name
-}
-
-func (c *genericCurve) MarshalBytePoint(point []byte) []byte {
- return point
-}
-
-func (c *genericCurve) UnmarshalBytePoint(point []byte) []byte {
- return point
-}
-
-func (c *genericCurve) MarshalIntegerPoint(x, y *big.Int) []byte {
- return elliptic.Marshal(c.Curve, x, y)
-}
-
-func (c *genericCurve) UnmarshalIntegerPoint(point []byte) (x, y *big.Int) {
- return elliptic.Unmarshal(c.Curve, point)
-}
-
-func (c *genericCurve) MarshalByteSecret(d []byte) []byte {
- return d
-}
-
-func (c *genericCurve) UnmarshalByteSecret(d []byte) []byte {
- return d
-}
-
-func (c *genericCurve) MarshalIntegerSecret(d *big.Int) []byte {
- return d.Bytes()
-}
-
-func (c *genericCurve) UnmarshalIntegerSecret(d []byte) *big.Int {
- return new(big.Int).SetBytes(d)
-}
-
-func (c *genericCurve) GenerateECDH(rand io.Reader) (point, secret []byte, err error) {
- secret, x, y, err := elliptic.GenerateKey(c.Curve, rand)
- if err != nil {
- return nil, nil, err
- }
-
- point = elliptic.Marshal(c.Curve, x, y)
- return point, secret, nil
-}
-
-func (c *genericCurve) GenerateECDSA(rand io.Reader) (x, y, secret *big.Int, err error) {
- priv, err := ecdsa.GenerateKey(c.Curve, rand)
- if err != nil {
- return
- }
-
- return priv.X, priv.Y, priv.D, nil
-}
-
-func (c *genericCurve) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) {
- xP, yP := elliptic.Unmarshal(c.Curve, point)
- if xP == nil {
- panic("invalid point")
- }
-
- d, x, y, err := elliptic.GenerateKey(c.Curve, rand)
- if err != nil {
- return nil, nil, err
- }
-
- vsG := elliptic.Marshal(c.Curve, x, y)
- zbBig, _ := c.Curve.ScalarMult(xP, yP, d)
-
- byteLen := (c.Curve.Params().BitSize + 7) >> 3
- zb := make([]byte, byteLen)
- zbBytes := zbBig.Bytes()
- copy(zb[byteLen-len(zbBytes):], zbBytes)
-
- return vsG, zb, nil
-}
-
-func (c *genericCurve) Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error) {
- x, y := elliptic.Unmarshal(c.Curve, ephemeral)
- zbBig, _ := c.Curve.ScalarMult(x, y, secret)
- byteLen := (c.Curve.Params().BitSize + 7) >> 3
- zb := make([]byte, byteLen)
- zbBytes := zbBig.Bytes()
- copy(zb[byteLen-len(zbBytes):], zbBytes)
-
- return zb, nil
-}
-
-func (c *genericCurve) Sign(rand io.Reader, x, y, d *big.Int, hash []byte) (r, s *big.Int, err error) {
- priv := &ecdsa.PrivateKey{D: d, PublicKey: ecdsa.PublicKey{X: x, Y: y, Curve: c.Curve}}
- return ecdsa.Sign(rand, priv, hash)
-}
-
-func (c *genericCurve) Verify(x, y *big.Int, hash []byte, r, s *big.Int) bool {
- pub := &ecdsa.PublicKey{X: x, Y: y, Curve: c.Curve}
- return ecdsa.Verify(pub, hash, r, s)
-}
-
-func (c *genericCurve) validate(xP, yP *big.Int, secret []byte) error {
- // the public point should not be at infinity (0,0)
- zero := new(big.Int)
- if xP.Cmp(zero) == 0 && yP.Cmp(zero) == 0 {
- return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): infinity point", c.Curve.Params().Name))
- }
-
- // re-derive the public point Q' = (X,Y) = dG
- // to compare to declared Q in public key
- expectedX, expectedY := c.Curve.ScalarBaseMult(secret)
- if xP.Cmp(expectedX) != 0 || yP.Cmp(expectedY) != 0 {
- return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): invalid point", c.Curve.Params().Name))
- }
-
- return nil
-}
-
-func (c *genericCurve) ValidateECDSA(xP, yP *big.Int, secret []byte) error {
- return c.validate(xP, yP, secret)
-}
-
-func (c *genericCurve) ValidateECDH(point []byte, secret []byte) error {
- xP, yP := elliptic.Unmarshal(c.Curve, point)
- if xP == nil {
- return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): invalid point", c.Curve.Params().Name))
- }
-
- return c.validate(xP, yP, secret)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go
deleted file mode 100644
index df04262e..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/x448.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Package ecc implements a generic interface for ECDH, ECDSA, and EdDSA.
-package ecc
-
-import (
- "crypto/subtle"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- x448lib "github.com/cloudflare/circl/dh/x448"
-)
-
-type x448 struct{}
-
-func NewX448() *x448 {
- return &x448{}
-}
-
-func (c *x448) GetCurveName() string {
- return "x448"
-}
-
-// MarshalBytePoint encodes the public point from native format, adding the prefix.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6
-func (c *x448) MarshalBytePoint(point []byte) []byte {
- return append([]byte{0x40}, point...)
-}
-
-// UnmarshalBytePoint decodes a point from prefixed format to native.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6
-func (c *x448) UnmarshalBytePoint(point []byte) []byte {
- if len(point) != x448lib.Size+1 {
- return nil
- }
-
- return point[1:]
-}
-
-// MarshalByteSecret encoded a scalar from native format to prefixed.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.2
-func (c *x448) MarshalByteSecret(d []byte) []byte {
- return append([]byte{0x40}, d...)
-}
-
-// UnmarshalByteSecret decodes a scalar from prefixed format to native.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh-06#section-5.5.5.6.1.2
-func (c *x448) UnmarshalByteSecret(d []byte) []byte {
- if len(d) != x448lib.Size+1 {
- return nil
- }
-
- // Store without prefix
- return d[1:]
-}
-
-func (c *x448) generateKeyPairBytes(rand io.Reader) (sk, pk x448lib.Key, err error) {
- if _, err = rand.Read(sk[:]); err != nil {
- return
- }
-
- x448lib.KeyGen(&pk, &sk)
- return
-}
-
-func (c *x448) GenerateECDH(rand io.Reader) (point []byte, secret []byte, err error) {
- priv, pub, err := c.generateKeyPairBytes(rand)
- if err != nil {
- return
- }
-
- return pub[:], priv[:], nil
-}
-
-func (c *x448) Encaps(rand io.Reader, point []byte) (ephemeral, sharedSecret []byte, err error) {
- var pk, ss x448lib.Key
- seed, e, err := c.generateKeyPairBytes(rand)
- if err != nil {
- return nil, nil, err
- }
- copy(pk[:], point)
- x448lib.Shared(&ss, &seed, &pk)
-
- return e[:], ss[:], nil
-}
-
-func (c *x448) Decaps(ephemeral, secret []byte) (sharedSecret []byte, err error) {
- var ss, sk, e x448lib.Key
-
- copy(sk[:], secret)
- copy(e[:], ephemeral)
- x448lib.Shared(&ss, &sk, &e)
-
- return ss[:], nil
-}
-
-func (c *x448) ValidateECDH(point []byte, secret []byte) error {
- var sk, pk, expectedPk x448lib.Key
-
- copy(pk[:], point)
- copy(sk[:], secret)
- x448lib.KeyGen(&expectedPk, &sk)
-
- if subtle.ConstantTimeCompare(expectedPk[:], pk[:]) == 0 {
- return errors.KeyInvalidError("ecc: invalid curve25519 public point")
- }
-
- return nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go
deleted file mode 100644
index 6c921481..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package encoding implements openpgp packet field encodings as specified in
-// RFC 4880 and 6637.
-package encoding
-
-import "io"
-
-// Field is an encoded field of an openpgp packet.
-type Field interface {
- // Bytes returns the decoded data.
- Bytes() []byte
-
- // BitLength is the size in bits of the decoded data.
- BitLength() uint16
-
- // EncodedBytes returns the encoded data.
- EncodedBytes() []byte
-
- // EncodedLength is the size in bytes of the encoded data.
- EncodedLength() uint16
-
- // ReadFrom reads the next Field from r.
- ReadFrom(r io.Reader) (int64, error)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go
deleted file mode 100644
index 02e5e695..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package encoding
-
-import (
- "io"
- "math/big"
- "math/bits"
-)
-
-// An MPI is used to store the contents of a big integer, along with the bit
-// length that was specified in the original input. This allows the MPI to be
-// reserialized exactly.
-type MPI struct {
- bytes []byte
- bitLength uint16
-}
-
-// NewMPI returns a MPI initialized with bytes.
-func NewMPI(bytes []byte) *MPI {
- for len(bytes) != 0 && bytes[0] == 0 {
- bytes = bytes[1:]
- }
- if len(bytes) == 0 {
- bitLength := uint16(0)
- return &MPI{bytes, bitLength}
- }
- bitLength := 8*uint16(len(bytes)-1) + uint16(bits.Len8(bytes[0]))
- return &MPI{bytes, bitLength}
-}
-
-// Bytes returns the decoded data.
-func (m *MPI) Bytes() []byte {
- return m.bytes
-}
-
-// BitLength is the size in bits of the decoded data.
-func (m *MPI) BitLength() uint16 {
- return m.bitLength
-}
-
-// EncodedBytes returns the encoded data.
-func (m *MPI) EncodedBytes() []byte {
- return append([]byte{byte(m.bitLength >> 8), byte(m.bitLength)}, m.bytes...)
-}
-
-// EncodedLength is the size in bytes of the encoded data.
-func (m *MPI) EncodedLength() uint16 {
- return uint16(2 + len(m.bytes))
-}
-
-// ReadFrom reads into m the next MPI from r.
-func (m *MPI) ReadFrom(r io.Reader) (int64, error) {
- var buf [2]byte
- n, err := io.ReadFull(r, buf[0:])
- if err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return int64(n), err
- }
-
- m.bitLength = uint16(buf[0])<<8 | uint16(buf[1])
- m.bytes = make([]byte, (int(m.bitLength)+7)/8)
-
- nn, err := io.ReadFull(r, m.bytes)
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
-
- // remove leading zero bytes from malformed GnuPG encoded MPIs:
- // https://bugs.gnupg.org/gnupg/issue1853
- // for _, b := range m.bytes {
- // if b != 0 {
- // break
- // }
- // m.bytes = m.bytes[1:]
- // m.bitLength -= 8
- // }
-
- return int64(n) + int64(nn), err
-}
-
-// SetBig initializes m with the bits from n.
-func (m *MPI) SetBig(n *big.Int) *MPI {
- m.bytes = n.Bytes()
- m.bitLength = uint16(n.BitLen())
- return m
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go
deleted file mode 100644
index c9df9fe2..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package encoding
-
-import (
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-// OID is used to store a variable-length field with a one-octet size
-// prefix. See https://tools.ietf.org/html/rfc6637#section-9.
-type OID struct {
- bytes []byte
-}
-
-const (
- // maxOID is the maximum number of bytes in a OID.
- maxOID = 254
- // reservedOIDLength1 and reservedOIDLength2 are OID lengths that the RFC
- // specifies are reserved.
- reservedOIDLength1 = 0
- reservedOIDLength2 = 0xff
-)
-
-// NewOID returns a OID initialized with bytes.
-func NewOID(bytes []byte) *OID {
- switch len(bytes) {
- case reservedOIDLength1, reservedOIDLength2:
- panic("encoding: NewOID argument length is reserved")
- default:
- if len(bytes) > maxOID {
- panic("encoding: NewOID argument too large")
- }
- }
-
- return &OID{
- bytes: bytes,
- }
-}
-
-// Bytes returns the decoded data.
-func (o *OID) Bytes() []byte {
- return o.bytes
-}
-
-// BitLength is the size in bits of the decoded data.
-func (o *OID) BitLength() uint16 {
- return uint16(len(o.bytes) * 8)
-}
-
-// EncodedBytes returns the encoded data.
-func (o *OID) EncodedBytes() []byte {
- return append([]byte{byte(len(o.bytes))}, o.bytes...)
-}
-
-// EncodedLength is the size in bytes of the encoded data.
-func (o *OID) EncodedLength() uint16 {
- return uint16(1 + len(o.bytes))
-}
-
-// ReadFrom reads into b the next OID from r.
-func (o *OID) ReadFrom(r io.Reader) (int64, error) {
- var buf [1]byte
- n, err := io.ReadFull(r, buf[:])
- if err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return int64(n), err
- }
-
- switch buf[0] {
- case reservedOIDLength1, reservedOIDLength2:
- return int64(n), errors.UnsupportedError("reserved for future extensions")
- }
-
- o.bytes = make([]byte, buf[0])
-
- nn, err := io.ReadFull(r, o.bytes)
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
-
- return int64(n) + int64(nn), err
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/short_byte_string.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/short_byte_string.go
deleted file mode 100644
index 0c3b9123..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/short_byte_string.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package encoding
-
-import (
- "io"
-)
-
-type ShortByteString struct {
- length uint8
- data []byte
-}
-
-func NewShortByteString(data []byte) *ShortByteString {
- byteLength := uint8(len(data))
-
- return &ShortByteString{byteLength, data}
-}
-
-func (byteString *ShortByteString) Bytes() []byte {
- return byteString.data
-}
-
-func (byteString *ShortByteString) BitLength() uint16 {
- return uint16(byteString.length) * 8
-}
-
-func (byteString *ShortByteString) EncodedBytes() []byte {
- encodedLength := [1]byte{
- uint8(byteString.length),
- }
- return append(encodedLength[:], byteString.data...)
-}
-
-func (byteString *ShortByteString) EncodedLength() uint16 {
- return uint16(byteString.length) + 1
-}
-
-func (byteString *ShortByteString) ReadFrom(r io.Reader) (int64, error) {
- var lengthBytes [1]byte
- if n, err := io.ReadFull(r, lengthBytes[:]); err != nil {
- return int64(n), err
- }
-
- byteString.length = uint8(lengthBytes[0])
-
- byteString.data = make([]byte, byteString.length)
- if n, err := io.ReadFull(r, byteString.data); err != nil {
- return int64(n + 1), err
- }
- return int64(byteString.length + 1), nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go
deleted file mode 100644
index bcf23175..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go
+++ /dev/null
@@ -1,452 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package openpgp
-
-import (
- "crypto"
- "crypto/rand"
- "crypto/rsa"
- goerrors "errors"
- "io"
- "math/big"
- "time"
-
- "github.com/ProtonMail/go-crypto/openpgp/ecdh"
- "github.com/ProtonMail/go-crypto/openpgp/ecdsa"
- "github.com/ProtonMail/go-crypto/openpgp/ed25519"
- "github.com/ProtonMail/go-crypto/openpgp/ed448"
- "github.com/ProtonMail/go-crypto/openpgp/eddsa"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
- "github.com/ProtonMail/go-crypto/openpgp/packet"
- "github.com/ProtonMail/go-crypto/openpgp/symmetric"
- "github.com/ProtonMail/go-crypto/openpgp/x25519"
- "github.com/ProtonMail/go-crypto/openpgp/x448"
-)
-
-// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a
-// single identity composed of the given full name, comment and email, any of
-// which may be empty but must not contain any of "()<>\x00".
-// If config is nil, sensible defaults will be used.
-func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) {
- creationTime := config.Now()
- keyLifetimeSecs := config.KeyLifetime()
-
- // Generate a primary signing key
- primaryPrivRaw, err := newSigner(config)
- if err != nil {
- return nil, err
- }
- primary := packet.NewSignerPrivateKey(creationTime, primaryPrivRaw)
- if config.V6() {
- primary.UpgradeToV6()
- }
-
- e := &Entity{
- PrimaryKey: &primary.PublicKey,
- PrivateKey: primary,
- Identities: make(map[string]*Identity),
- Subkeys: []Subkey{},
- Signatures: []*packet.Signature{},
- }
-
- if config.V6() {
- // In v6 keys algorithm preferences should be stored in direct key signatures
- selfSignature := createSignaturePacket(&primary.PublicKey, packet.SigTypeDirectSignature, config)
- err = writeKeyProperties(selfSignature, creationTime, keyLifetimeSecs, config)
- if err != nil {
- return nil, err
- }
- err = selfSignature.SignDirectKeyBinding(&primary.PublicKey, primary, config)
- if err != nil {
- return nil, err
- }
- e.Signatures = append(e.Signatures, selfSignature)
- e.SelfSignature = selfSignature
- }
-
- err = e.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs, !config.V6())
- if err != nil {
- return nil, err
- }
-
- // NOTE: No key expiry here, but we will not return this subkey in EncryptionKey()
- // if the primary/master key has expired.
- err = e.addEncryptionSubkey(config, creationTime, 0)
- if err != nil {
- return nil, err
- }
-
- return e, nil
-}
-
-func (t *Entity) AddUserId(name, comment, email string, config *packet.Config) error {
- creationTime := config.Now()
- keyLifetimeSecs := config.KeyLifetime()
- return t.addUserId(name, comment, email, config, creationTime, keyLifetimeSecs, !config.V6())
-}
-
-func writeKeyProperties(selfSignature *packet.Signature, creationTime time.Time, keyLifetimeSecs uint32, config *packet.Config) error {
- selfSignature.CreationTime = creationTime
- selfSignature.KeyLifetimeSecs = &keyLifetimeSecs
- selfSignature.FlagsValid = true
- selfSignature.FlagSign = true
- selfSignature.FlagCertify = true
- selfSignature.SEIPDv1 = true // true by default, see 5.8 vs. 5.14
- selfSignature.SEIPDv2 = config.AEAD() != nil
-
- // Set the PreferredHash for the SelfSignature from the packet.Config.
- // If it is not the must-implement algorithm from rfc4880bis, append that.
- hash, ok := algorithm.HashToHashId(config.Hash())
- if !ok {
- return errors.UnsupportedError("unsupported preferred hash function")
- }
-
- selfSignature.PreferredHash = []uint8{hash}
- if config.Hash() != crypto.SHA256 {
- selfSignature.PreferredHash = append(selfSignature.PreferredHash, hashToHashId(crypto.SHA256))
- }
-
- // Likewise for DefaultCipher.
- selfSignature.PreferredSymmetric = []uint8{uint8(config.Cipher())}
- if config.Cipher() != packet.CipherAES128 {
- selfSignature.PreferredSymmetric = append(selfSignature.PreferredSymmetric, uint8(packet.CipherAES128))
- }
-
- // We set CompressionNone as the preferred compression algorithm because
- // of compression side channel attacks, then append the configured
- // DefaultCompressionAlgo if any is set (to signal support for cases
- // where the application knows that using compression is safe).
- selfSignature.PreferredCompression = []uint8{uint8(packet.CompressionNone)}
- if config.Compression() != packet.CompressionNone {
- selfSignature.PreferredCompression = append(selfSignature.PreferredCompression, uint8(config.Compression()))
- }
-
- // And for DefaultMode.
- modes := []uint8{uint8(config.AEAD().Mode())}
- if config.AEAD().Mode() != packet.AEADModeOCB {
- modes = append(modes, uint8(packet.AEADModeOCB))
- }
-
- // For preferred (AES256, GCM), we'll generate (AES256, GCM), (AES256, OCB), (AES128, GCM), (AES128, OCB)
- for _, cipher := range selfSignature.PreferredSymmetric {
- for _, mode := range modes {
- selfSignature.PreferredCipherSuites = append(selfSignature.PreferredCipherSuites, [2]uint8{cipher, mode})
- }
- }
- return nil
-}
-
-func (t *Entity) addUserId(name, comment, email string, config *packet.Config, creationTime time.Time, keyLifetimeSecs uint32, writeProperties bool) error {
- uid := packet.NewUserId(name, comment, email)
- if uid == nil {
- return errors.InvalidArgumentError("user id field contained invalid characters")
- }
-
- if _, ok := t.Identities[uid.Id]; ok {
- return errors.InvalidArgumentError("user id exist")
- }
-
- primary := t.PrivateKey
- isPrimaryId := len(t.Identities) == 0
- selfSignature := createSignaturePacket(&primary.PublicKey, packet.SigTypePositiveCert, config)
- if writeProperties {
- err := writeKeyProperties(selfSignature, creationTime, keyLifetimeSecs, config)
- if err != nil {
- return err
- }
- }
- selfSignature.IsPrimaryId = &isPrimaryId
-
- // User ID binding signature
- err := selfSignature.SignUserId(uid.Id, &primary.PublicKey, primary, config)
- if err != nil {
- return err
- }
- t.Identities[uid.Id] = &Identity{
- Name: uid.Id,
- UserId: uid,
- SelfSignature: selfSignature,
- Signatures: []*packet.Signature{selfSignature},
- }
- return nil
-}
-
-// AddSigningSubkey adds a signing keypair as a subkey to the Entity.
-// If config is nil, sensible defaults will be used.
-func (e *Entity) AddSigningSubkey(config *packet.Config) error {
- creationTime := config.Now()
- keyLifetimeSecs := config.KeyLifetime()
-
- subPrivRaw, err := newSigner(config)
- if err != nil {
- return err
- }
- sub := packet.NewSignerPrivateKey(creationTime, subPrivRaw)
- sub.IsSubkey = true
- if config.V6() {
- sub.UpgradeToV6()
- }
-
- subkey := Subkey{
- PublicKey: &sub.PublicKey,
- PrivateKey: sub,
- }
- subkey.Sig = createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyBinding, config)
- subkey.Sig.CreationTime = creationTime
- subkey.Sig.KeyLifetimeSecs = &keyLifetimeSecs
- subkey.Sig.FlagsValid = true
- subkey.Sig.FlagSign = true
- subkey.Sig.EmbeddedSignature = createSignaturePacket(subkey.PublicKey, packet.SigTypePrimaryKeyBinding, config)
- subkey.Sig.EmbeddedSignature.CreationTime = creationTime
-
- err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey, subkey.PrivateKey, config)
- if err != nil {
- return err
- }
-
- err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config)
- if err != nil {
- return err
- }
-
- e.Subkeys = append(e.Subkeys, subkey)
- return nil
-}
-
-// AddEncryptionSubkey adds an encryption keypair as a subkey to the Entity.
-// If config is nil, sensible defaults will be used.
-func (e *Entity) AddEncryptionSubkey(config *packet.Config) error {
- creationTime := config.Now()
- keyLifetimeSecs := config.KeyLifetime()
- return e.addEncryptionSubkey(config, creationTime, keyLifetimeSecs)
-}
-
-func (e *Entity) addEncryptionSubkey(config *packet.Config, creationTime time.Time, keyLifetimeSecs uint32) error {
- subPrivRaw, err := newDecrypter(config)
- if err != nil {
- return err
- }
- sub := packet.NewDecrypterPrivateKey(creationTime, subPrivRaw)
- sub.IsSubkey = true
- if config.V6() {
- sub.UpgradeToV6()
- }
-
- subkey := Subkey{
- PublicKey: &sub.PublicKey,
- PrivateKey: sub,
- }
- subkey.Sig = createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyBinding, config)
- subkey.Sig.CreationTime = creationTime
- subkey.Sig.KeyLifetimeSecs = &keyLifetimeSecs
- subkey.Sig.FlagsValid = true
- subkey.Sig.FlagEncryptStorage = true
- subkey.Sig.FlagEncryptCommunications = true
-
- err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config)
- if err != nil {
- return err
- }
-
- e.Subkeys = append(e.Subkeys, subkey)
- return nil
-}
-
-// Generates a signing key
-func newSigner(config *packet.Config) (signer interface{}, err error) {
- switch config.PublicKeyAlgorithm() {
- case packet.PubKeyAlgoRSA:
- bits := config.RSAModulusBits()
- if bits < 1024 {
- return nil, errors.InvalidArgumentError("bits must be >= 1024")
- }
- if config != nil && len(config.RSAPrimes) >= 2 {
- primes := config.RSAPrimes[0:2]
- config.RSAPrimes = config.RSAPrimes[2:]
- return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes)
- }
- return rsa.GenerateKey(config.Random(), bits)
- case packet.PubKeyAlgoEdDSA:
- if config.V6() {
- // Implementations MUST NOT accept or generate v6 key material
- // using the deprecated OIDs.
- return nil, errors.InvalidArgumentError("EdDSALegacy cannot be used for v6 keys")
- }
- curve := ecc.FindEdDSAByGenName(string(config.CurveName()))
- if curve == nil {
- return nil, errors.InvalidArgumentError("unsupported curve")
- }
-
- priv, err := eddsa.GenerateKey(config.Random(), curve)
- if err != nil {
- return nil, err
- }
- return priv, nil
- case packet.PubKeyAlgoECDSA:
- curve := ecc.FindECDSAByGenName(string(config.CurveName()))
- if curve == nil {
- return nil, errors.InvalidArgumentError("unsupported curve")
- }
-
- priv, err := ecdsa.GenerateKey(config.Random(), curve)
- if err != nil {
- return nil, err
- }
- return priv, nil
- case packet.PubKeyAlgoEd25519:
- priv, err := ed25519.GenerateKey(config.Random())
- if err != nil {
- return nil, err
- }
- return priv, nil
- case packet.PubKeyAlgoEd448:
- priv, err := ed448.GenerateKey(config.Random())
- if err != nil {
- return nil, err
- }
- return priv, nil
- case packet.ExperimentalPubKeyAlgoHMAC:
- hash := algorithm.HashById[hashToHashId(config.Hash())]
- return symmetric.HMACGenerateKey(config.Random(), hash)
- default:
- return nil, errors.InvalidArgumentError("unsupported public key algorithm")
- }
-}
-
-// Generates an encryption/decryption key
-func newDecrypter(config *packet.Config) (decrypter interface{}, err error) {
- switch config.PublicKeyAlgorithm() {
- case packet.PubKeyAlgoRSA:
- bits := config.RSAModulusBits()
- if bits < 1024 {
- return nil, errors.InvalidArgumentError("bits must be >= 1024")
- }
- if config != nil && len(config.RSAPrimes) >= 2 {
- primes := config.RSAPrimes[0:2]
- config.RSAPrimes = config.RSAPrimes[2:]
- return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes)
- }
- return rsa.GenerateKey(config.Random(), bits)
- case packet.PubKeyAlgoEdDSA, packet.PubKeyAlgoECDSA:
- fallthrough // When passing EdDSA or ECDSA, we generate an ECDH subkey
- case packet.PubKeyAlgoECDH:
- if config.V6() &&
- (config.CurveName() == packet.Curve25519 ||
- config.CurveName() == packet.Curve448) {
- // Implementations MUST NOT accept or generate v6 key material
- // using the deprecated OIDs.
- return nil, errors.InvalidArgumentError("ECDH with Curve25519/448 legacy cannot be used for v6 keys")
- }
- var kdf = ecdh.KDF{
- Hash: algorithm.SHA512,
- Cipher: algorithm.AES256,
- }
- curve := ecc.FindECDHByGenName(string(config.CurveName()))
- if curve == nil {
- return nil, errors.InvalidArgumentError("unsupported curve")
- }
- return ecdh.GenerateKey(config.Random(), curve, kdf)
- case packet.PubKeyAlgoEd25519, packet.PubKeyAlgoX25519: // When passing Ed25519, we generate an x25519 subkey
- return x25519.GenerateKey(config.Random())
- case packet.PubKeyAlgoEd448, packet.PubKeyAlgoX448: // When passing Ed448, we generate an x448 subkey
- return x448.GenerateKey(config.Random())
- case packet.ExperimentalPubKeyAlgoAEAD:
- cipher := algorithm.CipherFunction(config.Cipher())
- return symmetric.AEADGenerateKey(config.Random(), cipher)
- default:
- return nil, errors.InvalidArgumentError("unsupported public key algorithm")
- }
-}
-
-var bigOne = big.NewInt(1)
-
-// generateRSAKeyWithPrimes generates a multi-prime RSA keypair of the
-// given bit size, using the given random source and pre-populated primes.
-func generateRSAKeyWithPrimes(random io.Reader, nprimes int, bits int, prepopulatedPrimes []*big.Int) (*rsa.PrivateKey, error) {
- priv := new(rsa.PrivateKey)
- priv.E = 65537
-
- if nprimes < 2 {
- return nil, goerrors.New("generateRSAKeyWithPrimes: nprimes must be >= 2")
- }
-
- if bits < 1024 {
- return nil, goerrors.New("generateRSAKeyWithPrimes: bits must be >= 1024")
- }
-
- primes := make([]*big.Int, nprimes)
-
-NextSetOfPrimes:
- for {
- todo := bits
- // crypto/rand should set the top two bits in each prime.
- // Thus each prime has the form
- // p_i = 2^bitlen(p_i) × 0.11... (in base 2).
- // And the product is:
- // P = 2^todo × α
- // where α is the product of nprimes numbers of the form 0.11...
- //
- // If α < 1/2 (which can happen for nprimes > 2), we need to
- // shift todo to compensate for lost bits: the mean value of 0.11...
- // is 7/8, so todo + shift - nprimes * log2(7/8) ~= bits - 1/2
- // will give good results.
- if nprimes >= 7 {
- todo += (nprimes - 2) / 5
- }
- for i := 0; i < nprimes; i++ {
- var err error
- if len(prepopulatedPrimes) == 0 {
- primes[i], err = rand.Prime(random, todo/(nprimes-i))
- if err != nil {
- return nil, err
- }
- } else {
- primes[i] = prepopulatedPrimes[0]
- prepopulatedPrimes = prepopulatedPrimes[1:]
- }
-
- todo -= primes[i].BitLen()
- }
-
- // Make sure that primes is pairwise unequal.
- for i, prime := range primes {
- for j := 0; j < i; j++ {
- if prime.Cmp(primes[j]) == 0 {
- continue NextSetOfPrimes
- }
- }
- }
-
- n := new(big.Int).Set(bigOne)
- totient := new(big.Int).Set(bigOne)
- pminus1 := new(big.Int)
- for _, prime := range primes {
- n.Mul(n, prime)
- pminus1.Sub(prime, bigOne)
- totient.Mul(totient, pminus1)
- }
- if n.BitLen() != bits {
- // This should never happen for nprimes == 2 because
- // crypto/rand should set the top two bits in each prime.
- // For nprimes > 2 we hope it does not happen often.
- continue NextSetOfPrimes
- }
-
- priv.D = new(big.Int)
- e := big.NewInt(int64(priv.E))
- ok := priv.D.ModInverse(e, totient)
-
- if ok != nil {
- priv.Primes = primes
- priv.N = n
- break
- }
- }
-
- priv.Precompute()
- return priv, nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go
deleted file mode 100644
index bbcc95d9..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go
+++ /dev/null
@@ -1,915 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package openpgp
-
-import (
- goerrors "errors"
- "fmt"
- "io"
- "time"
-
- "github.com/ProtonMail/go-crypto/openpgp/armor"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/packet"
-)
-
-// PublicKeyType is the armor type for a PGP public key.
-var PublicKeyType = "PGP PUBLIC KEY BLOCK"
-
-// PrivateKeyType is the armor type for a PGP private key.
-var PrivateKeyType = "PGP PRIVATE KEY BLOCK"
-
-// An Entity represents the components of an OpenPGP key: a primary public key
-// (which must be a signing key), one or more identities claimed by that key,
-// and zero or more subkeys, which may be encryption keys.
-type Entity struct {
- PrimaryKey *packet.PublicKey
- PrivateKey *packet.PrivateKey
- Identities map[string]*Identity // indexed by Identity.Name
- Revocations []*packet.Signature
- Subkeys []Subkey
- SelfSignature *packet.Signature // Direct-key self signature of the PrimaryKey (contains primary key properties in v6)
- Signatures []*packet.Signature // all (potentially unverified) self-signatures, revocations, and third-party signatures
-}
-
-// An Identity represents an identity claimed by an Entity and zero or more
-// assertions by other entities about that claim.
-type Identity struct {
- Name string // by convention, has the form "Full Name (comment) "
- UserId *packet.UserId
- SelfSignature *packet.Signature
- Revocations []*packet.Signature
- Signatures []*packet.Signature // all (potentially unverified) self-signatures, revocations, and third-party signatures
-}
-
-// A Subkey is an additional public key in an Entity. Subkeys can be used for
-// encryption.
-type Subkey struct {
- PublicKey *packet.PublicKey
- PrivateKey *packet.PrivateKey
- Sig *packet.Signature
- Revocations []*packet.Signature
-}
-
-// A Key identifies a specific public key in an Entity. This is either the
-// Entity's primary key or a subkey.
-type Key struct {
- Entity *Entity
- PublicKey *packet.PublicKey
- PrivateKey *packet.PrivateKey
- SelfSignature *packet.Signature
- Revocations []*packet.Signature
-}
-
-// A KeyRing provides access to public and private keys.
-type KeyRing interface {
- // KeysById returns the set of keys that have the given key id.
- KeysById(id uint64) []Key
- // KeysByIdAndUsage returns the set of keys with the given id
- // that also meet the key usage given by requiredUsage.
- // The requiredUsage is expressed as the bitwise-OR of
- // packet.KeyFlag* values.
- KeysByIdUsage(id uint64, requiredUsage byte) []Key
- // DecryptionKeys returns all private keys that are valid for
- // decryption.
- DecryptionKeys() []Key
-}
-
-// PrimaryIdentity returns an Identity, preferring non-revoked identities,
-// identities marked as primary, or the latest-created identity, in that order.
-func (e *Entity) PrimaryIdentity() *Identity {
- var primaryIdentity *Identity
- for _, ident := range e.Identities {
- if shouldPreferIdentity(primaryIdentity, ident) {
- primaryIdentity = ident
- }
- }
- return primaryIdentity
-}
-
-func shouldPreferIdentity(existingId, potentialNewId *Identity) bool {
- if existingId == nil {
- return true
- }
-
- if len(existingId.Revocations) > len(potentialNewId.Revocations) {
- return true
- }
-
- if len(existingId.Revocations) < len(potentialNewId.Revocations) {
- return false
- }
-
- if existingId.SelfSignature == nil {
- return true
- }
-
- if existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId &&
- !(potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId) {
- return false
- }
-
- if !(existingId.SelfSignature.IsPrimaryId != nil && *existingId.SelfSignature.IsPrimaryId) &&
- potentialNewId.SelfSignature.IsPrimaryId != nil && *potentialNewId.SelfSignature.IsPrimaryId {
- return true
- }
-
- return potentialNewId.SelfSignature.CreationTime.After(existingId.SelfSignature.CreationTime)
-}
-
-// EncryptionKey returns the best candidate Key for encrypting a message to the
-// given Entity.
-func (e *Entity) EncryptionKey(now time.Time) (Key, bool) {
- // Fail to find any encryption key if the...
- primarySelfSignature, primaryIdentity := e.PrimarySelfSignature()
- if primarySelfSignature == nil || // no self-signature found
- e.PrimaryKey.KeyExpired(primarySelfSignature, now) || // primary key has expired
- e.Revoked(now) || // primary key has been revoked
- primarySelfSignature.SigExpired(now) || // user ID or or direct self-signature has expired
- (primaryIdentity != nil && primaryIdentity.Revoked(now)) { // user ID has been revoked (for v4 keys)
- return Key{}, false
- }
-
- // Iterate the keys to find the newest, unexpired one
- candidateSubkey := -1
- var maxTime time.Time
- for i, subkey := range e.Subkeys {
- if subkey.Sig.FlagsValid &&
- subkey.Sig.FlagEncryptCommunications &&
- subkey.PublicKey.PubKeyAlgo.CanEncrypt() &&
- !subkey.PublicKey.KeyExpired(subkey.Sig, now) &&
- !subkey.Sig.SigExpired(now) &&
- !subkey.Revoked(now) &&
- (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) {
- candidateSubkey = i
- maxTime = subkey.Sig.CreationTime
- }
- }
-
- if candidateSubkey != -1 {
- subkey := e.Subkeys[candidateSubkey]
- return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true
- }
-
- // If we don't have any subkeys for encryption and the primary key
- // is marked as OK to encrypt with, then we can use it.
- if primarySelfSignature.FlagsValid && primarySelfSignature.FlagEncryptCommunications &&
- e.PrimaryKey.PubKeyAlgo.CanEncrypt() {
- return Key{e, e.PrimaryKey, e.PrivateKey, primarySelfSignature, e.Revocations}, true
- }
-
- return Key{}, false
-}
-
-// CertificationKey return the best candidate Key for certifying a key with this
-// Entity.
-func (e *Entity) CertificationKey(now time.Time) (Key, bool) {
- return e.CertificationKeyById(now, 0)
-}
-
-// CertificationKeyById return the Key for key certification with this
-// Entity and keyID.
-func (e *Entity) CertificationKeyById(now time.Time, id uint64) (Key, bool) {
- return e.signingKeyByIdUsage(now, id, packet.KeyFlagCertify)
-}
-
-// SigningKey return the best candidate Key for signing a message with this
-// Entity.
-func (e *Entity) SigningKey(now time.Time) (Key, bool) {
- return e.SigningKeyById(now, 0)
-}
-
-// SigningKeyById return the Key for signing a message with this
-// Entity and keyID.
-func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) {
- return e.signingKeyByIdUsage(now, id, packet.KeyFlagSign)
-}
-
-func (e *Entity) signingKeyByIdUsage(now time.Time, id uint64, flags int) (Key, bool) {
- // Fail to find any signing key if the...
- primarySelfSignature, primaryIdentity := e.PrimarySelfSignature()
- if primarySelfSignature == nil || // no self-signature found
- e.PrimaryKey.KeyExpired(primarySelfSignature, now) || // primary key has expired
- e.Revoked(now) || // primary key has been revoked
- primarySelfSignature.SigExpired(now) || // user ID or direct self-signature has expired
- (primaryIdentity != nil && primaryIdentity.Revoked(now)) { // user ID has been revoked (for v4 keys)
- return Key{}, false
- }
-
- // Iterate the keys to find the newest, unexpired one
- candidateSubkey := -1
- var maxTime time.Time
- for idx, subkey := range e.Subkeys {
- if subkey.Sig.FlagsValid &&
- (flags&packet.KeyFlagCertify == 0 || subkey.Sig.FlagCertify) &&
- (flags&packet.KeyFlagSign == 0 || subkey.Sig.FlagSign) &&
- subkey.PublicKey.PubKeyAlgo.CanSign() &&
- !subkey.PublicKey.KeyExpired(subkey.Sig, now) &&
- !subkey.Sig.SigExpired(now) &&
- !subkey.Revoked(now) &&
- (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) &&
- (id == 0 || subkey.PublicKey.KeyId == id) {
- candidateSubkey = idx
- maxTime = subkey.Sig.CreationTime
- }
- }
-
- if candidateSubkey != -1 {
- subkey := e.Subkeys[candidateSubkey]
- return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Revocations}, true
- }
-
- // If we don't have any subkeys for signing and the primary key
- // is marked as OK to sign with, then we can use it.
- if primarySelfSignature.FlagsValid &&
- (flags&packet.KeyFlagCertify == 0 || primarySelfSignature.FlagCertify) &&
- (flags&packet.KeyFlagSign == 0 || primarySelfSignature.FlagSign) &&
- e.PrimaryKey.PubKeyAlgo.CanSign() &&
- (id == 0 || e.PrimaryKey.KeyId == id) {
- return Key{e, e.PrimaryKey, e.PrivateKey, primarySelfSignature, e.Revocations}, true
- }
-
- // No keys with a valid Signing Flag or no keys matched the id passed in
- return Key{}, false
-}
-
-func revoked(revocations []*packet.Signature, now time.Time) bool {
- for _, revocation := range revocations {
- if revocation.RevocationReason != nil && *revocation.RevocationReason == packet.KeyCompromised {
- // If the key is compromised, the key is considered revoked even before the revocation date.
- return true
- }
- if !revocation.SigExpired(now) {
- return true
- }
- }
- return false
-}
-
-// Revoked returns whether the entity has any direct key revocation signatures.
-// Note that third-party revocation signatures are not supported.
-// Note also that Identity and Subkey revocation should be checked separately.
-func (e *Entity) Revoked(now time.Time) bool {
- return revoked(e.Revocations, now)
-}
-
-// EncryptPrivateKeys encrypts all non-encrypted keys in the entity with the same key
-// derived from the provided passphrase. Public keys and dummy keys are ignored,
-// and don't cause an error to be returned.
-func (e *Entity) EncryptPrivateKeys(passphrase []byte, config *packet.Config) error {
- var keysToEncrypt []*packet.PrivateKey
- // Add entity private key to encrypt.
- if e.PrivateKey != nil && !e.PrivateKey.Dummy() && !e.PrivateKey.Encrypted {
- keysToEncrypt = append(keysToEncrypt, e.PrivateKey)
- }
-
- // Add subkeys to encrypt.
- for _, sub := range e.Subkeys {
- if sub.PrivateKey != nil && !sub.PrivateKey.Dummy() && !sub.PrivateKey.Encrypted {
- keysToEncrypt = append(keysToEncrypt, sub.PrivateKey)
- }
- }
- return packet.EncryptPrivateKeys(keysToEncrypt, passphrase, config)
-}
-
-// DecryptPrivateKeys decrypts all encrypted keys in the entity with the given passphrase.
-// Avoids recomputation of similar s2k key derivations. Public keys and dummy keys are ignored,
-// and don't cause an error to be returned.
-func (e *Entity) DecryptPrivateKeys(passphrase []byte) error {
- var keysToDecrypt []*packet.PrivateKey
- // Add entity private key to decrypt.
- if e.PrivateKey != nil && !e.PrivateKey.Dummy() && e.PrivateKey.Encrypted {
- keysToDecrypt = append(keysToDecrypt, e.PrivateKey)
- }
-
- // Add subkeys to decrypt.
- for _, sub := range e.Subkeys {
- if sub.PrivateKey != nil && !sub.PrivateKey.Dummy() && sub.PrivateKey.Encrypted {
- keysToDecrypt = append(keysToDecrypt, sub.PrivateKey)
- }
- }
- return packet.DecryptPrivateKeys(keysToDecrypt, passphrase)
-}
-
-// Revoked returns whether the identity has been revoked by a self-signature.
-// Note that third-party revocation signatures are not supported.
-func (i *Identity) Revoked(now time.Time) bool {
- return revoked(i.Revocations, now)
-}
-
-// Revoked returns whether the subkey has been revoked by a self-signature.
-// Note that third-party revocation signatures are not supported.
-func (s *Subkey) Revoked(now time.Time) bool {
- return revoked(s.Revocations, now)
-}
-
-// Revoked returns whether the key or subkey has been revoked by a self-signature.
-// Note that third-party revocation signatures are not supported.
-// Note also that Identity revocation should be checked separately.
-// Normally, it's not necessary to call this function, except on keys returned by
-// KeysById or KeysByIdUsage.
-func (key *Key) Revoked(now time.Time) bool {
- return revoked(key.Revocations, now)
-}
-
-// An EntityList contains one or more Entities.
-type EntityList []*Entity
-
-// KeysById returns the set of keys that have the given key id.
-func (el EntityList) KeysById(id uint64) (keys []Key) {
- for _, e := range el {
- if e.PrimaryKey.KeyId == id {
- selfSig, _ := e.PrimarySelfSignature()
- keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig, e.Revocations})
- }
-
- for _, subKey := range e.Subkeys {
- if subKey.PublicKey.KeyId == id {
- keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations})
- }
- }
- }
- return
-}
-
-// KeysByIdAndUsage returns the set of keys with the given id that also meet
-// the key usage given by requiredUsage. The requiredUsage is expressed as
-// the bitwise-OR of packet.KeyFlag* values.
-func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) {
- for _, key := range el.KeysById(id) {
- if requiredUsage != 0 {
- if key.SelfSignature == nil || !key.SelfSignature.FlagsValid {
- continue
- }
-
- var usage byte
- if key.SelfSignature.FlagCertify {
- usage |= packet.KeyFlagCertify
- }
- if key.SelfSignature.FlagSign {
- usage |= packet.KeyFlagSign
- }
- if key.SelfSignature.FlagEncryptCommunications {
- usage |= packet.KeyFlagEncryptCommunications
- }
- if key.SelfSignature.FlagEncryptStorage {
- usage |= packet.KeyFlagEncryptStorage
- }
- if usage&requiredUsage != requiredUsage {
- continue
- }
- }
-
- keys = append(keys, key)
- }
- return
-}
-
-// DecryptionKeys returns all private keys that are valid for decryption.
-func (el EntityList) DecryptionKeys() (keys []Key) {
- for _, e := range el {
- for _, subKey := range e.Subkeys {
- if subKey.PrivateKey != nil && subKey.Sig.FlagsValid && (subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications || subKey.Sig.FlagForward) {
- keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Revocations})
- }
- }
- }
- return
-}
-
-// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file.
-func ReadArmoredKeyRing(r io.Reader) (EntityList, error) {
- block, err := armor.Decode(r)
- if err == io.EOF {
- return nil, errors.InvalidArgumentError("no armored data found")
- }
- if err != nil {
- return nil, err
- }
- if block.Type != PublicKeyType && block.Type != PrivateKeyType {
- return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type)
- }
-
- return ReadKeyRing(block.Body)
-}
-
-// ReadKeyRing reads one or more public/private keys. Unsupported keys are
-// ignored as long as at least a single valid key is found.
-func ReadKeyRing(r io.Reader) (el EntityList, err error) {
- packets := packet.NewReader(r)
- var lastUnsupportedError error
-
- for {
- var e *Entity
- e, err = ReadEntity(packets)
- if err != nil {
- // TODO: warn about skipped unsupported/unreadable keys
- if _, ok := err.(errors.UnsupportedError); ok {
- lastUnsupportedError = err
- err = readToNextPublicKey(packets)
- } else if _, ok := err.(errors.StructuralError); ok {
- // Skip unreadable, badly-formatted keys
- lastUnsupportedError = err
- err = readToNextPublicKey(packets)
- }
- if err == io.EOF {
- err = nil
- break
- }
- if err != nil {
- el = nil
- break
- }
- } else {
- el = append(el, e)
- }
- }
-
- if len(el) == 0 && err == nil {
- err = lastUnsupportedError
- }
- return
-}
-
-// readToNextPublicKey reads packets until the start of the entity and leaves
-// the first packet of the new entity in the Reader.
-func readToNextPublicKey(packets *packet.Reader) (err error) {
- var p packet.Packet
- for {
- p, err = packets.Next()
- if err == io.EOF {
- return
- } else if err != nil {
- if _, ok := err.(errors.UnsupportedError); ok {
- continue
- }
- return
- }
-
- if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey {
- packets.Unread(p)
- return
- }
- }
-}
-
-// ReadEntity reads an entity (public key, identities, subkeys etc) from the
-// given Reader.
-func ReadEntity(packets *packet.Reader) (*Entity, error) {
- e := new(Entity)
- e.Identities = make(map[string]*Identity)
-
- p, err := packets.Next()
- if err != nil {
- return nil, err
- }
-
- var ok bool
- if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok {
- if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok {
- packets.Unread(p)
- return nil, errors.StructuralError("first packet was not a public/private key")
- }
- e.PrimaryKey = &e.PrivateKey.PublicKey
- }
-
- if !e.PrimaryKey.PubKeyAlgo.CanSign() {
- return nil, errors.StructuralError("primary key cannot be used for signatures")
- }
-
- var revocations []*packet.Signature
- var directSignatures []*packet.Signature
-EachPacket:
- for {
- p, err := packets.Next()
- if err == io.EOF {
- break
- } else if err != nil {
- return nil, err
- }
-
- switch pkt := p.(type) {
- case *packet.UserId:
- if err := addUserID(e, packets, pkt); err != nil {
- return nil, err
- }
- case *packet.Signature:
- if pkt.SigType == packet.SigTypeKeyRevocation {
- revocations = append(revocations, pkt)
- } else if pkt.SigType == packet.SigTypeDirectSignature {
- directSignatures = append(directSignatures, pkt)
- }
- // Else, ignoring the signature as it does not follow anything
- // we would know to attach it to.
- case *packet.PrivateKey:
- if !pkt.IsSubkey {
- packets.Unread(p)
- break EachPacket
- }
- err = addSubkey(e, packets, &pkt.PublicKey, pkt)
- if err != nil {
- return nil, err
- }
- case *packet.PublicKey:
- if !pkt.IsSubkey {
- packets.Unread(p)
- break EachPacket
- }
- err = addSubkey(e, packets, pkt, nil)
- if err != nil {
- return nil, err
- }
- default:
- // we ignore unknown packets.
- }
- }
-
- if len(e.Identities) == 0 && e.PrimaryKey.Version < 6 {
- return nil, errors.StructuralError(fmt.Sprintf("v%d entity without any identities", e.PrimaryKey.Version))
- }
-
- // An implementation MUST ensure that a valid direct-key signature is present before using a v6 key.
- if e.PrimaryKey.Version == 6 {
- if len(directSignatures) == 0 {
- return nil, errors.StructuralError("v6 entity without a valid direct-key signature")
- }
- // Select main direct key signature.
- var mainDirectKeySelfSignature *packet.Signature
- for _, directSignature := range directSignatures {
- if directSignature.SigType == packet.SigTypeDirectSignature &&
- directSignature.CheckKeyIdOrFingerprint(e.PrimaryKey) &&
- (mainDirectKeySelfSignature == nil ||
- directSignature.CreationTime.After(mainDirectKeySelfSignature.CreationTime)) {
- mainDirectKeySelfSignature = directSignature
- }
- }
- if mainDirectKeySelfSignature == nil {
- return nil, errors.StructuralError("no valid direct-key self-signature for v6 primary key found")
- }
- // Check that the main self-signature is valid.
- err = e.PrimaryKey.VerifyDirectKeySignature(mainDirectKeySelfSignature)
- if err != nil {
- return nil, errors.StructuralError("invalid direct-key self-signature for v6 primary key")
- }
- e.SelfSignature = mainDirectKeySelfSignature
- e.Signatures = directSignatures
- }
-
- for _, revocation := range revocations {
- err = e.PrimaryKey.VerifyRevocationSignature(revocation)
- if err == nil {
- e.Revocations = append(e.Revocations, revocation)
- } else {
- // TODO: RFC 4880 5.2.3.15 defines revocation keys.
- return nil, errors.StructuralError("revocation signature signed by alternate key")
- }
- }
-
- return e, nil
-}
-
-func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error {
- // Make a new Identity object, that we might wind up throwing away.
- // We'll only add it if we get a valid self-signature over this
- // userID.
- identity := new(Identity)
- identity.Name = pkt.Id
- identity.UserId = pkt
-
- for {
- p, err := packets.Next()
- if err == io.EOF {
- break
- } else if err != nil {
- return err
- }
-
- sig, ok := p.(*packet.Signature)
- if !ok {
- packets.Unread(p)
- break
- }
-
- if sig.SigType != packet.SigTypeGenericCert &&
- sig.SigType != packet.SigTypePersonaCert &&
- sig.SigType != packet.SigTypeCasualCert &&
- sig.SigType != packet.SigTypePositiveCert &&
- sig.SigType != packet.SigTypeCertificationRevocation {
- return errors.StructuralError("user ID signature with wrong type")
- }
-
- if sig.CheckKeyIdOrFingerprint(e.PrimaryKey) {
- if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil {
- return errors.StructuralError("user ID self-signature invalid: " + err.Error())
- }
- if sig.SigType == packet.SigTypeCertificationRevocation {
- identity.Revocations = append(identity.Revocations, sig)
- } else if identity.SelfSignature == nil || sig.CreationTime.After(identity.SelfSignature.CreationTime) {
- identity.SelfSignature = sig
- }
- identity.Signatures = append(identity.Signatures, sig)
- e.Identities[pkt.Id] = identity
- } else {
- identity.Signatures = append(identity.Signatures, sig)
- }
- }
-
- return nil
-}
-
-func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error {
- var subKey Subkey
- subKey.PublicKey = pub
- subKey.PrivateKey = priv
-
- for {
- p, err := packets.Next()
- if err == io.EOF {
- break
- } else if err != nil {
- return errors.StructuralError("subkey signature invalid: " + err.Error())
- }
-
- sig, ok := p.(*packet.Signature)
- if !ok {
- packets.Unread(p)
- break
- }
-
- if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation {
- return errors.StructuralError("subkey signature with wrong type")
- }
-
- if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil {
- return errors.StructuralError("subkey signature invalid: " + err.Error())
- }
-
- switch sig.SigType {
- case packet.SigTypeSubkeyRevocation:
- subKey.Revocations = append(subKey.Revocations, sig)
- case packet.SigTypeSubkeyBinding:
- if subKey.Sig == nil || sig.CreationTime.After(subKey.Sig.CreationTime) {
- subKey.Sig = sig
- }
- }
- }
-
- if subKey.Sig == nil {
- return errors.StructuralError("subkey packet not followed by signature")
- }
-
- e.Subkeys = append(e.Subkeys, subKey)
-
- return nil
-}
-
-// SerializePrivate serializes an Entity, including private key material, but
-// excluding signatures from other entities, to the given Writer.
-// Identities and subkeys are re-signed in case they changed since NewEntry.
-// If config is nil, sensible defaults will be used.
-func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) {
- if e.PrivateKey.Dummy() {
- return errors.ErrDummyPrivateKey("dummy private key cannot re-sign identities")
- }
- return e.serializePrivate(w, config, true)
-}
-
-// SerializePrivateWithoutSigning serializes an Entity, including private key
-// material, but excluding signatures from other entities, to the given Writer.
-// Self-signatures of identities and subkeys are not re-signed. This is useful
-// when serializing GNU dummy keys, among other things.
-// If config is nil, sensible defaults will be used.
-func (e *Entity) SerializePrivateWithoutSigning(w io.Writer, config *packet.Config) (err error) {
- return e.serializePrivate(w, config, false)
-}
-
-func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign bool) (err error) {
- if e.PrivateKey == nil {
- return goerrors.New("openpgp: private key is missing")
- }
- err = e.PrivateKey.Serialize(w)
- if err != nil {
- return
- }
- for _, revocation := range e.Revocations {
- err := revocation.Serialize(w)
- if err != nil {
- return err
- }
- }
- for _, directSignature := range e.Signatures {
- err := directSignature.Serialize(w)
- if err != nil {
- return err
- }
- }
- for _, ident := range e.Identities {
- err = ident.UserId.Serialize(w)
- if err != nil {
- return
- }
- if reSign {
- if ident.SelfSignature == nil {
- return goerrors.New("openpgp: can't re-sign identity without valid self-signature")
- }
- err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config)
- if err != nil {
- return
- }
- }
- for _, sig := range ident.Signatures {
- err = sig.Serialize(w)
- if err != nil {
- return err
- }
- }
- }
- for _, subkey := range e.Subkeys {
- err = subkey.PrivateKey.Serialize(w)
- if err != nil {
- return
- }
- if reSign {
- err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config)
- if err != nil {
- return
- }
- if subkey.Sig.EmbeddedSignature != nil {
- err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey,
- subkey.PrivateKey, config)
- if err != nil {
- return
- }
- }
- }
- for _, revocation := range subkey.Revocations {
- err := revocation.Serialize(w)
- if err != nil {
- return err
- }
- }
- err = subkey.Sig.Serialize(w)
- if err != nil {
- return
- }
- }
- return nil
-}
-
-// Serialize writes the public part of the given Entity to w, including
-// signatures from other entities. No private key material will be output.
-func (e *Entity) Serialize(w io.Writer) error {
- if e.PrimaryKey.PubKeyAlgo == packet.ExperimentalPubKeyAlgoHMAC ||
- e.PrimaryKey.PubKeyAlgo == packet.ExperimentalPubKeyAlgoAEAD {
- return errors.InvalidArgumentError("Can't serialize symmetric primary key")
- }
- err := e.PrimaryKey.Serialize(w)
- if err != nil {
- return err
- }
- for _, revocation := range e.Revocations {
- err := revocation.Serialize(w)
- if err != nil {
- return err
- }
- }
- for _, directSignature := range e.Signatures {
- err := directSignature.Serialize(w)
- if err != nil {
- return err
- }
- }
- for _, ident := range e.Identities {
- err = ident.UserId.Serialize(w)
- if err != nil {
- return err
- }
- for _, sig := range ident.Signatures {
- err = sig.Serialize(w)
- if err != nil {
- return err
- }
- }
- }
- for _, subkey := range e.Subkeys {
- // The types of keys below are only useful as private keys. Thus, the
- // public key packets contain no meaningful information and do not need
- // to be serialized.
- // Prevent public key export for forwarding keys, see forwarding section 4.1.
- if subkey.PublicKey.PubKeyAlgo == packet.ExperimentalPubKeyAlgoHMAC ||
- subkey.PublicKey.PubKeyAlgo == packet.ExperimentalPubKeyAlgoAEAD ||
- subkey.Sig.FlagForward {
- continue
- }
-
- err = subkey.PublicKey.Serialize(w)
- if err != nil {
- return err
- }
- for _, revocation := range subkey.Revocations {
- err := revocation.Serialize(w)
- if err != nil {
- return err
- }
- }
- err = subkey.Sig.Serialize(w)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// SignIdentity adds a signature to e, from signer, attesting that identity is
-// associated with e. The provided identity must already be an element of
-// e.Identities and the private key of signer must have been decrypted if
-// necessary.
-// If config is nil, sensible defaults will be used.
-func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error {
- certificationKey, ok := signer.CertificationKey(config.Now())
- if !ok {
- return errors.InvalidArgumentError("no valid certification key found")
- }
-
- if certificationKey.PrivateKey.Encrypted {
- return errors.InvalidArgumentError("signing Entity's private key must be decrypted")
- }
-
- ident, ok := e.Identities[identity]
- if !ok {
- return errors.InvalidArgumentError("given identity string not found in Entity")
- }
-
- sig := createSignaturePacket(certificationKey.PublicKey, packet.SigTypeGenericCert, config)
-
- signingUserID := config.SigningUserId()
- if signingUserID != "" {
- if _, ok := signer.Identities[signingUserID]; !ok {
- return errors.InvalidArgumentError("signer identity string not found in signer Entity")
- }
- sig.SignerUserId = &signingUserID
- }
-
- if err := sig.SignUserId(identity, e.PrimaryKey, certificationKey.PrivateKey, config); err != nil {
- return err
- }
- ident.Signatures = append(ident.Signatures, sig)
- return nil
-}
-
-// RevokeKey generates a key revocation signature (packet.SigTypeKeyRevocation) with the
-// specified reason code and text (RFC4880 section-5.2.3.23).
-// If config is nil, sensible defaults will be used.
-func (e *Entity) RevokeKey(reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error {
- revSig := createSignaturePacket(e.PrimaryKey, packet.SigTypeKeyRevocation, config)
- revSig.RevocationReason = &reason
- revSig.RevocationReasonText = reasonText
-
- if err := revSig.RevokeKey(e.PrimaryKey, e.PrivateKey, config); err != nil {
- return err
- }
- e.Revocations = append(e.Revocations, revSig)
- return nil
-}
-
-// RevokeSubkey generates a subkey revocation signature (packet.SigTypeSubkeyRevocation) for
-// a subkey with the specified reason code and text (RFC4880 section-5.2.3.23).
-// If config is nil, sensible defaults will be used.
-func (e *Entity) RevokeSubkey(sk *Subkey, reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error {
- if err := e.PrimaryKey.VerifyKeySignature(sk.PublicKey, sk.Sig); err != nil {
- return errors.InvalidArgumentError("given subkey is not associated with this key")
- }
-
- revSig := createSignaturePacket(e.PrimaryKey, packet.SigTypeSubkeyRevocation, config)
- revSig.RevocationReason = &reason
- revSig.RevocationReasonText = reasonText
-
- if err := revSig.RevokeSubkey(sk.PublicKey, e.PrivateKey, config); err != nil {
- return err
- }
-
- sk.Revocations = append(sk.Revocations, revSig)
- return nil
-}
-
-func (e *Entity) primaryDirectSignature() *packet.Signature {
- return e.SelfSignature
-}
-
-// PrimarySelfSignature searches the entity for the self-signature that stores key preferences.
-// For V4 keys, returns the self-signature of the primary identity, and the identity.
-// For V6 keys, returns the latest valid direct-key self-signature, and no identity (nil).
-// This self-signature is to be used to check the key expiration,
-// algorithm preferences, and so on.
-func (e *Entity) PrimarySelfSignature() (*packet.Signature, *Identity) {
- if e.PrimaryKey.Version == 6 {
- return e.primaryDirectSignature(), nil
- }
- primaryIdentity := e.PrimaryIdentity()
- if primaryIdentity == nil {
- return nil, nil
- }
- return primaryIdentity.SelfSignature, primaryIdentity
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go
deleted file mode 100644
index 108fd096..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go
+++ /dev/null
@@ -1,538 +0,0 @@
-package openpgp
-
-const expiringKeyHex = "c6c04d0451d0c680010800abbb021fd03ffc4e96618901180c3fdcb060ee69eeead97b91256d11420d80b5f1b51930248044130bd300605cf8a05b7a40d3d8cfb0a910be2e3db50dcd50a9c54064c2a5550801daa834ff4480b33d3d3ca495ff8a4e84a886977d17d998f881241a874083d8b995beab555b6d22b8a4817ab17ac3e7304f7d4d2c05c495fb2218348d3bc13651db1d92732e368a9dd7dcefa6eddff30b94706a9aaee47e9d39321460b740c59c6fc3c2fd8ab6c0fb868cb87c0051f0321301fe0f0e1820b15e7fb7063395769b525005c7e30a7ce85984f5cac00504e7b4fdc45d74958de8388436fd5c7ba9ea121f1c851b5911dd1b47a14d81a09e92ef37721e2325b6790011010001cd00c2c07b041001080025050251d0c680050900278d00060b09070803020415080a0203160201021901021b03021e01000a0910e7b484133a890a35ae4b0800a1beb82e7f28eaf5273d6af9d3391314f6280b2b624eaca2851f89a9ebcaf80ac589ebd509f168bc4322106ca2e2ce77a76e071a3c7444787d65216b5f05e82c77928860b92aace3b7d0327db59492f422eb9dfab7249266d37429870b091a98aba8724c2259ebf8f85093f21255eafa75aa841e31d94f2ac891b9755fed455e539044ee69fc47950b80e003fc9f298d695660f28329eaa38037c367efde1727458e514faf990d439a21461b719edaddf9296d3d0647b43ca56cb8dbf63b4fcf8b9968e7928c463470fab3b98e44d0d95645062f94b2d04fe56bd52822b71934db8ce845622c40b92fcbe765a142e7f38b61a6aa9606c8e8858dcd3b6eb1894acec04d0451d1f06b01080088bea67444e1789390e7c0335c86775502d58ec783d99c8ef4e06de235ed3dd4b0467f6f358d818c7d8989d43ec6d69fcbc8c32632d5a1b605e3fa8e41d695fcdcaa535936cd0157f9040dce362519803b908eafe838bb13216c885c6f93e9e8d5745607f0d062322085d6bdc760969149a8ff8dd9f5c18d9bfe2e6f63a06e17694cf1f67587c6fb70e9aebf90ffc528ca3b615ac7c9d4a21ea4f7c06f2e98fbbd90a859b8608bf9ea638e3a54289ce44c283110d0c45fa458de6251cd6e7baf71f80f12c8978340490fd90c92b81736ae902ed958e478dceae2835953d189c45d182aff02ea2be61b81d8e94430f041d638647b43e2fcb45fd512fbf5068b810011010001c2c06504180108000f050251d1f06b050900081095021b0c000a0910e7b484133a890a35e63407fe2ec88d6d1e6c9ce7553ece0cb2524747217bad29f251d33df84599ffcc900141a355abd62126800744068a5e05dc167056aa9205273dc7765a2ed49db15c2a83b8d6e6429c902136f1e12229086c1c10c005324
2c2a4ae1930db58163387a48cad64607ff2153c320e42843dec28e3fce90e7399d63ac0affa2fee1f0adc0953c89eb3f46ef1d6c04328ed13b491669d5120a3782e3ffb7c69575fb77eebd108794f4dda9d34be2bae57e8e59ec8ebfda2f6f06104b2321be408ea146e2db482b00c5055c8618de36ac9716f80da2617e225556d0fce61b01c8cea2d1e0ea982c31711060ca370f2739366e1e708f38405d784b49d16a26cf62d152eae734327cec04d0451d1f07b010800d5af91c5e7c2fd8951c8d254eab0c97cdcb66822f868b79b78c366255059a68fd74ebca9adb9b970cd9e586690e6e0756705432306878c897b10a4b4ca0005966f99ac8fa4e6f9caf54bf8e53844544beee9872a7ac64c119cf1393d96e674254b661f61ee975633d0e8a8672531edb6bb8e211204e7754a9efa802342118eee850beea742bac95a3f706cc2024cf6037a308bb68162b2f53b9a6346a96e6d31871a2456186e24a1c7a82b82ac04afdfd57cd7fb9ba77a9c760d40b76a170f7be525e5fb6a9848cc726e806187710d9b190387df28700f321f988a392899f93815cc937f309129eb94d5299c5547cb2c085898e6639496e70d746c9d3fb9881d0011010001c2c06504180108000f050251d1f07b050900266305021b0c000a0910e7b484133a890a35bff207fd10dfe8c4a6ea1dd30568012b6fd6891a763c87ad0f7a1d112aad9e8e3239378a3b85588c235865bac2e614348cb4f216d7217f53b3ef48c192e0a4d31d64d7bfa5faccf21155965fa156e887056db644a05ad08a85cc6152d1377d9e37b46f4ff462bbe68ace2dc586ef90070314576c985d8037c2ba63f0a7dc17a62e15bd77e88bc61d9d00858979709f12304264a4cf4225c5cf86f12c8e19486cb9cdcc69f18f027e5f16f4ca8b50e28b3115eaff3a345acd21f624aef81f6ede515c1b55b26b84c1e32264754eab672d5489b287e7277ea855e0a5ff2aa9e8b8c76d579a964ec225255f4d57bf66639ccb34b64798846943e162a41096a7002ca21c7f56"
-const subkeyUsageHex = "988d04533a52bc010400d26af43085558f65b9e7dbc90cb9238015259aed5e954637adcfa2181548b2d0b60c65f1f42ec5081cbf1bc0a8aa4900acfb77070837c58f26012fbce297d70afe96e759ad63531f0037538e70dbf8e384569b9720d99d8eb39d8d0a2947233ed242436cb6ac7dfe74123354b3d0119b5c235d3dd9c9d6c004f8ffaf67ad8583001101000188b7041f010200210502533b8552170c8001ce094aa433f7040bb2ddf0be3893cb843d0fe70c020700000a0910a42704b92866382aa98404009d63d916a27543da4221c60087c33f1c44bec9998c5438018ed370cca4962876c748e94b73eb39c58eb698063f3fd6346d58dd2a11c0247934c4a9d71f24754f7468f96fb24c3e791dd2392b62f626148ad724189498cbf993db2df7c0cdc2d677c35da0f16cb16c9ce7c33b4de65a4a91b1d21a130ae9cc26067718910ef8e2b417556d627261203c756d627261407379642e65642e61753e88b80413010200220502533a52bc021b03060b090807030206150802090a0b0416020301021e01021780000a0910a42704b92866382a47840400c0c2bd04f5fca586de408b395b3c280a278259c93eaaa8b79a53b97003f8ed502a8a00446dd9947fb462677e4fcac0dac2f0701847d15130aadb6cd9e0705ea0cf5f92f129136c7be21a718d46c8e641eb7f044f2adae573e11ae423a0a9ca51324f03a8a2f34b91fa40c3cc764bee4dccadedb54c768ba0469b683ea53f1c29b88d04533a52bc01040099c92a5d6f8b744224da27bc2369127c35269b58bec179de6bbc038f749344222f85a31933224f26b70243c4e4b2d242f0c4777eaef7b5502f9dad6d8bf3aaeb471210674b74de2d7078af497d55f5cdad97c7bedfbc1b41e8065a97c9c3d344b21fc81d27723af8e374bc595da26ea242dccb6ae497be26eea57e563ed517e90011010001889f0418010200090502533a52bc021b0c000a0910a42704b92866382afa1403ff70284c2de8a043ff51d8d29772602fa98009b7861c540535f874f2c230af8caf5638151a636b21f8255003997ccd29747fdd06777bb24f9593bd7d98a3e887689bf902f999915fcc94625ae487e5d13e6616f89090ebc4fdc7eb5cad8943e4056995bb61c6af37f8043016876a958ec7ebf39c43d20d53b7f546cfa83e8d2604b88d04533b8283010400c0b529316dbdf58b4c54461e7e669dc11c09eb7f73819f178ccd4177b9182b91d138605fcf1e463262fabefa73f94a52b5e15d1904635541c7ea540f07050ce0fb51b73e6f88644cec86e91107c957a114f69554548a85295d2b70bd0b203992f76eb5d493d86d9eabcaa7ef3fc7db7e458438db3fcdb0ca1cc97c638439a9170011010001889
f0418010200090502533b8283021b0c000a0910a42704b92866382adc6d0400cfff6258485a21675adb7a811c3e19ebca18851533f75a7ba317950b9997fda8d1a4c8c76505c08c04b6c2cc31dc704d33da36a21273f2b388a1a706f7c3378b66d887197a525936ed9a69acb57fe7f718133da85ec742001c5d1864e9c6c8ea1b94f1c3759cebfd93b18606066c063a63be86085b7e37bdbc65f9a915bf084bb901a204533b85cd110400aed3d2c52af2b38b5b67904b0ef73d6dd7aef86adb770e2b153cd22489654dcc91730892087bb9856ae2d9f7ed1eb48f214243fe86bfe87b349ebd7c30e630e49c07b21fdabf78b7a95c8b7f969e97e3d33f2e074c63552ba64a2ded7badc05ce0ea2be6d53485f6900c7860c7aa76560376ce963d7271b9b54638a4028b573f00a0d8854bfcdb04986141568046202192263b9b67350400aaa1049dbc7943141ef590a70dcb028d730371d92ea4863de715f7f0f16d168bd3dc266c2450457d46dcbbf0b071547e5fbee7700a820c3750b236335d8d5848adb3c0da010e998908dfd93d961480084f3aea20b247034f8988eccb5546efaa35a92d0451df3aaf1aee5aa36a4c4d462c760ecd9cebcabfbe1412b1f21450f203fd126687cd486496e971a87fd9e1a8a765fe654baa219a6871ab97768596ab05c26c1aeea8f1a2c72395a58dbc12ef9640d2b95784e974a4d2d5a9b17c25fedacfe551bda52602de8f6d2e48443f5dd1a2a2a8e6a5e70ecdb88cd6e766ad9745c7ee91d78cc55c3d06536b49c3fee6c3d0b6ff0fb2bf13a314f57c953b8f4d93bf88e70418010200090502533b85cd021b0200520910a42704b92866382a47200419110200060502533b85cd000a091042ce2c64bc0ba99214b2009e26b26852c8b13b10c35768e40e78fbbb48bd084100a0c79d9ea0844fa5853dd3c85ff3ecae6f2c9dd6c557aa04008bbbc964cd65b9b8299d4ebf31f41cc7264b8cf33a00e82c5af022331fac79efc9563a822497ba012953cefe2629f1242fcdcb911dbb2315985bab060bfd58261ace3c654bdbbe2e8ed27a46e836490145c86dc7bae15c011f7e1ffc33730109b9338cd9f483e7cef3d2f396aab5bd80efb6646d7e778270ee99d934d187dd98"
-const revokedKeyHex = "988d045331ce82010400c4fdf7b40a5477f206e6ee278eaef888ca73bf9128a9eef9f2f1ddb8b7b71a4c07cfa241f028a04edb405e4d916c61d6beabc333813dc7b484d2b3c52ee233c6a79b1eea4e9cc51596ba9cd5ac5aeb9df62d86ea051055b79d03f8a4fa9f38386f5bd17529138f3325d46801514ea9047977e0829ed728e68636802796801be10011010001889f04200102000905025331d0e3021d03000a0910a401d9f09a34f7c042aa040086631196405b7e6af71026b88e98012eab44aa9849f6ef3fa930c7c9f23deaedba9db1538830f8652fb7648ec3fcade8dbcbf9eaf428e83c6cbcc272201bfe2fbb90d41963397a7c0637a1a9d9448ce695d9790db2dc95433ad7be19eb3de72dacf1d6db82c3644c13eae2a3d072b99bb341debba012c5ce4006a7d34a1f4b94b444526567205265766f6b657220283c52656727732022424d204261726973746122204b657920262530305c303e5c29203c72656740626d626172697374612e636f2e61753e88b704130102002205025331ce82021b03060b090807030206150802090a0b0416020301021e01021780000a0910a401d9f09a34f7c0019c03f75edfbeb6a73e7225ad3cc52724e2872e04260d7daf0d693c170d8c4b243b8767bc7785763533febc62ec2600c30603c433c095453ede59ff2fcabeb84ce32e0ed9d5cf15ffcbc816202b64370d4d77c1e9077d74e94a16fb4fa2e5bec23a56d7a73cf275f91691ae1801a976fcde09e981a2f6327ac27ea1fecf3185df0d56889c04100102000605025331cfb5000a0910fe9645554e8266b64b4303fc084075396674fb6f778d302ac07cef6bc0b5d07b66b2004c44aef711cbac79617ef06d836b4957522d8772dd94bf41a2f4ac8b1ee6d70c57503f837445a74765a076d07b829b8111fc2a918423ddb817ead7ca2a613ef0bfb9c6b3562aec6c3cf3c75ef3031d81d95f6563e4cdcc9960bcb386c5d757b104fcca5fe11fc709df884604101102000605025331cfe7000a09107b15a67f0b3ddc0317f6009e360beea58f29c1d963a22b962b80788c3fa6c84e009d148cfde6b351469b8eae91187eff07ad9d08fcaab88d045331ce820104009f25e20a42b904f3fa555530fe5c46737cf7bd076c35a2a0d22b11f7e0b61a69320b768f4a80fe13980ce380d1cfc4a0cd8fbe2d2e2ef85416668b77208baa65bf973fe8e500e78cc310d7c8705cdb34328bf80e24f0385fce5845c33bc7943cf6b11b02348a23da0bf6428e57c05135f2dc6bd7c1ce325d666d5a5fd2fd5e410011010001889f04180102000905025331ce82021b0c000a0910a401d9f09a34f7c0418003fe34feafcbeaef348a800a0d908a7a6809cc7304017d820f
70f0474d5e23cb17e38b67dc6dca282c6ca00961f4ec9edf2738d0f087b1d81e4871ef08e1798010863afb4eac4c44a376cb343be929c5be66a78cfd4456ae9ec6a99d97f4e1c3ff3583351db2147a65c0acef5c003fb544ab3a2e2dc4d43646f58b811a6c3a369d1f"
-const revokedSubkeyHex = "988d04533121f6010400aefc803a3e4bb1a61c86e8a86d2726c6a43e0079e9f2713f1fa017e9854c83877f4aced8e331d675c67ea83ddab80aacbfa0b9040bb12d96f5a3d6be09455e2a76546cbd21677537db941cab710216b6d24ec277ee0bd65b910f416737ed120f6b93a9d3b306245c8cfd8394606fdb462e5cf43c551438d2864506c63367fc890011010001b41d416c696365203c616c69636540626d626172697374612e636f2e61753e88bb041301020025021b03060b090807030206150802090a0b0416020301021e01021780050253312798021901000a09104ef7e4beccde97f015a803ff5448437780f63263b0df8442a995e7f76c221351a51edd06f2063d8166cf3157aada4923dfc44aa0f2a6a4da5cf83b7fe722ba8ab416c976e77c6b5682e7f1069026673bd0de56ba06fd5d7a9f177607f277d9b55ff940a638c3e68525c67517e2b3d976899b93ca267f705b3e5efad7d61220e96b618a4497eab8d04403d23f8846041011020006050253312910000a09107b15a67f0b3ddc03d96e009f50b6365d86c4be5d5e9d0ea42d5e56f5794c617700a0ab274e19c2827780016d23417ce89e0a2c0d987d889c04100102000605025331cf7a000a0910a401d9f09a34f7c0ee970400aca292f213041c9f3b3fc49148cbda9d84afee6183c8dd6c5ff2600b29482db5fecd4303797be1ee6d544a20a858080fec43412061c9a71fae4039fd58013b4ae341273e6c66ad4c7cdd9e68245bedb260562e7b166f2461a1032f2b38c0e0e5715fb3d1656979e052b55ca827a76f872b78a9fdae64bc298170bfcebedc1271b41a416c696365203c616c696365407379646973702e6f722e61753e88b804130102002205025331278b021b03060b090807030206150802090a0b0416020301021e01021780000a09104ef7e4beccde97f06a7003fa03c3af68d272ebc1fa08aa72a03b02189c26496a2833d90450801c4e42c5b5f51ad96ce2d2c9cef4b7c02a6a2fcf1412d6a2d486098eb762f5010a201819c17fd2888aec8eda20c65a3b75744de7ee5cc8ac7bfc470cbe3cb982720405a27a3c6a8c229cfe36905f881b02ed5680f6a8f05866efb9d6c5844897e631deb949ca8846041011020006050253312910000a09107b15a67f0b3ddc0347bc009f7fa35db59147469eb6f2c5aaf6428accb138b22800a0caa2f5f0874bacc5909c652a57a31beda65eddd5889c04100102000605025331cf7a000a0910a401d9f09a34f7c0316403ff46f2a5c101256627f16384d34a38fb47a6c88ba60506843e532d91614339fccae5f884a5741e7582ffaf292ba38ee10a270a05f139bde3814b6a077e8cd2db0f105ebea2a83af70d385f13b507f
ac2ad93ff79d84950328bb86f3074745a8b7f9b64990fb142e2a12976e27e8d09a28dc5621f957ac49091116da410ac3cbde1b88d04533121f6010400cbd785b56905e4192e2fb62a720727d43c4fa487821203cf72138b884b78b701093243e1d8c92a0248a6c0203a5a88693da34af357499abacaf4b3309c640797d03093870a323b4b6f37865f6eaa2838148a67df4735d43a90ca87942554cdf1c4a751b1e75f9fd4ce4e97e278d6c1c7ed59d33441df7d084f3f02beb68896c70011010001889f0418010200090502533121f6021b0c000a09104ef7e4beccde97f0b98b03fc0a5ccf6a372995835a2f5da33b282a7d612c0ab2a97f59cf9fff73e9110981aac2858c41399afa29624a7fd8a0add11654e3d882c0fd199e161bdad65e5e2548f7b68a437ea64293db1246e3011cbb94dc1bcdeaf0f2539bd88ff16d95547144d97cead6a8c5927660a91e6db0d16eb36b7b49a3525b54d1644e65599b032b7eb901a204533127a0110400bd3edaa09eff9809c4edc2c2a0ebe52e53c50a19c1e49ab78e6167bf61473bb08f2050d78a5cbbc6ed66aff7b42cd503f16b4a0b99fa1609681fca9b7ce2bbb1a5b3864d6cdda4d7ef7849d156d534dea30fb0efb9e4cf8959a2b2ce623905882d5430b995a15c3b9fe92906086788b891002924f94abe139b42cbbfaaabe42f00a0b65dc1a1ad27d798adbcb5b5ad02d2688c89477b03ff4eebb6f7b15a73b96a96bed201c0e5e4ea27e4c6e2dd1005b94d4b90137a5b1cf5e01c6226c070c4cc999938101578877ee76d296b9aab8246d57049caacf489e80a3f40589cade790a020b1ac146d6f7a6241184b8c7fcde680eae3188f5dcbe846d7f7bdad34f6fcfca08413e19c1d5df83fc7c7c627d493492e009c2f52a80400a2fe82de87136fd2e8845888c4431b032ba29d9a29a804277e31002a8201fb8591a3e55c7a0d0881496caf8b9fb07544a5a4879291d0dc026a0ea9e5bd88eb4aa4947bbd694b25012e208a250d65ddc6f1eea59d3aed3b4ec15fcab85e2afaa23a40ab1ef9ce3e11e1bc1c34a0e758e7aa64deb8739276df0af7d4121f834a9b88e70418010200090502533127a0021b02005209104ef7e4beccde97f047200419110200060502533127a0000a0910dbce4ee19529437fe045009c0b32f5ead48ee8a7e98fac0dea3d3e6c0e2c552500a0ad71fadc5007cfaf842d9b7db3335a8cdad15d3d1a6404009b08e2c68fe8f3b45c1bb72a4b3278cdf3012aa0f229883ad74aa1f6000bb90b18301b2f85372ca5d6b9bf478d235b733b1b197d19ccca48e9daf8e890cb64546b4ce1b178faccfff07003c172a2d4f5ebaba9f57153955f3f61a9b80a4f5cb959908f8b211b03b7026a8a82fc612bfedd3794969bcf458
c4ce92be215a1176ab88d045331d144010400a5063000c5aaf34953c1aa3bfc95045b3aab9882b9a8027fecfe2142dc6b47ba8aca667399990244d513dd0504716908c17d92c65e74219e004f7b83fc125e575dd58efec3ab6dd22e3580106998523dea42ec75bf9aa111734c82df54630bebdff20fe981cfc36c76f865eb1c2fb62c9e85bc3a6e5015a361a2eb1c8431578d0011010001889f04280102000905025331d433021d03000a09104ef7e4beccde97f02e5503ff5e0630d1b65291f4882b6d40a29da4616bb5088717d469fbcc3648b8276de04a04988b1f1b9f3e18f52265c1f8b6c85861691c1a6b8a3a25a1809a0b32ad330aec5667cb4262f4450649184e8113849b05e5ad06a316ea80c001e8e71838190339a6e48bbde30647bcf245134b9a97fa875c1d83a9862cae87ffd7e2c4ce3a1b89013d04180102000905025331d144021b0200a809104ef7e4beccde97f09d2004190102000605025331d144000a0910677815e371c2fd23522203fe22ab62b8e7a151383cea3edd3a12995693911426f8ccf125e1f6426388c0010f88d9ca7da2224aee8d1c12135998640c5e1813d55a93df472faae75bef858457248db41b4505827590aeccf6f9eb646da7f980655dd3050c6897feddddaca90676dee856d66db8923477d251712bb9b3186b4d0114daf7d6b59272b53218dd1da94a03ff64006fcbe71211e5daecd9961fba66cdb6de3f914882c58ba5beddeba7dcb950c1156d7fba18c19ea880dccc800eae335deec34e3b84ac75ffa24864f782f87815cda1c0f634b3dd2fa67cea30811d21723d21d9551fa12ccbcfa62b6d3a15d01307b99925707992556d50065505b090aadb8579083a20fe65bd2a270da9b011"
-
-const missingCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-Charset: UTF-8
-
-mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY
-ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG
-zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54
-QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ
-QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo
-9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu
-Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/
-dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R
-JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL
-ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew
-RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW
-/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu
-yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAJcXQeP+NmuciE99YcJoffxv
-2gVLU4ZXBNHEaP0mgaJ1+tmMD089vUQAcyGRvw8jfsNsVZQIOAuRxY94aHQhIRHR
-bUzBN28ofo/AJJtfx62C15xt6fDKRV6HXYqAiygrHIpEoRLyiN69iScUsjIJeyFL
-C8wa72e8pSL6dkHoaV1N9ZH/xmrJ+k0vsgkQaAh9CzYufncDxcwkoP+aOlGtX1gP
-WwWoIbz0JwLEMPHBWvDDXQcQPQTYQyj+LGC9U6f9VZHN25E94subM1MjuT9OhN9Y
-MLfWaaIc5WyhLFyQKW2Upofn9wSFi8ubyBnv640Dfd0rVmaWv7LNTZpoZ/GbJAMA
-EQEAAYkBHwQYAQIACQUCU5ygeQIbAgAKCRDt1A0FCB6SP0zCB/sEzaVR38vpx+OQ
-MMynCBJrakiqDmUZv9xtplY7zsHSQjpd6xGflbU2n+iX99Q+nav0ETQZifNUEd4N
-1ljDGQejcTyKD6Pkg6wBL3x9/RJye7Zszazm4+toJXZ8xJ3800+BtaPoI39akYJm
-+ijzbskvN0v/j5GOFJwQO0pPRAFtdHqRs9Kf4YanxhedB4dIUblzlIJuKsxFit6N
-lgGRblagG3Vv2eBszbxzPbJjHCgVLR3RmrVezKOsZjr/2i7X+xLWIR0uD3IN1qOW
-CXQxLBizEEmSNVNxsp7KPGTLnqO3bPtqFirxS9PJLIMPTPLNBY7ZYuPNTMqVIUWF
-4artDmrG
-=7FfJ
------END PGP PUBLIC KEY BLOCK-----`
-
-const invalidCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY
-ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG
-zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54
-QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ
-QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo
-9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu
-Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/
-dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R
-JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL
-ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew
-RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW
-/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu
-yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAIINDqlj7X6jYKc6DjwrOkjQ
-UIRWbQQar0LwmNilehmt70g5DCL1SYm9q4LcgJJ2Nhxj0/5qqsYib50OSWMcKeEe
-iRXpXzv1ObpcQtI5ithp0gR53YPXBib80t3bUzomQ5UyZqAAHzMp3BKC54/vUrSK
-FeRaxDzNLrCeyI00+LHNUtwghAqHvdNcsIf8VRumK8oTm3RmDh0TyjASWYbrt9c8
-R1Um3zuoACOVy+mEIgIzsfHq0u7dwYwJB5+KeM7ZLx+HGIYdUYzHuUE1sLwVoELh
-+SHIGHI1HDicOjzqgajShuIjj5hZTyQySVprrsLKiXS6NEwHAP20+XjayJ/R3tEA
-EQEAAYkCPgQYAQIBKAUCU5ygeQIbAsBdIAQZAQIABgUCU5ygeQAKCRCpVlnFZmhO
-52RJB/9uD1MSa0wjY6tHOIgquZcP3bHBvHmrHNMw9HR2wRCMO91ZkhrpdS3ZHtgb
-u3/55etj0FdvDo1tb8P8FGSVtO5Vcwf5APM8sbbqoi8L951Q3i7qt847lfhu6sMl
-w0LWFvPTOLHrliZHItPRjOltS1WAWfr2jUYhsU9ytaDAJmvf9DujxEOsN5G1YJep
-54JCKVCkM/y585Zcnn+yxk/XwqoNQ0/iJUT9qRrZWvoeasxhl1PQcwihCwss44A+
-YXaAt3hbk+6LEQuZoYS73yR3WHj+42tfm7YxRGeubXfgCEz/brETEWXMh4pe0vCL
-bfWrmfSPq2rDegYcAybxRQz0lF8PAAoJEO3UDQUIHpI/exkH/0vQfdHA8g/N4T6E
-i6b1CUVBAkvtdJpCATZjWPhXmShOw62gkDw306vHPilL4SCvEEi4KzG72zkp6VsB
-DSRcpxCwT4mHue+duiy53/aRMtSJ+vDfiV1Vhq+3sWAck/yUtfDU9/u4eFaiNok1
-8/Gd7reyuZt5CiJnpdPpjCwelK21l2w7sHAnJF55ITXdOxI8oG3BRKufz0z5lyDY
-s2tXYmhhQIggdgelN8LbcMhWs/PBbtUr6uZlNJG2lW1yscD4aI529VjwJlCeo745
-U7pO4eF05VViUJ2mmfoivL3tkhoTUWhx8xs8xCUcCg8DoEoSIhxtOmoTPR22Z9BL
-6LCg2mg=
-=Dhm4
------END PGP PUBLIC KEY BLOCK-----`
-
-const goodCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v1
-
-mI0EVUqeVwEEAMufHRrMPWK3gyvi0O0tABCs/oON9zV9KDZlr1a1M91ShCSFwCPo
-7r80PxdWVWcj0V5h50/CJYtpN3eE/mUIgW2z1uDYQF1OzrQ8ubrksfsJvpAhENom
-lTQEppv9mV8qhcM278teb7TX0pgrUHLYF5CfPdp1L957JLLXoQR/lwLVABEBAAG0
-E2dvb2Qtc2lnbmluZy1zdWJrZXmIuAQTAQIAIgUCVUqeVwIbAwYLCQgHAwIGFQgC
-CQoLBBYCAwECHgECF4AACgkQNRjL95IRWP69XQQAlH6+eyXJN4DZTLX78KGjHrsw
-6FCvxxClEPtPUjcJy/1KCRQmtLAt9PbbA78dvgzjDeZMZqRAwdjyJhjyg/fkU2OH
-7wq4ktjUu+dLcOBb+BFMEY+YjKZhf6EJuVfxoTVr5f82XNPbYHfTho9/OABKH6kv
-X70PaKZhbwnwij8Nts65AaIEVUqftREEAJ3WxZfqAX0bTDbQPf2CMT2IVMGDfhK7
-GyubOZgDFFjwUJQvHNvsrbeGLZ0xOBumLINyPO1amIfTgJNm1iiWFWfmnHReGcDl
-y5mpYG60Mb79Whdcer7CMm3AqYh/dW4g6IB02NwZMKoUHo3PXmFLxMKXnWyJ0clw
-R0LI/Qn509yXAKDh1SO20rqrBM+EAP2c5bfI98kyNwQAi3buu94qo3RR1ZbvfxgW
-CKXDVm6N99jdZGNK7FbRifXqzJJDLcXZKLnstnC4Sd3uyfyf1uFhmDLIQRryn5m+
-LBYHfDBPN3kdm7bsZDDq9GbTHiFZUfm/tChVKXWxkhpAmHhU/tH6GGzNSMXuIWSO
-aOz3Rqq0ED4NXyNKjdF9MiwD/i83S0ZBc0LmJYt4Z10jtH2B6tYdqnAK29uQaadx
-yZCX2scE09UIm32/w7pV77CKr1Cp/4OzAXS1tmFzQ+bX7DR+Gl8t4wxr57VeEMvl
-BGw4Vjh3X8//m3xynxycQU18Q1zJ6PkiMyPw2owZ/nss3hpSRKFJsxMLhW3fKmKr
-Ey2KiOcEGAECAAkFAlVKn7UCGwIAUgkQNRjL95IRWP5HIAQZEQIABgUCVUqftQAK
-CRD98VjDN10SqkWrAKDTpEY8D8HC02E/KVC5YUI01B30wgCgurpILm20kXEDCeHp
-C5pygfXw1DJrhAP+NyPJ4um/bU1I+rXaHHJYroYJs8YSweiNcwiHDQn0Engh/mVZ
-SqLHvbKh2dL/RXymC3+rjPvQf5cup9bPxNMa6WagdYBNAfzWGtkVISeaQW+cTEp/
-MtgVijRGXR/lGLGETPg2X3Afwn9N9bLMBkBprKgbBqU7lpaoPupxT61bL70=
-=vtbN
------END PGP PUBLIC KEY BLOCK-----`
-
-const revokedUserIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mQENBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0qlX2e
-DZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN91KtLsz/
-uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xOXO3YtLdmJMBW
-ClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBbnaIYO6fXVXELUjkx
-nmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX8vY7vwC34pm22fAUVLCJ
-x1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEBAAG0I0dvbGFuZyBHb3BoZXIg
-PG5vLXJlcGx5QGdvbGFuZy5jb20+iQFUBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy
-9I6cUoMFAlsgO5ECGwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AACgkQ
-1oFy9I6cUoMIkwf8DNPeD23i4jRwd/pylbvxwZintZl1fSwTJW1xcOa1emXaEtX2
-depuqhP04fjlRQGfsYAQh7X9jOJxAHjTmhqFBi5sD7QvKU00cPFYbJ/JTx0B41bl
-aXnSbGhRPh63QtEZL7ACAs+shwvvojJqysx7kyVRu0EW2wqjXdHwR/SJO6nhNBa2
-DXzSiOU/SUA42mmG+5kjF8Aabq9wPwT9wjraHShEweNerNMmOqJExBOy3yFeyDpa
-XwEZFzBfOKoxFNkIaVf5GSdIUGhFECkGvBMB935khftmgR8APxdU4BE7XrXexFJU
-8RCuPXonm4WQOwTWR0vQg64pb2WKAzZ8HhwTGbQiR29sYW5nIEdvcGhlciA8cmV2
-b2tlZEBnb2xhbmcuY29tPokBNgQwAQoAIBYhBOSJOSS3Dcepeq2X8NaBcvSOnFKD
-BQJbIDv3Ah0AAAoJENaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT6bC1JttG
-0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZq8KxHn/KvN6N
-s85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy+I0sGyI/Inro0Pzb
-tvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarYbYB2idtGRci4b9tObOK0
-BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8jSwEr2O2sUR0yjbgUAXbTxDVE
-/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3FazkkSYQD6b97+dkWwb1iWG5AQ0EWyA7
-kQEIALkg04REDZo1JgdYV4x8HJKFS4xAYWbIva1ZPqvDNmZRUbQZR2+gpJGEwn7z
-VofGvnOYiGW56AS5j31SFf5kro1+1bZQ5iOONBng08OOo58/l1hRseIIVGB5TGSa
-PCdChKKHreJI6hS3mShxH6hdfFtiZuB45rwoaArMMsYcjaezLwKeLc396cpUwwcZ
-snLUNd1Xu5EWEF2OdFkZ2a1qYdxBvAYdQf4+1Nr+NRIx1u1NS9c8jp3PuMOkrQEi
-bNtc1v6v0Jy52mKLG4y7mC/erIkvkQBYJdxPaP7LZVaPYc3/xskcyijrJ/5ufoD8
-K71/ShtsZUXSQn9jlRaYR0EbojMAEQEAAYkBPAQYAQoAJhYhBOSJOSS3Dcepeq2X
-8NaBcvSOnFKDBQJbIDuRAhsMBQkDwmcAAAoJENaBcvSOnFKDkFMIAIt64bVZ8x7+
-TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2NnDyf1cLOSimSTILpwLIuv9Uft5Pb
-OraQbYt3xi9yrqdKqGLv80bxqK0NuryNkvh9yyx5WoG1iKqMj9/FjGghuPrRaT4l
-QinNAghGVkEy1+aXGFrG2DsOC1FFI51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2V
-yJl9bD5R4SUNy8oQmhOxi+gbhD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+U
-heiQvzkApQup5c+BhH5zFDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB
-7qTZOahrETw=
-=IKnw
------END PGP PUBLIC KEY BLOCK-----`
-
-const keyWithFirstUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-Version: OpenPGP.js v4.10.10
-Comment: https://openpgpjs.org
-
-xsBNBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0q
-lX2eDZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN
-91KtLsz/uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xO
-XO3YtLdmJMBWClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBb
-naIYO6fXVXELUjkxnmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX
-8vY7vwC34pm22fAUVLCJx1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEB
-AAHNIkdvbGFuZyBHb3BoZXIgPHJldm9rZWRAZ29sYW5nLmNvbT7CwI0EMAEK
-ACAWIQTkiTkktw3HqXqtl/DWgXL0jpxSgwUCWyA79wIdAAAhCRDWgXL0jpxS
-gxYhBOSJOSS3Dcepeq2X8NaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT
-6bC1JttG0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZ
-q8KxHn/KvN6Ns85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy
-+I0sGyI/Inro0Pzbtvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarY
-bYB2idtGRci4b9tObOK0BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8j
-SwEr2O2sUR0yjbgUAXbTxDVE/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3Fazk
-kSYQD6b97+dkWwb1iWHNI0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFu
-Zy5jb20+wsCrBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy9I6cUoMFAlsgO5EC
-GwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AAIQkQ1oFy9I6cUoMW
-IQTkiTkktw3HqXqtl/DWgXL0jpxSgwiTB/wM094PbeLiNHB3+nKVu/HBmKe1
-mXV9LBMlbXFw5rV6ZdoS1fZ16m6qE/Th+OVFAZ+xgBCHtf2M4nEAeNOaGoUG
-LmwPtC8pTTRw8Vhsn8lPHQHjVuVpedJsaFE+HrdC0RkvsAICz6yHC++iMmrK
-zHuTJVG7QRbbCqNd0fBH9Ik7qeE0FrYNfNKI5T9JQDjaaYb7mSMXwBpur3A/
-BP3COtodKETB416s0yY6okTEE7LfIV7IOlpfARkXMF84qjEU2QhpV/kZJ0hQ
-aEUQKQa8EwH3fmSF+2aBHwA/F1TgETtetd7EUlTxEK49eiebhZA7BNZHS9CD
-rilvZYoDNnweHBMZzsBNBFsgO5EBCAC5INOERA2aNSYHWFeMfByShUuMQGFm
-yL2tWT6rwzZmUVG0GUdvoKSRhMJ+81aHxr5zmIhluegEuY99UhX+ZK6NftW2
-UOYjjjQZ4NPDjqOfP5dYUbHiCFRgeUxkmjwnQoSih63iSOoUt5kocR+oXXxb
-YmbgeOa8KGgKzDLGHI2nsy8Cni3N/enKVMMHGbJy1DXdV7uRFhBdjnRZGdmt
-amHcQbwGHUH+PtTa/jUSMdbtTUvXPI6dz7jDpK0BImzbXNb+r9CcudpiixuM
-u5gv3qyJL5EAWCXcT2j+y2VWj2HN/8bJHMoo6yf+bn6A/Cu9f0obbGVF0kJ/
-Y5UWmEdBG6IzABEBAAHCwJMEGAEKACYWIQTkiTkktw3HqXqtl/DWgXL0jpxS
-gwUCWyA7kQIbDAUJA8JnAAAhCRDWgXL0jpxSgxYhBOSJOSS3Dcepeq2X8NaB
-cvSOnFKDkFMIAIt64bVZ8x7+TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2N
-nDyf1cLOSimSTILpwLIuv9Uft5PbOraQbYt3xi9yrqdKqGLv80bxqK0NuryN
-kvh9yyx5WoG1iKqMj9/FjGghuPrRaT4lQinNAghGVkEy1+aXGFrG2DsOC1FF
-I51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2VyJl9bD5R4SUNy8oQmhOxi+gb
-hD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+UheiQvzkApQup5c+BhH5z
-FDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB7qTZOahrETw=
-=+2T8
------END PGP PUBLIC KEY BLOCK-----
-`
-
-const keyWithOnlyUserIDRevoked = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mDMEYYwB7RYJKwYBBAHaRw8BAQdARimqhPPzyGAXmfQJjcqM1QVPzLtURJSzNVll
-JV4tEaW0KVJldm9rZWQgUHJpbWFyeSBVc2VyIElEIDxyZXZva2VkQGtleS5jb20+
-iHgEMBYIACAWIQSpyJZAXYqVEFkjyKutFcS0yeB0LQUCYYwCtgIdAAAKCRCtFcS0
-yeB0LbSsAQD8OYMaaBjrdzzpwIkP1stgmPd4/kzN/ZG28Ywl6a5F5QEA5Xg7aq4e
-/t6Fsb4F5iqB956kSPe6YJrikobD/tBbMwSIkAQTFggAOBYhBKnIlkBdipUQWSPI
-q60VxLTJ4HQtBQJhjAHtAhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEK0V
-xLTJ4HQtBaoBAPZL7luTCji+Tqhn7XNfFE/0QIahCt8k9wfO1cGlB3inAQDf8Tzw
-ZGR5fNluUcNoVxQT7bUSFStbaGo3k0BaOYPbCLg4BGGMAe0SCisGAQQBl1UBBQEB
-B0DLwSpveSrbIO/IVZD13yrs1XuB3FURZUnafGrRq7+jUAMBCAeIeAQYFggAIBYh
-BKnIlkBdipUQWSPIq60VxLTJ4HQtBQJhjAHtAhsMAAoJEK0VxLTJ4HQtZ1oA/j9u
-8+p3xTNzsmabTL6BkNbMeB/RUKCrlm6woM6AV+vxAQCcXTn3JC2sNoNrLoXuVzaA
-mcG3/TwG5GSQUUPkrDsGDA==
-=mFWy
------END PGP PUBLIC KEY BLOCK-----
-`
-
-const keyWithSubKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mI0EWyKwKQEEALwXhKBnyaaNFeK3ljfc/qn9X/QFw+28EUfgZPHjRmHubuXLE2uR
-s3ZoSXY2z7Dkv+NyHYMt8p+X8q5fR7JvUjK2XbPyKoiJVnHINll83yl67DaWfKNL
-EjNoO0kIfbXfCkZ7EG6DL+iKtuxniGTcnGT47e+HJSqb/STpLMnWwXjBABEBAAG0
-I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQQ/
-lRafP/p9PytHbwxMvYJsOQdOOAUCWyKwKQIbAwULCQgHAwUVCgkICwUWAgMBAAIe
-AQIXgAAKCRBMvYJsOQdOOOsFBAC62mXww8XuqvYLcVOvHkWLT6mhxrQOJXnlfpn7
-2uBV9CMhoG/Ycd43NONsJrB95Apr9TDIqWnVszNbqPCuBhZQSGLdbiDKjxnCWBk0
-69qv4RNtkpOhYB7jK4s8F5oQZqId6JasT/PmJTH92mhBYhhTQr0GYFuPX2UJdkw9
-Sn9C67iNBFsisDUBBAC3A+Yo9lgCnxi/pfskyLrweYif6kIXWLAtLTsM6g/6jt7b
-wTrknuCPyTv0QKGXsAEe/cK/Xq3HvX9WfXPGIHc/X56ZIsHQ+RLowbZV/Lhok1IW
-FAuQm8axr/by80cRwFnzhfPc/ukkAq2Qyj4hLsGblu6mxeAhzcp8aqmWOO2H9QAR
-AQABiLYEKAEKACAWIQQ/lRafP/p9PytHbwxMvYJsOQdOOAUCWyK16gIdAAAKCRBM
-vYJsOQdOOB1vA/4u4uLONsE+2GVOyBsHyy7uTdkuxaR9b54A/cz6jT/tzUbeIzgx
-22neWhgvIEghnUZd0vEyK9k1wy5vbDlEo6nKzHso32N1QExGr5upRERAxweDxGOj
-7luDwNypI7QcifE64lS/JmlnunwRCdRWMKc0Fp+7jtRc5mpwyHN/Suf5RokBagQY
-AQoAIBYhBD+VFp8/+n0/K0dvDEy9gmw5B044BQJbIrA1AhsCAL8JEEy9gmw5B044
-tCAEGQEKAB0WIQSNdnkaWY6t62iX336UXbGvYdhXJwUCWyKwNQAKCRCUXbGvYdhX
-JxJSA/9fCPHP6sUtGF1o3G1a3yvOUDGr1JWcct9U+QpbCt1mZoNopCNDDQAJvDWl
-mvDgHfuogmgNJRjOMznvahbF+wpTXmB7LS0SK412gJzl1fFIpK4bgnhu0TwxNsO1
-8UkCZWqxRMgcNUn9z6XWONK8dgt5JNvHSHrwF4CxxwjL23AAtK+FA/UUoi3U4kbC
-0XnSr1Sl+mrzQi1+H7xyMe7zjqe+gGANtskqexHzwWPUJCPZ5qpIa2l8ghiUim6b
-4ymJ+N8/T8Yva1FaPEqfMzzqJr8McYFm0URioXJPvOAlRxdHPteZ0qUopt/Jawxl
-Xt6B9h1YpeLoJwjwsvbi98UTRs0jXwoY
-=3fWu
------END PGP PUBLIC KEY BLOCK-----`
-
-const keyWithSubKeyAndBadSelfSigOrder = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mI0EWyLLDQEEAOqIOpJ/ha1OYAGduu9tS3rBz5vyjbNgJO4sFveEM0mgsHQ0X9/L
-plonW+d0gRoO1dhJ8QICjDAc6+cna1DE3tEb5m6JtQ30teLZuqrR398Cf6w7NNVz
-r3lrlmnH9JaKRuXl7tZciwyovneBfZVCdtsRZjaLI1uMQCz/BToiYe3DABEBAAG0
-I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQRZ
-sixZOfQcZdW0wUqmgmdsv1O9xgUCWyLLDQIbAwULCQgHAwUVCgkICwUWAgMBAAIe
-AQIXgAAKCRCmgmdsv1O9xql2A/4pix98NxjhdsXtazA9agpAKeADf9tG4Za27Gj+
-3DCww/E4iP2X35jZimSm/30QRB6j08uGCqd9vXkkJxtOt63y/IpVOtWX6vMWSTUm
-k8xKkaYMP0/IzKNJ1qC/qYEUYpwERBKg9Z+k99E2Ql4kRHdxXUHq6OzY79H18Y+s
-GdeM/riNBFsiyxsBBAC54Pxg/8ZWaZX1phGdwfe5mek27SOYpC0AxIDCSOdMeQ6G
-HPk38pywl1d+S+KmF/F4Tdi+kWro62O4eG2uc/T8JQuRDUhSjX0Qa51gPzJrUOVT
-CFyUkiZ/3ZDhtXkgfuso8ua2ChBgR9Ngr4v43tSqa9y6AK7v0qjxD1x+xMrjXQAR
-AQABiQFxBBgBCgAmAhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsizTIFCQAN
-MRcAv7QgBBkBCgAdFiEEJcoVUVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62j
-UpRPICQq5gQApoWIigZxXFoM0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBS
-YnjyA4+n1D+zB2VqliD2QrsX12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZs
-nRJmXV+bsvD4sidLZLjdwOVa3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/
-U73GGi0D/i20VW8AWYAPACm2zMlzExKTOAV01YTQH/3vW0WLrOse53WcIVZga6es
-HuO4So0SOEAvxKMe5HpRIu2dJxTvd99Bo9xk9xJU0AoFrO0vNCRnL+5y68xMlODK
-lEw5/kl0jeaTBp6xX0HDQOEVOpPGUwWV4Ij2EnvfNDXaE1vK1kffiQFrBBgBCgAg
-AhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsi0AYAv7QgBBkBCgAdFiEEJcoV
-UVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62jUpRPICQq5gQApoWIigZxXFoM
-0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBSYnjyA4+n1D+zB2VqliD2QrsX
-12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZsnRJmXV+bsvD4sidLZLjdwOVa
-3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/U73GRl0EAJokkXmy4zKDHWWi
-wvK9gi2gQgRkVnu2AiONxJb5vjeLhM/07BRmH6K1o+w3fOeEQp4FjXj1eQ5fPSM6
-Hhwx2CTl9SDnPSBMiKXsEFRkmwQ2AAsQZLmQZvKBkLZYeBiwf+IY621eYDhZfo+G
-1dh1WoUCyREZsJQg2YoIpWIcvw+a
-=bNRo
------END PGP PUBLIC KEY BLOCK-----
-`
-
-const onlySubkeyNoPrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-Version: GnuPG v1
-
-lQCVBFggvocBBAC7vBsHn7MKmS6IiiZNTXdciplVgS9cqVd+RTdIAoyNTcsiV1H0
-GQ3QtodOPeDlQDNoqinqaobd7R9g3m3hS53Nor7yBZkCWQ5x9v9JxRtoAq0sklh1
-I1X2zEqZk2l6YrfBF/64zWrhjnW3j23szkrAIVu0faQXbQ4z56tmZrw11wARAQAB
-/gdlAkdOVQG0CUdOVSBEdW1teYi4BBMBAgAiBQJYIL6HAhsDBgsJCAcDAgYVCAIJ
-CgsEFgIDAQIeAQIXgAAKCRCd1xxWp1CYAnjGA/9synn6ZXJUKAXQzySgmCZvCIbl
-rqBfEpxwLG4Q/lONhm5vthAE0z49I8hj5Gc5e2tLYUtq0o0OCRdCrYHa/efOYWpJ
-6RsK99bePOisVzmOABLIgZkcr022kHoMCmkPgv9CUGKP1yqbGl+zzAwQfUjRUmvD
-ZIcWLHi2ge4GzPMPi50B2ARYIL6cAQQAxWHnicKejAFcFcF1/3gUSgSH7eiwuBPX
-M7vDdgGzlve1o1jbV4tzrjN9jsCl6r0nJPDMfBSzgLr1auNTRG6HpJ4abcOx86ED
-Ad+avDcQPZb7z3dPhH/gb2lQejZsHh7bbeOS8WMSzHV3RqCLd8J/xwWPNR5zKn1f
-yp4IGfopidMAEQEAAQAD+wQOelnR82+dxyM2IFmZdOB9wSXQeCVOvxSaNMh6Y3lk
-UOOkO8Nlic4x0ungQRvjoRs4wBmCuwFK/MII6jKui0B7dn/NDf51i7rGdNGuJXDH
-e676By1sEY/NGkc74jr74T+5GWNU64W0vkpfgVmjSAzsUtpmhJMXsc7beBhJdnVl
-AgDKCb8hZqj1alcdmLoNvb7ibA3K/V8J462CPD7bMySPBa/uayoFhNxibpoXml2r
-oOtHa5izF3b0/9JY97F6rqkdAgD6GdTJ+xmlCoz1Sewoif1I6krq6xoa7gOYpIXo
-UL1Afr+LiJeyAnF/M34j/kjIVmPanZJjry0kkjHE5ILjH3uvAf4/6n9np+Th8ujS
-YDCIzKwR7639+H+qccOaddCep8Y6KGUMVdD/vTKEx1rMtK+hK/CDkkkxnFslifMJ
-kqoqv3WUqCWJAT0EGAECAAkFAlggvpwCGwIAqAkQndccVqdQmAKdIAQZAQIABgUC
-WCC+nAAKCRDmGUholQPwvQk+A/9latnSsR5s5/1A9TFki11GzSEnfLbx46FYOdkW
-n3YBxZoPQGxNA1vIn8GmouxZInw9CF4jdOJxEdzLlYQJ9YLTLtN5tQEMl/19/bR8
-/qLacAZ9IOezYRWxxZsyn6//jfl7A0Y+FV59d4YajKkEfItcIIlgVBSW6T+TNQT3
-R+EH5HJ/A/4/AN0CmBhhE2vGzTnVU0VPrE4V64pjn1rufFdclgpixNZCuuqpKpoE
-VVHn6mnBf4njKjZrAGPs5kfQ+H4NsM7v3Zz4yV6deu9FZc4O6E+V1WJ38rO8eBix
-7G2jko106CC6vtxsCPVIzY7aaG3H5pjRtomw+pX7SzrQ7FUg2PGumg==
-=F/T0
------END PGP PRIVATE KEY BLOCK-----`
-
-const ecdsaPrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-
-xaUEX1KsSRMIKoZIzj0DAQcCAwTpYqJsnJiFhKKh+8TulWD+lVmerBFNS+Ii
-B+nlG3T0xQQ4Sy5eIjJ0CExIQQzi3EElF/Z2l4F3WC5taFA11NgA/gkDCHSS
-PThf1M2K4LN8F1MRcvR+sb7i0nH55ojkwuVB1DE6jqIT9m9i+mX1tzjSAS+6
-lPQiweCJvG7xTC7Hs3AzRapf/r1At4TB+v+5G2/CKynNFEJpbGwgPGJpbGxA
-aG9tZS5jb20+wncEEBMIAB8FAl9SrEkGCwkHCAMCBBUICgIDFgIBAhkBAhsD
-Ah4BAAoJEMpwT3+q3+xqw5UBAMebZN9isEZ1ML+R/jWAAWMwa/knMugrEZ1v
-Bl9+ZwM0AQCZdf80/wYY4Nve01qSRFv8OmKswLli3TvDv6FKc4cLz8epBF9S
-rEkSCCqGSM49AwEHAgMEAjKnT9b5wY2bf9TpAV3d7OUfPOxKj9c4VzeVzSrH
-AtQgo/MuI1cdYVURicV4i76DNjFhQHQFTk7BrC+C2u1yqQMBCAf+CQMIHImA
-iYfzQtjgQWSFZYUkCFpbbwhNF0ch+3HNaZkaHCnZRIsWsRnc6FCb6lRQyK9+
-Dq59kHlduE5QgY40894jfmP2JdJHU6nBdYrivbEdbMJhBBgTCAAJBQJfUqxJ
-AhsMAAoJEMpwT3+q3+xqUI0BAMykhV08kQ4Ip9Qlbss6Jdufv7YrU0Vd5hou
-b5TmiPd0APoDBh3qIic+aLLUcAuG3+Gt1P1AbUlmqV61ozn1WfHxfw==
-=KLN8
------END PGP PRIVATE KEY BLOCK-----`
-
-const dsaPrivateKeyWithElGamalSubkey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-
-lQOBBF9/MLsRCACeaF6BI0jTgDAs86t8/kXPfwlPvR2MCYzB0BCqAdcq1hV/GTYd
-oNmJRna/ZJfsI/vf+d8Nv+EYOQkPheFS1MJVBitkAXjQPgm8i1tQWen1FCWZxqGk
-/vwZYF4yo8GhZ+Wxi3w09W9Cp9QM/CTmyE1Xe7wpPBGe+oD+me8Zxjyt8JBS4Qx+
-gvWbfHxfHnggh4pz7U8QkItlLsBNQEdX4R5+zwRN66g2ZSX/shaa/EkVnihUhD7r
-njP9I51ORWucTQD6OvgooaNQZCkQ/Se9TzdakwWKS2XSIFXiY/e2E5ZgKI/pfKDU
-iA/KessxddPb7nP/05OIJqg9AoDrD4vmehLzAQD+zsUS3LDU1m9/cG4LMsQbT2VK
-Te4HqbGIAle+eu/asQf8DDJMrbZpiJZvADum9j0TJ0oep6VdMbzo9RSDKvlLKT9m
-kG63H8oDWnCZm1a+HmGq9YIX+JHWmsLXXsFLeEouLzHO+mZo0X28eji3V2T87hyR
-MmUM0wFo4k7jK8uVmkDXv3XwNp2uByWxUKZd7EnWmcEZWqIiexJ7XpCS0Pg3tRaI
-zxve0SRe/dxfUPnTk/9KQ9hS6DWroBKquL182zx1Fggh4LIWWE2zq+UYn8BI0E8A
-rmIDFJdF8ymFQGRrEy6g79NnkPmkrZWsgMRYY65P6v4zLVmqohJKkpm3/Uxa6QAP
-CCoPh/JTOvPeCP2bOJH8z4Z9Py3ouMIjofQW8sXqRgf/RIHbh0KsINHrwwZ4gVIr
-MK3RofpaYxw1ztPIWb4cMWoWZHH1Pxh7ggTGSBpAhKXkiWw2Rxat8QF5aA7e962c
-bLvVv8dqsPrD/RnVJHag89cbPTzjn7gY9elE8EM8ithV3oQkwHTr4avYlpDZsgNd
-hUW3YgRwGo31tdzxoG04AcpV2t+07P8XMPr9hsfWs4rHohXPi38Hseu1Ji+dBoWQ
-3+1w/HH3o55s+jy4Ruaz78AIrjbmAJq+6rA2mIcCgrhw3DnzuwQAKeBvSeqn9zfS
-ZC812osMBVmkycwelpaIh64WZ0vWL3GvdXDctV2kXM+qVpDTLEny0LuiXxrwCKQL
-Ev4HAwK9uQBcreDEEud7pfRb8EYP5lzO2ZA7RaIvje6EWAGBvJGMRT0QQE5SGqc7
-Fw5geigBdt+vVyRuNNhg3c2fdn/OBQaYu0J/8AiOogG8EaM8tCFlbGdhbWFsQGRz
-YS5jb20gPGVsZ2FtYWxAZHNhLmNvbT6IkAQTEQgAOBYhBI+gnfiHQxB35/Dp0XAQ
-aE/rsWC5BQJffzC7AhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEHAQaE/r
-sWC5A4EA/0GcJmyPtN+Klc7b9sVT3JgKTRnB/URxOJfYJofP0hZLAQCkqyMO+adV
-JvbgDH0zaITQWZSSXPqpgMpCA6juTrDsd50CawRffzC7EAgAxFFFSAAEQzWTgKU5
-EBtpxxoPzHqcChawTHRxHxjcELXzmUBS5PzfA1HXSPnNqK/x3Ut5ycC3CsW41Fnt
-Gm3706Wu9VFbFZVn55F9lPiplUo61n5pqMvOr1gmuQsdXiTa0t5FRa4TZ2VSiHFw
-vdAVSPTUsT4ZxJ1rPyFYRtq1n3pQcvdZowd07r0JnzTMjLLMFYCKhwIowoOC4zqJ
-iB8enjwOlpaqBATRm9xpVF7SJkroPF6/B1vdhj7E3c1aJyHlo0PYBAg756sSHWHg
-UuLyUQ4TA0hcCVenn/L/aSY2LnbdZB1EBhlYjA7dTCgwIqsQhfQmPkjz6g64A7+Y
-HbbrLwADBQgAk14QIEQ+J/VHetpQV/jt2pNsFK1kVK7mXK0spTExaC2yj2sXlHjL
-Ie3bO5T/KqmIaBEB5db5fA5xK9cZt79qrQHDKsEqUetUeMUWLBx77zBsus3grIgy
-bwDZKseRzQ715pwxquxQlScGoDIBKEh08HpwHkq140eIj3w+MAIfndaZaSCNaxaP
-Snky7BQmJ7Wc7qrIwoQP6yrnUqyW2yNi81nJYUhxjChqaFSlwzLs/iNGryBKo0ic
-BqVIRjikKHBlwBng6WyrltQo/Vt9GG8w+lqaAVXbJRlaBZJUR+2NKi/YhP3qQse3
-v8fi4kns0gh5LK+2C01RvdX4T49QSExuIf4HAwLJqYIGwadA2uem5v7/765ZtFWV
-oL0iZ0ueTJDby4wTFDpLVzzDi/uVcB0ZRFrGOp7w6OYcNYTtV8n3xmli2Q5Trw0c
-wZVzvg+ABKWiv7faBjMczIFF8y6WZKOIeAQYEQgAIBYhBI+gnfiHQxB35/Dp0XAQ
-aE/rsWC5BQJffzC7AhsMAAoJEHAQaE/rsWC5ZmIA/jhS4r4lClbvjuPWt0Yqdn7R
-fss2SPMYvMrrDh42aE0OAQD8xn4G6CN8UtW9xihXOY6FpxiJ/sMc2VaneeUd34oa
-4g==
-=XZm8
------END PGP PRIVATE KEY BLOCK-----`
-
-// https://tests.sequoia-pgp.org/#Certificate_expiration
-// P _ U p
-const expiringPrimaryUIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv
-/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz
-/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/
-5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3
-X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv
-9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0
-qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb
-SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb
-vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w
-bGU+wsFcBBMBCgCQBYJhesp/BYkEWQPJBQsJCAcCCRD7/MgqAV5zMEcUAAAAAAAe
-ACBzYWx0QG5vdGF0aW9ucy5zZXF1b2lhLXBncC5vcmeEOQlNyTLFkc9I/elp+BpY
-495V7KatqtDmsyDr+zDAdwYVCgkICwIEFgIDAQIXgAIbAwIeARYhBNGmbhojsYLJ
-mA94jPv8yCoBXnMwAABSCQv/av8hKyynMtXVKFuWOGJw0mR8auDm84WdhMFRZg8t
-yTJ1L88+Ny4WUAFeqo2j7DU2yPGrm5rmuvzlEedFYFeOWt+A4adz+oumgRd0nsgG
-Lf3QYUWQhLWVlz+H7zubgKqSB2A2RqV65S7mTTVro42nb2Mng6rvGWiqeKG5nrXN
-/01p1mIBQGR/KnZSqYLzA2Pw2PiJoSkXT26PDz/kiEMXpjKMR6sicV4bKVlEdUvm
-pIImIPBHZq1EsKXEyWtWC41w/pc+FofGE+uSFs2aef1vvEHFkj3BHSK8gRcH3kfR
-eFroTET8C2q9V1AOELWm+Ys6PzGzF72URK1MKXlThuL4t4LjvXWGNA78IKW+/RQH
-DzK4U0jqSO0mL6qxqVS5Ij6jjL6OTrVEGdtDf5n0vI8tcUTBKtVqYAYk+t2YGT05
-ayxALtb7viVKo8f10WEcCuKshn0gdsEFMRZQzJ89uQIY3R3FbsdRCaE6OEaDgKMQ
-UTFROyfhthgzRKbRxfcplMUCzsDNBF2lnPIBDADWML9cbGMrp12CtF9b2P6z9TTT
-74S8iyBOzaSvdGDQY/sUtZXRg21HWamXnn9sSXvIDEINOQ6A9QxdxoqWdCHrOuW3
-ofneYXoG+zeKc4dC86wa1TR2q9vW+RMXSO4uImA+Uzula/6k1DogDf28qhCxMwG/
-i/m9g1c/0aApuDyKdQ1PXsHHNlgd/Dn6rrd5y2AObaifV7wIhEJnvqgFXDN2RXGj
-LeCOHV4Q2WTYPg/S4k1nMXVDwZXrvIsA0YwIMgIT86Rafp1qKlgPNbiIlC1g9RY/
-iFaGN2b4Ir6GDohBQSfZW2+LXoPZuVE/wGlQ01rh827KVZW4lXvqsge+wtnWlszc
-selGATyzqOK9LdHPdZGzROZYI2e8c+paLNDdVPL6vdRBUnkCaEkOtl1mr2JpQi5n
-TU+gTX4IeInC7E+1a9UDF/Y85ybUz8XV8rUnR76UqVC7KidNepdHbZjjXCt8/Zo+
-Tec9JNbYNQB/e9ExmDntmlHEsSEQzFwzj8sxH48AEQEAAcLA9gQYAQoAIBYhBNGm
-bhojsYLJmA94jPv8yCoBXnMwBQJdpZzyAhsMAAoJEPv8yCoBXnMw6f8L/26C34dk
-jBffTzMj5Bdzm8MtF67OYneJ4TQMw7+41IL4rVcSKhIhk/3Ud5knaRtP2ef1+5F6
-6h9/RPQOJ5+tvBwhBAcUWSupKnUrdVaZQanYmtSxcVV2PL9+QEiNN3tzluhaWO//
-rACxJ+K/ZXQlIzwQVTpNhfGzAaMVV9zpf3u0k14itcv6alKY8+rLZvO1wIIeRZLm
-U0tZDD5HtWDvUV7rIFI1WuoLb+KZgbYn3OWjCPHVdTrdZ2CqnZbG3SXw6awH9bzR
-LV9EXkbhIMez0deCVdeo+wFFklh8/5VK2b0vk/+wqMJxfpa1lHvJLobzOP9fvrsw
-sr92MA2+k901WeISR7qEzcI0Fdg8AyFAExaEK6VyjP7SXGLwvfisw34OxuZr3qmx
-1Sufu4toH3XrB7QJN8XyqqbsGxUCBqWif9RSK4xjzRTe56iPeiSJJOIciMP9i2ld
-I+KgLycyeDvGoBj0HCLO3gVaBe4ubVrj5KjhX2PVNEJd3XZRzaXZE2aAMQ==
-=AmgT
------END PGP PUBLIC KEY BLOCK-----`
-
-const rsa2048PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-Comment: gpg (GnuPG) 2.2.27 with libgcrypt 1.9.4
-
-lQPGBGL07P0BCADL0etN8efyAXA6sL2WfQvHe5wEKYXPWeN2+jiqSppfeRZAOlzP
-kZ3U+cloeJriplYvVJwI3ID2aw52Z/TRn8iKRP5eOUFrEgcgl06lazLtOndK7o7p
-oBV5mLtHEirFHm6W61fNt10jzM0jx0PV6nseLhFB2J42F1cmU/aBgFo41wjLSZYr
-owR+v+O9S5sUXblQF6sEDcY01sBEu09zrIgT49VFwQ1Cvdh9XZEOTQBfdiugoj5a
-DS3fAqAka3r1VoQK4eR7/upnYSgSACGeaQ4pUelKku5rpm50gdWTY8ppq0k9e1eT
-y2x0OQcW3hWE+j4os1ca0ZEADMdqr/99MOxrABEBAAH+BwMCJWxU4VOZOJ7/I6vX
-FxdfBhIBEXlJ52FM3S/oYtXqLhkGyrtmZOeEazVvUtuCe3M3ScHI8xCthcmE8E0j
-bi+ZEHPS2NiBZtgHFF27BLn7zZuTc+oD5WKduZdK3463egnyThTqIIMl25WZBuab
-k5ycwYrWwBH0jfA4gwJ13ai4pufKC2RM8qIu6YAVPglYBKFLKGvvJHa5vI+LuA0E
-K+k35hIic7yVUcQneNnAF2598X5yWiieYnOZpmHlRw1zfbMwOJr3ZNj2v94u7b+L
-sTa/1Uv9887Vb6sJp0c2Sh4cwEccoPYkvMqFn3ZrJUr3UdDu1K2vWohPtswzhrYV
-+RdPZE5RLoCQufKvlPezk0Pzhzb3bBU7XjUbdGY1nH/EyQeBNp+Gw6qldKvzcBaB
-cyOK1c6hPSszpJX93m5UxCN55IeifmcNjmbDh8vGCCdajy6d56qV2n4F3k7vt1J1
-0UlxIGhqijJoaTCX66xjLMC6VXkSz6aHQ35rnXosm/cqPcQshsZTdlfSyWkorfdr
-4Hj8viBER26mjYurTMLBKDtUN724ZrR0Ev5jorX9uoKlgl87bDZHty2Ku2S+vR68
-VAvnj6Fi1BYNclnDoqxdRB2z5T9JbWE52HuG83/QsplhEqXxESDxriTyTHMbNxEe
-88soVCDh4tgflZFa2ucUr6gEKJKij7jgahARnyaXfPZlQBUAS1YUeILYmN+VR+M/
-sHENpwDWc7TInn8VN638nJV+ScZGMih3AwWZTIoiLju3MMt1K0YZ3NuiqwGH4Jwg
-/BbEdTWeCci9y3NEQHQ3uZZ5p6j2CwFVlK11idemCMvAiTVxF+gKdaLMkeCwKxru
-J3YzhKEo+iDVYbPYBYizx/EHBn2U5kITQ5SBXzjTaaFMNZJEf9JYsL1ybPB6HOFY
-VNVB2KT8CGVwtCJHb2xhbmcgR29waGVyIDxnb2xhbmdAZXhhbXBsZS5vcmc+iQFO
-BBMBCgA4FiEEC6K7U7f4qesybTnqSkra7gHusm0FAmL07P0CGwMFCwkIBwIGFQoJ
-CAsCBBYCAwECHgECF4AACgkQSkra7gHusm1MvwgAxpClWkeSqIhMQfbiuz0+lOkE
-89y1DCFw8bHjZoUf4/4K8hFA3dGkk+q72XFgiyaCpfXxMt6Gi+dN47t+tTv9NIqC
-sukbaoJBmJDhN6+djmJOgOYy+FWsW2LAk2LOwKYulpnBZdcA5rlMAhBg7gevQpF+
-ruSU69P7UUaFJl/DC7hDmaIcj+4cjBE/HO26SnVQjoTfjZT82rDh1Wsuf8LnkJUk
-b3wezBLpXKjDvdHikdv4gdlR4AputVM38aZntYYglh/EASo5TneyZ7ZscdLNRdcF
-r5O2fKqrOJLOdaoYRFZZWOvP5GtEVFDU7WGivOSVfiszBE0wZR3dgZRJipHCXJ0D
-xgRi9Oz9AQgAtMJcJqLLVANJHl90tWuoizDkm+Imcwq2ubQAjpclnNrODnDK+7o4
-pBsWmXbZSdkC4gY+LhOQA6bPDD0JEHM58DOnrm49BddxXAyK0HPsk4sGGt2SS86B
-OawWNdfJVyqw4bAiHWDmQg4PcjBbt3ocOIxAR6I5kBSiQVxuGQs9T+Zvg3G1r3Or
-fS6DzlgY3HFUML5YsGH4lOxNSOoKAP68GIH/WNdUZ+feiRg9knIib6I3Hgtf5eO8
-JRH7aWE/TD7eNu36bLLjT5TZPq5r6xaD2plbtPOyXbNPWs9qI1yG+VnErfaLY0w8
-Qo0aqzbgID+CTZVomXSOpOcQseaFKw8ZfQARAQAB/gcDArha6+/+d4OY/w9N32K9
-hFNYt4LufTETMQ+k/sBeaMuAVzmT47DlAXzkrZhGW4dZOtXMu1rXaUwHlqkhEyzL
-L4MYEWVXfD+LbZNEK3MEFss6RK+UAMeT/PTV9aA8cXQVPcSJYzfBXHQ1U1hnOgrO
-apn92MN8RmkhX8wJLyeWTMMuP4lXByJMmmGo8WvifeRD2kFY4y0WVBDAXJAV4Ljf
-Di/bBiwoc5a+gxHuZT2W9ZSxBQJNXdt4Un2IlyZuo58s5MLx2N0EaNJ8PwRUE6fM
-RZYO8aZCEPUtINE4njbvsWOMCtrblsMPwZ1B0SiIaWmLaNyGdCNKea+fCIW7kasC
-JYMhnLumpUTXg5HNexkCsl7ABWj0PYBflOE61h8EjWpnQ7JBBVKS2ua4lMjwHRX7
-5o5yxym9k5UZNFdGoXVL7xpizCcdGawxTJvwhs3vBqu1ZWYCegOAZWDrOkCyhUpq
-8uKMROZFbn+FwE+7tjt+v2ed62FVEvD6g4V3ThCA6mQqeOARfJWN8GZY8BDm8lht
-crOXriUkrx+FlrgGtm2CkwjW5/9Xd7AhFpHnQdFeozOHyq1asNSgJF9sNi9Lz94W
-skQSVRi0IExxSXYGI3Y0nnAZUe2BAQflYPJdEveSr3sKlUqXiETTA1VXsTPK3kOC
-92CbLzj/Hz199jZvywwyu53I+GKMpF42rMq7zxr2oa61YWY4YE/GDezwwys/wLx/
-QpCW4X3ppI7wJjCSSqEV0baYZSSli1ayheS6dxi8QnSpX1Bmpz6gU7m/M9Sns+hl
-J7ZvgpjCAiV7KJTjtclr5/S02zP78LTVkoTWoz/6MOTROwaP63VBUXX8pbJhf/vu
-DLmNnDk8joMJxoDXWeNU0EnNl4hP7Z/jExRBOEO4oAnUf/Sf6gCWQhL5qcajtg6w
-tGv7vx3f2IkBNgQYAQoAIBYhBAuiu1O3+KnrMm056kpK2u4B7rJtBQJi9Oz9AhsM
-AAoJEEpK2u4B7rJt6lgIAMBWqP4BCOGnQXBbgJ0+ACVghpkFUXZTb/tXJc8UUvTM
-8uov6k/RsqDGZrvhhufD7Wwt7j9v7dD7VPp7bPyjVWyimglQzWguTUUqLDGlstYH
-5uYv1pzma0ZsAGNqFeGlTLsKOSGKFMH4rB2KfN2n51L8POvtp1y7GKZQbWIWneaB
-cZr3BINU5GMvYYU7pAYcoR+mJPdJx5Up3Ocn+bn8Tu1sy9C/ArtCQucazGnoE9u1
-HhNLrh0CdzzX7TNH6TQ8LwPOvq0K5l/WqbN9lE0WBBhMv2HydxhluO8AhU+A5GqC
-C+wET7nVDnhoOm/fstIeb7/LN7OYejKPeHdFBJEL9GA=
-=u442
------END PGP PRIVATE KEY BLOCK-----`
-
-const curve25519PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-Comment: gpg (GnuPG) 2.2.27 with libgcrypt 1.9.4
-
-lFgEYvTtQBYJKwYBBAHaRw8BAQdAxsNXLbrk5xOjpO24VhOMvQ0/F+JcyIkckMDH
-X3FIGxcAAQDFOlunZWYuPsCx5JLp78vKqUTfgef9TGG4oD6I/Sa0zBMstCJHb2xh
-bmcgR29waGVyIDxnb2xhbmdAZXhhbXBsZS5vcmc+iJAEExYIADgWIQSFQHEOazmo
-h1ldII4MvfnLQ4JBNwUCYvTtQAIbAwULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAK
-CRAMvfnLQ4JBN5yeAQCKdry8B5ScCPrev2+UByMCss7Sdu5RhomCFsHdNPLcKAEA
-8ugei+1owHsV+3cGwWWzKk6sLa8ZN87i3SKuOGp9DQycXQRi9O1AEgorBgEEAZdV
-AQUBAQdA5CubPp8l7lrVQ25h7Hx5XN2C8xanRnnpcjzEooCaEA0DAQgHAAD/Rpc+
-sOZUXrFk9HOWB1XU41LoWbDBoG8sP8RWAVYwD5AQRYh4BBgWCAAgFiEEhUBxDms5
-qIdZXSCODL35y0OCQTcFAmL07UACGwwACgkQDL35y0OCQTcvdwEA7lb5g/YisrEf
-iq660uwMGoepLUfvtqKzuQ6heYe83y0BAN65Ffg5HYOJzUEi0kZQRf7OhdtuL2kJ
-SRXn8DmCTfEB
-=cELM
------END PGP PRIVATE KEY BLOCK-----`
-
-const curve448PrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-Comment: C1DB 65D5 80D7 B922 7254 4B1E A699 9895 FABA CE52
-
-xYUEYV2UmRYDK2VxAc9AFyxgh5xnSbyt50TWl558mw9xdMN+/UBLr5+UMP8IsrvV
-MdXuTIE8CyaUQKSotHtH2RkYEXj5nsMAAAHPQIbTMSzjIWug8UFECzAex5FHgAgH
-gYF3RK+TS8D24wX8kOu2C/NoVxwGY+p+i0JHaB+7yljriSKAGxs6wsBEBB8WCgCD
-BYJhXZSZBYkFpI+9AwsJBwkQppmYlfq6zlJHFAAAAAAAHgAgc2FsdEBub3RhdGlv
-bnMuc2VxdW9pYS1wZ3Aub3Jn5wSpIutJ5HncJWk4ruUV8GzQF390rR5+qWEAnAoY
-akcDFQoIApsBAh4BFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAALzdA5dA/fsgYg/J
-qaQriYKaPUkyHL7EB3BXhV2d1h/gk+qJLvXQuU2WEJ/XSs3GrsBRiiZwvPH4o+7b
-mleAxjy5wpS523vqrrBR2YZ5FwIku7WS4litSdn4AtVam/TlLdMNIf41CtFeZKBe
-c5R5VNdQy8y7qy8AAADNEUN1cnZlNDQ4IE9wdGlvbiA4wsBHBBMWCgCGBYJhXZSZ
-BYkFpI+9AwsJBwkQppmYlfq6zlJHFAAAAAAAHgAgc2FsdEBub3RhdGlvbnMuc2Vx
-dW9pYS1wZ3Aub3JnD55UsYMzE6OACP+mgw5zvT+BBgol8/uFQjHg4krjUCMDFQoI
-ApkBApsBAh4BFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAAPQJA5dA0Xqwzn/0uwCq
-RlsOVCB3f5NOj1exKnlBvRw0xT1VBee1yxvlUt5eIAoCxWoRlWBJob3TTkhm9AEA
-8dyhwPmyGfWHzPw5NFG3xsXrZdNXNvit9WMVAPcmsyR7teXuDlJItxRAdJJc/qfJ
-YVbBFoaNrhYAAADHhQRhXZSZFgMrZXEBz0BL7THZ9MnCLfSPJ1FMLim9eGkQ3Bfn
-M3he5rOwO3t14QI1LjI96OjkeJipMgcFAmEP1Bq/ZHGO7oAAAc9AFnE8iNBaT3OU
-EFtxkmWHXtdaYMmGGRdopw9JPXr/UxuunDln5o9dxPxf7q7z26zXrZen+qed/Isa
-HsDCwSwEGBYKAWsFgmFdlJkFiQWkj70JEKaZmJX6us5SRxQAAAAAAB4AIHNhbHRA
-bm90YXRpb25zLnNlcXVvaWEtcGdwLm9yZxREUizdTcepBzgSMOv2VWQCWbl++3CZ
-EbgAWDryvSsyApsCwDGgBBkWCgBvBYJhXZSZCRBKo3SL4S5djkcUAAAAAAAeACBz
-YWx0QG5vdGF0aW9ucy5zZXF1b2lhLXBncC5vcmemoGTDjmNQiIzw6HOEddvS0OB7
-UZ/P07jM/EVmnYxTlBYhBAxsnkGpx1UCiH6gUUqjdIvhLl2OAAALYQOXQAMB1oKq
-OWxSFmvmgCKNcbAAyA3piF5ERIqs4z07oJvqDYrOWt75UsEIH/04gU/vHc4EmfG2
-JDLJgOLlyTUPkL/08f0ydGZPofFQBhn8HkuFFjnNtJ5oz3GIP4cdWMQFaUw0uvjb
-PM9Tm3ptENGd6Ts1AAAAFiEEwdtl1YDXuSJyVEseppmYlfq6zlIAAGpTA5dATR6i
-U2GrpUcQgpG+JqfAsGmF4yAOhgFxc1UfidFk3nTup3fLgjipkYY170WLRNbyKkVO
-Sodx93GAs58rizO1acDAWiLq3cyEPBFXbyFThbcNPcLl+/77Uk/mgkYrPQFAQWdK
-1kSRm4SizDBK37K8ChAAAADHhwRhXZSZEgMrZW8Bx0DMhzvhQo+OsXeqQ6QVw4sF
-CaexHh6rLohh7TzL3hQSjoJ27fV6JBkIWdn0LfrMlJIDbSv2SLdlgQMBCgkAAcdA
-MO7Dc1myF6Co1fAH+EuP+OxhxP/7V6ljuSCZENDfA49tQkzTta+PniG+pOVB2LHb
-huyaKBkqiaogo8LAOQQYFgoAeAWCYV2UmQWJBaSPvQkQppmYlfq6zlJHFAAAAAAA
-HgAgc2FsdEBub3RhdGlvbnMuc2VxdW9pYS1wZ3Aub3JnEjBMQAmc/2u45u5FQGmB
-QAytjSG2LM3JQN+PPVl5vEkCmwwWIQTB22XVgNe5InJUSx6mmZiV+rrOUgAASdYD
-l0DXEHQ9ykNP2rZP35ET1dmiFagFtTj/hLQcWlg16LqvJNGqOgYXuqTerbiOOt02
-XLCBln+wdewpU4ChEffMUDRBfqfQco/YsMqWV7bHJHAO0eC/DMKCjyU90xdH7R/d
-QgqsfguR1PqPuJxpXV4bSr6CGAAAAA==
-=MSvh
------END PGP PRIVATE KEY BLOCK-----`
-
-const keyWithNotation = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-
-xVgEY9gIshYJKwYBBAHaRw8BAQdAF25fSM8OpFlXZhop4Qpqo5ywGZ4jgWlR
-ppjhIKDthREAAQC+LFpzFcMJYcjxGKzBGHN0Px2jU4d04YSRnFAik+lVVQ6u
-zRdUZXN0IDx0ZXN0QGV4YW1wbGUuY29tPsLACgQQFgoAfAUCY9gIsgQLCQcI
-CRD/utJOCym8pR0UgAAAAAAQAAR0ZXh0QGV4YW1wbGUuY29tdGVzdB8UAAAA
-AAASAARiaW5hcnlAZXhhbXBsZS5jb20AAQIDAxUICgQWAAIBAhkBAhsDAh4B
-FiEEEMCQTUVGKgCX5rDQ/7rSTgspvKUAAPl5AP9Npz90LxzrB97Qr2DrGwfG
-wuYn4FSYwtuPfZHHeoIabwD/QEbvpQJ/NBb9EAZuow4Rirlt1yv19mmnF+j5
-8yUzhQjHXQRj2AiyEgorBgEEAZdVAQUBAQdARXAo30DmKcyUg6co7OUm0RNT
-z9iqFbDBzA8A47JEt1MDAQgHAAD/XKK3lBm0SqMR558HLWdBrNG6NqKuqb5X
-joCML987ZNgRD8J4BBgWCAAqBQJj2AiyCRD/utJOCym8pQIbDBYhBBDAkE1F
-RioAl+aw0P+60k4LKbylAADRxgEAg7UfBDiDPp5LHcW9D+SgFHk6+GyEU4ev
-VppQxdtxPvAA/34snHBX7Twnip1nMt7P4e2hDiw/hwQ7oqioOvc6jMkP
-=Z8YJ
------END PGP PRIVATE KEY BLOCK-----
-`
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go
deleted file mode 100644
index fec41a0e..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-
-package packet
-
-import "math/bits"
-
-// CipherSuite contains a combination of Cipher and Mode
-type CipherSuite struct {
- // The cipher function
- Cipher CipherFunction
- // The AEAD mode of operation.
- Mode AEADMode
-}
-
-// AEADConfig collects a number of AEAD parameters along with sensible defaults.
-// A nil AEADConfig is valid and results in all default values.
-type AEADConfig struct {
- // The AEAD mode of operation.
- DefaultMode AEADMode
- // Amount of octets in each chunk of data
- ChunkSize uint64
-}
-
-// Mode returns the AEAD mode of operation.
-func (conf *AEADConfig) Mode() AEADMode {
- // If no preference is specified, OCB is used (which is mandatory to implement).
- if conf == nil || conf.DefaultMode == 0 {
- return AEADModeOCB
- }
-
- mode := conf.DefaultMode
- if mode != AEADModeEAX && mode != AEADModeOCB && mode != AEADModeGCM {
- panic("AEAD mode unsupported")
- }
- return mode
-}
-
-// ChunkSizeByte returns the byte indicating the chunk size. The effective
-// chunk size is computed with the formula uint64(1) << (chunkSizeByte + 6)
-// limit to 16 = 4 MiB
-// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
-func (conf *AEADConfig) ChunkSizeByte() byte {
- if conf == nil || conf.ChunkSize == 0 {
- return 12 // 1 << (12 + 6) == 262144 bytes
- }
-
- chunkSize := conf.ChunkSize
- exponent := bits.Len64(chunkSize) - 1
- switch {
- case exponent < 6:
- exponent = 6
- case exponent > 16:
- exponent = 16
- }
-
- return byte(exponent - 6)
-}
-
-// decodeAEADChunkSize returns the effective chunk size. In 32-bit systems, the
-// maximum returned value is 1 << 30.
-func decodeAEADChunkSize(c byte) int {
- size := uint64(1 << (c + 6))
- if size != uint64(int(size)) {
- return 1 << 30
- }
- return int(size)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go
deleted file mode 100644
index 2d1aeed6..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_crypter.go
+++ /dev/null
@@ -1,273 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-
-package packet
-
-import (
- "bytes"
- "crypto/cipher"
- "encoding/binary"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-// aeadCrypter is an AEAD opener/sealer, its configuration, and data for en/decryption.
-type aeadCrypter struct {
- aead cipher.AEAD
- chunkSize int
- initialNonce []byte
- associatedData []byte // Chunk-independent associated data
- chunkIndex []byte // Chunk counter
- packetTag packetType // SEIP packet (v2) or AEAD Encrypted Data packet
- bytesProcessed int // Amount of plaintext bytes encrypted/decrypted
- buffer bytes.Buffer // Buffered bytes across chunks
-}
-
-// computeNonce takes the incremental index and computes an eXclusive OR with
-// the least significant 8 bytes of the receivers' initial nonce (see sec.
-// 5.16.1 and 5.16.2). It returns the resulting nonce.
-func (wo *aeadCrypter) computeNextNonce() (nonce []byte) {
- if wo.packetTag == packetTypeSymmetricallyEncryptedIntegrityProtected {
- return append(wo.initialNonce, wo.chunkIndex...)
- }
-
- nonce = make([]byte, len(wo.initialNonce))
- copy(nonce, wo.initialNonce)
- offset := len(wo.initialNonce) - 8
- for i := 0; i < 8; i++ {
- nonce[i+offset] ^= wo.chunkIndex[i]
- }
- return
-}
-
-// incrementIndex performs an integer increment by 1 of the integer represented by the
-// slice, modifying it accordingly.
-func (wo *aeadCrypter) incrementIndex() error {
- index := wo.chunkIndex
- if len(index) == 0 {
- return errors.AEADError("Index has length 0")
- }
- for i := len(index) - 1; i >= 0; i-- {
- if index[i] < 255 {
- index[i]++
- return nil
- }
- index[i] = 0
- }
- return errors.AEADError("cannot further increment index")
-}
-
-// aeadDecrypter reads and decrypts bytes. It buffers extra decrypted bytes when
-// necessary, similar to aeadEncrypter.
-type aeadDecrypter struct {
- aeadCrypter // Embedded ciphertext opener
- reader io.Reader // 'reader' is a partialLengthReader
- peekedBytes []byte // Used to detect last chunk
- eof bool
-}
-
-// Read decrypts bytes and reads them into dst. It decrypts when necessary and
-// buffers extra decrypted bytes. It returns the number of bytes copied into dst
-// and an error.
-func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) {
- // Return buffered plaintext bytes from previous calls
- if ar.buffer.Len() > 0 {
- return ar.buffer.Read(dst)
- }
-
- // Return EOF if we've previously validated the final tag
- if ar.eof {
- return 0, io.EOF
- }
-
- // Read a chunk
- tagLen := ar.aead.Overhead()
- cipherChunkBuf := new(bytes.Buffer)
- _, errRead := io.CopyN(cipherChunkBuf, ar.reader, int64(ar.chunkSize+tagLen))
- cipherChunk := cipherChunkBuf.Bytes()
- if errRead != nil && errRead != io.EOF {
- return 0, errRead
- }
-
- if len(cipherChunk) > 0 {
- decrypted, errChunk := ar.openChunk(cipherChunk)
- if errChunk != nil {
- return 0, errChunk
- }
-
- // Return decrypted bytes, buffering if necessary
- if len(dst) < len(decrypted) {
- n = copy(dst, decrypted[:len(dst)])
- ar.buffer.Write(decrypted[len(dst):])
- } else {
- n = copy(dst, decrypted)
- }
- }
-
- // Check final authentication tag
- if errRead == io.EOF {
- errChunk := ar.validateFinalTag(ar.peekedBytes)
- if errChunk != nil {
- return n, errChunk
- }
- ar.eof = true // Mark EOF for when we've returned all buffered data
- }
- return
-}
-
-// Close is noOp. The final authentication tag of the stream was already
-// checked in the last Read call. In the future, this function could be used to
-// wipe the reader and peeked, decrypted bytes, if necessary.
-func (ar *aeadDecrypter) Close() (err error) {
- if !ar.eof {
- errChunk := ar.validateFinalTag(ar.peekedBytes)
- if errChunk != nil {
- return errChunk
- }
- }
- return nil
-}
-
-// openChunk decrypts and checks integrity of an encrypted chunk, returning
-// the underlying plaintext and an error. It accesses peeked bytes from next
-// chunk, to identify the last chunk and decrypt/validate accordingly.
-func (ar *aeadDecrypter) openChunk(data []byte) ([]byte, error) {
- tagLen := ar.aead.Overhead()
- // Restore carried bytes from last call
- chunkExtra := append(ar.peekedBytes, data...)
- // 'chunk' contains encrypted bytes, followed by an authentication tag.
- chunk := chunkExtra[:len(chunkExtra)-tagLen]
- ar.peekedBytes = chunkExtra[len(chunkExtra)-tagLen:]
-
- adata := ar.associatedData
- if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted {
- adata = append(ar.associatedData, ar.chunkIndex...)
- }
-
- nonce := ar.computeNextNonce()
- plainChunk, err := ar.aead.Open(nil, nonce, chunk, adata)
- if err != nil {
- return nil, err
- }
- ar.bytesProcessed += len(plainChunk)
- if err = ar.aeadCrypter.incrementIndex(); err != nil {
- return nil, err
- }
- return plainChunk, nil
-}
-
-// Checks the summary tag. It takes into account the total decrypted bytes into
-// the associated data. It returns an error, or nil if the tag is valid.
-func (ar *aeadDecrypter) validateFinalTag(tag []byte) error {
- // Associated: tag, version, cipher, aead, chunk size, ...
- amountBytes := make([]byte, 8)
- binary.BigEndian.PutUint64(amountBytes, uint64(ar.bytesProcessed))
-
- adata := ar.associatedData
- if ar.aeadCrypter.packetTag == packetTypeAEADEncrypted {
- // ... index ...
- adata = append(ar.associatedData, ar.chunkIndex...)
- }
-
- // ... and total number of encrypted octets
- adata = append(adata, amountBytes...)
- nonce := ar.computeNextNonce()
- _, err := ar.aead.Open(nil, nonce, tag, adata)
- if err != nil {
- return err
- }
- return nil
-}
-
-// aeadEncrypter encrypts and writes bytes. It encrypts when necessary according
-// to the AEAD block size, and buffers the extra encrypted bytes for next write.
-type aeadEncrypter struct {
- aeadCrypter // Embedded plaintext sealer
- writer io.WriteCloser // 'writer' is a partialLengthWriter
-}
-
-// Write encrypts and writes bytes. It encrypts when necessary and buffers extra
-// plaintext bytes for next call. When the stream is finished, Close() MUST be
-// called to append the final tag.
-func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) {
- // Append plaintextBytes to existing buffered bytes
- n, err = aw.buffer.Write(plaintextBytes)
- if err != nil {
- return n, err
- }
- // Encrypt and write chunks
- for aw.buffer.Len() >= aw.chunkSize {
- plainChunk := aw.buffer.Next(aw.chunkSize)
- encryptedChunk, err := aw.sealChunk(plainChunk)
- if err != nil {
- return n, err
- }
- _, err = aw.writer.Write(encryptedChunk)
- if err != nil {
- return n, err
- }
- }
- return
-}
-
-// Close encrypts and writes the remaining buffered plaintext if any, appends
-// the final authentication tag, and closes the embedded writer. This function
-// MUST be called at the end of a stream.
-func (aw *aeadEncrypter) Close() (err error) {
- // Encrypt and write a chunk if there's buffered data left, or if we haven't
- // written any chunks yet.
- if aw.buffer.Len() > 0 || aw.bytesProcessed == 0 {
- plainChunk := aw.buffer.Bytes()
- lastEncryptedChunk, err := aw.sealChunk(plainChunk)
- if err != nil {
- return err
- }
- _, err = aw.writer.Write(lastEncryptedChunk)
- if err != nil {
- return err
- }
- }
- // Compute final tag (associated data: packet tag, version, cipher, aead,
- // chunk size...
- adata := aw.associatedData
-
- if aw.aeadCrypter.packetTag == packetTypeAEADEncrypted {
- // ... index ...
- adata = append(aw.associatedData, aw.chunkIndex...)
- }
-
- // ... and total number of encrypted octets
- amountBytes := make([]byte, 8)
- binary.BigEndian.PutUint64(amountBytes, uint64(aw.bytesProcessed))
- adata = append(adata, amountBytes...)
-
- nonce := aw.computeNextNonce()
- finalTag := aw.aead.Seal(nil, nonce, nil, adata)
- _, err = aw.writer.Write(finalTag)
- if err != nil {
- return err
- }
- return aw.writer.Close()
-}
-
-// sealChunk Encrypts and authenticates the given chunk.
-func (aw *aeadEncrypter) sealChunk(data []byte) ([]byte, error) {
- if len(data) > aw.chunkSize {
- return nil, errors.AEADError("chunk exceeds maximum length")
- }
- if aw.associatedData == nil {
- return nil, errors.AEADError("can't seal without headers")
- }
- adata := aw.associatedData
- if aw.aeadCrypter.packetTag == packetTypeAEADEncrypted {
- adata = append(aw.associatedData, aw.chunkIndex...)
- }
-
- nonce := aw.computeNextNonce()
- encrypted := aw.aead.Seal(nil, nonce, data, adata)
- aw.bytesProcessed += len(data)
- if err := aw.aeadCrypter.incrementIndex(); err != nil {
- return nil, err
- }
- return encrypted, nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go
deleted file mode 100644
index 98bd876b..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright (C) 2019 ProtonTech AG
-
-package packet
-
-import (
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
-)
-
-// AEADEncrypted represents an AEAD Encrypted Packet.
-// See https://www.ietf.org/archive/id/draft-koch-openpgp-2015-rfc4880bis-00.html#name-aead-encrypted-data-packet-t
-type AEADEncrypted struct {
- cipher CipherFunction
- mode AEADMode
- chunkSizeByte byte
- Contents io.Reader // Encrypted chunks and tags
- initialNonce []byte // Referred to as IV in RFC4880-bis
-}
-
-// Only currently defined version
-const aeadEncryptedVersion = 1
-
-func (ae *AEADEncrypted) parse(buf io.Reader) error {
- headerData := make([]byte, 4)
- if n, err := io.ReadFull(buf, headerData); n < 4 {
- return errors.AEADError("could not read aead header:" + err.Error())
- }
- // Read initial nonce
- mode := AEADMode(headerData[2])
- nonceLen := mode.IvLength()
-
- // This packet supports only EAX and OCB
- // https://www.ietf.org/archive/id/draft-koch-openpgp-2015-rfc4880bis-00.html#name-aead-encrypted-data-packet-t
- if nonceLen == 0 || mode > AEADModeOCB {
- return errors.AEADError("unknown mode")
- }
-
- initialNonce := make([]byte, nonceLen)
- if n, err := io.ReadFull(buf, initialNonce); n < nonceLen {
- return errors.AEADError("could not read aead nonce:" + err.Error())
- }
- ae.Contents = buf
- ae.initialNonce = initialNonce
- c := headerData[1]
- if _, ok := algorithm.CipherById[c]; !ok {
- return errors.UnsupportedError("unknown cipher: " + string(c))
- }
- ae.cipher = CipherFunction(c)
- ae.mode = mode
- ae.chunkSizeByte = headerData[3]
- return nil
-}
-
-// Decrypt returns a io.ReadCloser from which decrypted bytes can be read, or
-// an error.
-func (ae *AEADEncrypted) Decrypt(ciph CipherFunction, key []byte) (io.ReadCloser, error) {
- return ae.decrypt(key)
-}
-
-// decrypt prepares an aeadCrypter and returns a ReadCloser from which
-// decrypted bytes can be read (see aeadDecrypter.Read()).
-func (ae *AEADEncrypted) decrypt(key []byte) (io.ReadCloser, error) {
- blockCipher := ae.cipher.new(key)
- aead := ae.mode.new(blockCipher)
- // Carry the first tagLen bytes
- tagLen := ae.mode.TagLength()
- peekedBytes := make([]byte, tagLen)
- n, err := io.ReadFull(ae.Contents, peekedBytes)
- if n < tagLen || (err != nil && err != io.EOF) {
- return nil, errors.AEADError("Not enough data to decrypt:" + err.Error())
- }
- chunkSize := decodeAEADChunkSize(ae.chunkSizeByte)
- return &aeadDecrypter{
- aeadCrypter: aeadCrypter{
- aead: aead,
- chunkSize: chunkSize,
- initialNonce: ae.initialNonce,
- associatedData: ae.associatedData(),
- chunkIndex: make([]byte, 8),
- packetTag: packetTypeAEADEncrypted,
- },
- reader: ae.Contents,
- peekedBytes: peekedBytes}, nil
-}
-
-// associatedData for chunks: tag, version, cipher, mode, chunk size byte
-func (ae *AEADEncrypted) associatedData() []byte {
- return []byte{
- 0xD4,
- aeadEncryptedVersion,
- byte(ae.cipher),
- byte(ae.mode),
- ae.chunkSizeByte}
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go
deleted file mode 100644
index 334de286..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "compress/bzip2"
- "compress/flate"
- "compress/zlib"
- "io"
- "io/ioutil"
- "strconv"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-// Compressed represents a compressed OpenPGP packet. The decompressed contents
-// will contain more OpenPGP packets. See RFC 4880, section 5.6.
-type Compressed struct {
- Body io.Reader
-}
-
-const (
- NoCompression = flate.NoCompression
- BestSpeed = flate.BestSpeed
- BestCompression = flate.BestCompression
- DefaultCompression = flate.DefaultCompression
-)
-
-// CompressionConfig contains compressor configuration settings.
-type CompressionConfig struct {
- // Level is the compression level to use. It must be set to
- // between -1 and 9, with -1 causing the compressor to use the
- // default compression level, 0 causing the compressor to use
- // no compression and 1 to 9 representing increasing (better,
- // slower) compression levels. If Level is less than -1 or
- // more then 9, a non-nil error will be returned during
- // encryption. See the constants above for convenient common
- // settings for Level.
- Level int
-}
-
-// decompressionReader ensures that the whole compression packet is read.
-type decompressionReader struct {
- compressed io.Reader
- decompressed io.ReadCloser
- readAll bool
-}
-
-func newDecompressionReader(r io.Reader, decompressor io.ReadCloser) *decompressionReader {
- return &decompressionReader{
- compressed: r,
- decompressed: decompressor,
- }
-}
-
-func (dr *decompressionReader) Read(data []byte) (n int, err error) {
- if dr.readAll {
- return 0, io.EOF
- }
- n, err = dr.decompressed.Read(data)
- if err == io.EOF {
- dr.readAll = true
- // Close the decompressor.
- if errDec := dr.decompressed.Close(); errDec != nil {
- return n, errDec
- }
- // Consume all remaining data from the compressed packet.
- consumeAll(dr.compressed)
- }
- return n, err
-}
-
-func (c *Compressed) parse(r io.Reader) error {
- var buf [1]byte
- _, err := readFull(r, buf[:])
- if err != nil {
- return err
- }
-
- switch buf[0] {
- case 0:
- c.Body = r
- case 1:
- c.Body = newDecompressionReader(r, flate.NewReader(r))
- case 2:
- decompressor, err := zlib.NewReader(r)
- if err != nil {
- return err
- }
- c.Body = newDecompressionReader(r, decompressor)
- case 3:
- c.Body = newDecompressionReader(r, ioutil.NopCloser(bzip2.NewReader(r)))
- default:
- err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
- }
-
- return err
-}
-
-// compressedWriterCloser represents the serialized compression stream
-// header and the compressor. Its Close() method ensures that both the
-// compressor and serialized stream header are closed. Its Write()
-// method writes to the compressor.
-type compressedWriteCloser struct {
- sh io.Closer // Stream Header
- c io.WriteCloser // Compressor
-}
-
-func (cwc compressedWriteCloser) Write(p []byte) (int, error) {
- return cwc.c.Write(p)
-}
-
-func (cwc compressedWriteCloser) Close() (err error) {
- err = cwc.c.Close()
- if err != nil {
- return err
- }
-
- return cwc.sh.Close()
-}
-
-// SerializeCompressed serializes a compressed data packet to w and
-// returns a WriteCloser to which the literal data packets themselves
-// can be written and which MUST be closed on completion. If cc is
-// nil, sensible defaults will be used to configure the compression
-// algorithm.
-func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) {
- compressed, err := serializeStreamHeader(w, packetTypeCompressed)
- if err != nil {
- return
- }
-
- _, err = compressed.Write([]byte{uint8(algo)})
- if err != nil {
- return
- }
-
- level := DefaultCompression
- if cc != nil {
- level = cc.Level
- }
-
- var compressor io.WriteCloser
- switch algo {
- case CompressionZIP:
- compressor, err = flate.NewWriter(compressed, level)
- case CompressionZLIB:
- compressor, err = zlib.NewWriterLevel(compressed, level)
- default:
- s := strconv.Itoa(int(algo))
- err = errors.UnsupportedError("Unsupported compression algorithm: " + s)
- }
- if err != nil {
- return
- }
-
- literaldata = compressedWriteCloser{compressed, compressor}
-
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go
deleted file mode 100644
index 181d5d34..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go
+++ /dev/null
@@ -1,352 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "crypto"
- "crypto/rand"
- "io"
- "math/big"
- "time"
-
- "github.com/ProtonMail/go-crypto/openpgp/s2k"
-)
-
-var (
- defaultRejectPublicKeyAlgorithms = map[PublicKeyAlgorithm]bool{
- PubKeyAlgoElGamal: true,
- PubKeyAlgoDSA: true,
- }
- defaultRejectMessageHashAlgorithms = map[crypto.Hash]bool{
- crypto.SHA1: true,
- crypto.MD5: true,
- crypto.RIPEMD160: true,
- }
- defaultRejectCurves = map[Curve]bool{
- CurveSecP256k1: true,
- }
-)
-
-// Config collects a number of parameters along with sensible defaults.
-// A nil *Config is valid and results in all default values.
-type Config struct {
- // Rand provides the source of entropy.
- // If nil, the crypto/rand Reader is used.
- Rand io.Reader
- // DefaultHash is the default hash function to be used.
- // If zero, SHA-256 is used.
- DefaultHash crypto.Hash
- // DefaultCipher is the cipher to be used.
- // If zero, AES-128 is used.
- DefaultCipher CipherFunction
- // Time returns the current time as the number of seconds since the
- // epoch. If Time is nil, time.Now is used.
- Time func() time.Time
- // DefaultCompressionAlgo is the compression algorithm to be
- // applied to the plaintext before encryption. If zero, no
- // compression is done.
- DefaultCompressionAlgo CompressionAlgo
- // CompressionConfig configures the compression settings.
- CompressionConfig *CompressionConfig
- // S2K (String to Key) config, used for key derivation in the context of secret key encryption
- // and password-encrypted data.
- // If nil, the default configuration is used
- S2KConfig *s2k.Config
- // Iteration count for Iterated S2K (String to Key).
- // Only used if sk2.Mode is nil.
- // This value is duplicated here from s2k.Config for backwards compatibility.
- // It determines the strength of the passphrase stretching when
- // the said passphrase is hashed to produce a key. S2KCount
- // should be between 65536 and 65011712, inclusive. If Config
- // is nil or S2KCount is 0, the value 16777216 used. Not all
- // values in the above range can be represented. S2KCount will
- // be rounded up to the next representable value if it cannot
- // be encoded exactly. When set, it is strongly encrouraged to
- // use a value that is at least 65536. See RFC 4880 Section
- // 3.7.1.3.
- //
- // Deprecated: SK2Count should be configured in S2KConfig instead.
- S2KCount int
- // RSABits is the number of bits in new RSA keys made with NewEntity.
- // If zero, then 2048 bit keys are created.
- RSABits int
- // The public key algorithm to use - will always create a signing primary
- // key and encryption subkey.
- Algorithm PublicKeyAlgorithm
- // Some known primes that are optionally prepopulated by the caller
- RSAPrimes []*big.Int
- // Curve configures the desired packet.Curve if the Algorithm is PubKeyAlgoECDSA,
- // PubKeyAlgoEdDSA, or PubKeyAlgoECDH. If empty Curve25519 is used.
- Curve Curve
- // AEADConfig configures the use of the new AEAD Encrypted Data Packet,
- // defined in the draft of the next version of the OpenPGP specification.
- // If a non-nil AEADConfig is passed, usage of this packet is enabled. By
- // default, it is disabled. See the documentation of AEADConfig for more
- // configuration options related to AEAD.
- // **Note: using this option may break compatibility with other OpenPGP
- // implementations, as well as future versions of this library.**
- AEADConfig *AEADConfig
- // V6Keys configures version 6 key generation. If false, this package still
- // supports version 6 keys, but produces version 4 keys.
- V6Keys bool
- // Minimum RSA key size allowed for key generation and message signing, verification and encryption.
- MinRSABits uint16
- // Reject insecure algorithms, only works with v2 api
- RejectPublicKeyAlgorithms map[PublicKeyAlgorithm]bool
- RejectMessageHashAlgorithms map[crypto.Hash]bool
- RejectCurves map[Curve]bool
- // "The validity period of the key. This is the number of seconds after
- // the key creation time that the key expires. If this is not present
- // or has a value of zero, the key never expires. This is found only on
- // a self-signature.""
- // https://tools.ietf.org/html/rfc4880#section-5.2.3.6
- KeyLifetimeSecs uint32
- // "The validity period of the signature. This is the number of seconds
- // after the signature creation time that the signature expires. If
- // this is not present or has a value of zero, it never expires."
- // https://tools.ietf.org/html/rfc4880#section-5.2.3.10
- SigLifetimeSecs uint32
- // SigningKeyId is used to specify the signing key to use (by Key ID).
- // By default, the signing key is selected automatically, preferring
- // signing subkeys if available.
- SigningKeyId uint64
- // SigningIdentity is used to specify a user ID (packet Signer's User ID, type 28)
- // when producing a generic certification signature onto an existing user ID.
- // The identity must be present in the signer Entity.
- SigningIdentity string
- // InsecureAllowUnauthenticatedMessages controls, whether it is tolerated to read
- // encrypted messages without Modification Detection Code (MDC).
- // MDC is mandated by the IETF OpenPGP Crypto Refresh draft and has long been implemented
- // in most OpenPGP implementations. Messages without MDC are considered unnecessarily
- // insecure and should be prevented whenever possible.
- // In case one needs to deal with messages from very old OpenPGP implementations, there
- // might be no other way than to tolerate the missing MDC. Setting this flag, allows this
- // mode of operation. It should be considered a measure of last resort.
- InsecureAllowUnauthenticatedMessages bool
- // KnownNotations is a map of Notation Data names to bools, which controls
- // the notation names that are allowed to be present in critical Notation Data
- // signature subpackets.
- KnownNotations map[string]bool
- // SignatureNotations is a list of Notations to be added to any signatures.
- SignatureNotations []*Notation
- // CheckIntendedRecipients controls, whether the OpenPGP Intended Recipient Fingerprint feature
- // should be enabled for encryption and decryption.
- // (See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-12.html#name-intended-recipient-fingerpr).
- // When the flag is set, encryption produces Intended Recipient Fingerprint signature sub-packets and decryption
- // checks whether the key it was encrypted to is one of the included fingerprints in the signature.
- // If the flag is disabled, no Intended Recipient Fingerprint sub-packets are created or checked.
- // The default behavior, when the config or flag is nil, is to enable the feature.
- CheckIntendedRecipients *bool
- // CacheSessionKey controls if decryption should return the session key used for decryption.
- // If the flag is set, the session key is cached in the message details struct.
- CacheSessionKey bool
- // CheckPacketSequence is a flag that controls if the pgp message reader should strictly check
- // that the packet sequence conforms with the grammar mandated by rfc4880.
- // The default behavior, when the config or flag is nil, is to check the packet sequence.
- CheckPacketSequence *bool
-}
-
-func (c *Config) Random() io.Reader {
- if c == nil || c.Rand == nil {
- return rand.Reader
- }
- return c.Rand
-}
-
-func (c *Config) Hash() crypto.Hash {
- if c == nil || uint(c.DefaultHash) == 0 {
- return crypto.SHA256
- }
- return c.DefaultHash
-}
-
-func (c *Config) Cipher() CipherFunction {
- if c == nil || uint8(c.DefaultCipher) == 0 {
- return CipherAES128
- }
- return c.DefaultCipher
-}
-
-func (c *Config) Now() time.Time {
- if c == nil || c.Time == nil {
- return time.Now().Truncate(time.Second)
- }
- return c.Time().Truncate(time.Second)
-}
-
-// KeyLifetime returns the validity period of the key.
-func (c *Config) KeyLifetime() uint32 {
- if c == nil {
- return 0
- }
- return c.KeyLifetimeSecs
-}
-
-// SigLifetime returns the validity period of the signature.
-func (c *Config) SigLifetime() uint32 {
- if c == nil {
- return 0
- }
- return c.SigLifetimeSecs
-}
-
-func (c *Config) Compression() CompressionAlgo {
- if c == nil {
- return CompressionNone
- }
- return c.DefaultCompressionAlgo
-}
-
-func (c *Config) RSAModulusBits() int {
- if c == nil || c.RSABits == 0 {
- return 2048
- }
- return c.RSABits
-}
-
-func (c *Config) PublicKeyAlgorithm() PublicKeyAlgorithm {
- if c == nil || c.Algorithm == 0 {
- return PubKeyAlgoRSA
- }
- return c.Algorithm
-}
-
-func (c *Config) CurveName() Curve {
- if c == nil || c.Curve == "" {
- return Curve25519
- }
- return c.Curve
-}
-
-// Deprecated: The hash iterations should now be queried via the S2K() method.
-func (c *Config) PasswordHashIterations() int {
- if c == nil || c.S2KCount == 0 {
- return 0
- }
- return c.S2KCount
-}
-
-func (c *Config) S2K() *s2k.Config {
- if c == nil {
- return nil
- }
- // for backwards compatibility
- if c != nil && c.S2KCount > 0 && c.S2KConfig == nil {
- return &s2k.Config{
- S2KCount: c.S2KCount,
- }
- }
- return c.S2KConfig
-}
-
-func (c *Config) AEAD() *AEADConfig {
- if c == nil {
- return nil
- }
- return c.AEADConfig
-}
-
-func (c *Config) SigningKey() uint64 {
- if c == nil {
- return 0
- }
- return c.SigningKeyId
-}
-
-func (c *Config) SigningUserId() string {
- if c == nil {
- return ""
- }
- return c.SigningIdentity
-}
-
-func (c *Config) AllowUnauthenticatedMessages() bool {
- if c == nil {
- return false
- }
- return c.InsecureAllowUnauthenticatedMessages
-}
-
-func (c *Config) KnownNotation(notationName string) bool {
- if c == nil {
- return false
- }
- return c.KnownNotations[notationName]
-}
-
-func (c *Config) Notations() []*Notation {
- if c == nil {
- return nil
- }
- return c.SignatureNotations
-}
-
-func (c *Config) V6() bool {
- if c == nil {
- return false
- }
- return c.V6Keys
-}
-
-func (c *Config) IntendedRecipients() bool {
- if c == nil || c.CheckIntendedRecipients == nil {
- return true
- }
- return *c.CheckIntendedRecipients
-}
-
-func (c *Config) RetrieveSessionKey() bool {
- if c == nil {
- return false
- }
- return c.CacheSessionKey
-}
-
-func (c *Config) MinimumRSABits() uint16 {
- if c == nil || c.MinRSABits == 0 {
- return 2047
- }
- return c.MinRSABits
-}
-
-func (c *Config) RejectPublicKeyAlgorithm(alg PublicKeyAlgorithm) bool {
- var rejectedAlgorithms map[PublicKeyAlgorithm]bool
- if c == nil || c.RejectPublicKeyAlgorithms == nil {
- // Default
- rejectedAlgorithms = defaultRejectPublicKeyAlgorithms
- } else {
- rejectedAlgorithms = c.RejectPublicKeyAlgorithms
- }
- return rejectedAlgorithms[alg]
-}
-
-func (c *Config) RejectMessageHashAlgorithm(hash crypto.Hash) bool {
- var rejectedAlgorithms map[crypto.Hash]bool
- if c == nil || c.RejectMessageHashAlgorithms == nil {
- // Default
- rejectedAlgorithms = defaultRejectMessageHashAlgorithms
- } else {
- rejectedAlgorithms = c.RejectMessageHashAlgorithms
- }
- return rejectedAlgorithms[hash]
-}
-
-func (c *Config) RejectCurve(curve Curve) bool {
- var rejectedCurve map[Curve]bool
- if c == nil || c.RejectCurves == nil {
- // Default
- rejectedCurve = defaultRejectCurves
- } else {
- rejectedCurve = c.RejectCurves
- }
- return rejectedCurve[curve]
-}
-
-func (c *Config) StrictPacketSequence() bool {
- if c == nil || c.CheckPacketSequence == nil {
- return true
- }
- return *c.CheckPacketSequence
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go
deleted file mode 100644
index 64d898f7..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go
+++ /dev/null
@@ -1,659 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "crypto"
- "crypto/rsa"
- "encoding/binary"
- "encoding/hex"
- "io"
- "math/big"
- "strconv"
-
- "github.com/ProtonMail/go-crypto/openpgp/ecdh"
- "github.com/ProtonMail/go-crypto/openpgp/elgamal"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
- "github.com/ProtonMail/go-crypto/openpgp/symmetric"
- "github.com/ProtonMail/go-crypto/openpgp/x25519"
- "github.com/ProtonMail/go-crypto/openpgp/x448"
-)
-
-// EncryptedKey represents a public-key encrypted session key. See RFC 4880,
-// section 5.1.
-type EncryptedKey struct {
- Version int
- KeyId uint64
- KeyVersion int // v6
- KeyFingerprint []byte // v6
- Algo PublicKeyAlgorithm
- CipherFunc CipherFunction // only valid after a successful Decrypt for a v3 packet
- Key []byte // only valid after a successful Decrypt
-
- encryptedMPI1, encryptedMPI2 encoding.Field
- ephemeralPublicX25519 *x25519.PublicKey // used for x25519
- ephemeralPublicX448 *x448.PublicKey // used for x448
- encryptedSession []byte // used for x25519 and x448
-
- nonce []byte
- aeadMode algorithm.AEADMode
-}
-
-func (e *EncryptedKey) parse(r io.Reader) (err error) {
- var buf [8]byte
- _, err = readFull(r, buf[:versionSize])
- if err != nil {
- return
- }
- e.Version = int(buf[0])
- if e.Version != 3 && e.Version != 6 {
- return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0])))
- }
- if e.Version == 6 {
- //Read a one-octet size of the following two fields.
- if _, err = readFull(r, buf[:1]); err != nil {
- return
- }
- // The size may also be zero, and the key version and
- // fingerprint omitted for an "anonymous recipient"
- if buf[0] != 0 {
- // non-anonymous case
- _, err = readFull(r, buf[:versionSize])
- if err != nil {
- return
- }
- e.KeyVersion = int(buf[0])
- if e.KeyVersion != 4 && e.KeyVersion != 6 {
- return errors.UnsupportedError("unknown public key version " + strconv.Itoa(e.KeyVersion))
- }
- var fingerprint []byte
- if e.KeyVersion == 6 {
- fingerprint = make([]byte, fingerprintSizeV6)
- } else if e.KeyVersion == 4 {
- fingerprint = make([]byte, fingerprintSize)
- }
- _, err = readFull(r, fingerprint)
- if err != nil {
- return
- }
- e.KeyFingerprint = fingerprint
- if e.KeyVersion == 6 {
- e.KeyId = binary.BigEndian.Uint64(e.KeyFingerprint[:keyIdSize])
- } else if e.KeyVersion == 4 {
- e.KeyId = binary.BigEndian.Uint64(e.KeyFingerprint[fingerprintSize-keyIdSize : fingerprintSize])
- }
- }
- } else {
- _, err = readFull(r, buf[:8])
- if err != nil {
- return
- }
- e.KeyId = binary.BigEndian.Uint64(buf[:keyIdSize])
- }
-
- _, err = readFull(r, buf[:1])
- if err != nil {
- return
- }
- e.Algo = PublicKeyAlgorithm(buf[0])
- var cipherFunction byte
- switch e.Algo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- e.encryptedMPI1 = new(encoding.MPI)
- if _, err = e.encryptedMPI1.ReadFrom(r); err != nil {
- return
- }
- case PubKeyAlgoElGamal:
- e.encryptedMPI1 = new(encoding.MPI)
- if _, err = e.encryptedMPI1.ReadFrom(r); err != nil {
- return
- }
-
- e.encryptedMPI2 = new(encoding.MPI)
- if _, err = e.encryptedMPI2.ReadFrom(r); err != nil {
- return
- }
- case PubKeyAlgoECDH:
- e.encryptedMPI1 = new(encoding.MPI)
- if _, err = e.encryptedMPI1.ReadFrom(r); err != nil {
- return
- }
-
- e.encryptedMPI2 = new(encoding.OID)
- if _, err = e.encryptedMPI2.ReadFrom(r); err != nil {
- return
- }
- case PubKeyAlgoX25519:
- e.ephemeralPublicX25519, e.encryptedSession, cipherFunction, err = x25519.DecodeFields(r, e.Version == 6)
- if err != nil {
- return
- }
- case PubKeyAlgoX448:
- e.ephemeralPublicX448, e.encryptedSession, cipherFunction, err = x448.DecodeFields(r, e.Version == 6)
- if err != nil {
- return
- }
- case ExperimentalPubKeyAlgoAEAD:
- var aeadMode [1]byte
- if _, err = readFull(r, aeadMode[:]); err != nil {
- return
- }
- e.aeadMode = algorithm.AEADMode(aeadMode[0])
- nonceLength := e.aeadMode.NonceLength()
- e.nonce = make([]byte, nonceLength)
- if _, err = readFull(r, e.nonce); err != nil {
- return
- }
- e.encryptedMPI1 = new(encoding.ShortByteString)
- if _, err = e.encryptedMPI1.ReadFrom(r); err != nil {
- return
- }
- }
- if e.Version < 6 {
- switch e.Algo {
- case PubKeyAlgoX25519, PubKeyAlgoX448:
- e.CipherFunc = CipherFunction(cipherFunction)
- // Check for validiy is in the Decrypt method
- }
- }
-
- _, err = consumeAll(r)
- return
-}
-
-// Decrypt decrypts an encrypted session key with the given private key. The
-// private key must have been decrypted first.
-// If config is nil, sensible defaults will be used.
-func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error {
- if e.Version < 6 && e.KeyId != 0 && e.KeyId != priv.KeyId {
- return errors.InvalidArgumentError("cannot decrypt encrypted session key for key id " + strconv.FormatUint(e.KeyId, 16) + " with private key id " + strconv.FormatUint(priv.KeyId, 16))
- }
- if e.Version == 6 && e.KeyVersion != 0 && !bytes.Equal(e.KeyFingerprint, priv.Fingerprint) {
- return errors.InvalidArgumentError("cannot decrypt encrypted session key for key fingerprint " + hex.EncodeToString(e.KeyFingerprint) + " with private key fingerprint " + hex.EncodeToString(priv.Fingerprint))
- }
- if e.Algo != priv.PubKeyAlgo {
- return errors.InvalidArgumentError("cannot decrypt encrypted session key of type " + strconv.Itoa(int(e.Algo)) + " with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
- }
- if priv.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
-
- var err error
- var b []byte
-
- // TODO(agl): use session key decryption routines here to avoid
- // padding oracle attacks.
- switch priv.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- // Supports both *rsa.PrivateKey and crypto.Decrypter
- k := priv.PrivateKey.(crypto.Decrypter)
- b, err = k.Decrypt(config.Random(), padToKeySize(k.Public().(*rsa.PublicKey), e.encryptedMPI1.Bytes()), nil)
- case PubKeyAlgoElGamal:
- c1 := new(big.Int).SetBytes(e.encryptedMPI1.Bytes())
- c2 := new(big.Int).SetBytes(e.encryptedMPI2.Bytes())
- b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2)
- case PubKeyAlgoECDH:
- vsG := e.encryptedMPI1.Bytes()
- m := e.encryptedMPI2.Bytes()
- oid := priv.PublicKey.oid.EncodedBytes()
- b, err = ecdh.Decrypt(priv.PrivateKey.(*ecdh.PrivateKey), vsG, m, oid, priv.PublicKey.Fingerprint[:])
- case PubKeyAlgoX25519:
- b, err = x25519.Decrypt(priv.PrivateKey.(*x25519.PrivateKey), e.ephemeralPublicX25519, e.encryptedSession)
- case PubKeyAlgoX448:
- b, err = x448.Decrypt(priv.PrivateKey.(*x448.PrivateKey), e.ephemeralPublicX448, e.encryptedSession)
- case ExperimentalPubKeyAlgoAEAD:
- priv := priv.PrivateKey.(*symmetric.AEADPrivateKey)
- b, err = priv.Decrypt(e.nonce, e.encryptedMPI1.Bytes(), e.aeadMode)
- default:
- err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo)))
- }
- if err != nil {
- return err
- }
-
- var key []byte
- switch priv.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH, ExperimentalPubKeyAlgoAEAD:
- keyOffset := 0
- if e.Version < 6 {
- e.CipherFunc = CipherFunction(b[0])
- keyOffset = 1
- if !e.CipherFunc.IsSupported() {
- return errors.UnsupportedError("unsupported encryption function")
- }
- }
- key, err = decodeChecksumKey(b[keyOffset:])
- if err != nil {
- return err
- }
- case PubKeyAlgoX25519, PubKeyAlgoX448:
- if e.Version < 6 {
- switch e.CipherFunc {
- case CipherAES128, CipherAES192, CipherAES256:
- break
- default:
- return errors.StructuralError("v3 PKESK mandates AES as cipher function for x25519 and x448")
- }
- }
- key = b[:]
- default:
- return errors.UnsupportedError("unsupported algorithm for decryption")
- }
- e.Key = key
- return nil
-}
-
-// Serialize writes the encrypted key packet, e, to w.
-func (e *EncryptedKey) Serialize(w io.Writer) error {
- var encodedLength int
- switch e.Algo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- encodedLength = int(e.encryptedMPI1.EncodedLength())
- case PubKeyAlgoElGamal:
- encodedLength = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength())
- case PubKeyAlgoECDH:
- encodedLength = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength())
- case PubKeyAlgoX25519:
- encodedLength = x25519.EncodedFieldsLength(e.encryptedSession, e.Version == 6)
- case PubKeyAlgoX448:
- encodedLength = x448.EncodedFieldsLength(e.encryptedSession, e.Version == 6)
- default:
- return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo)))
- }
-
- packetLen := versionSize /* version */ + keyIdSize /* key id */ + algorithmSize /* algo */ + encodedLength
- if e.Version == 6 {
- packetLen = versionSize /* version */ + algorithmSize /* algo */ + encodedLength + keyVersionSize /* key version */
- if e.KeyVersion == 6 {
- packetLen += fingerprintSizeV6
- } else if e.KeyVersion == 4 {
- packetLen += fingerprintSize
- }
- }
-
- err := serializeHeader(w, packetTypeEncryptedKey, packetLen)
- if err != nil {
- return err
- }
-
- _, err = w.Write([]byte{byte(e.Version)})
- if err != nil {
- return err
- }
- if e.Version == 6 {
- _, err = w.Write([]byte{byte(e.KeyVersion)})
- if err != nil {
- return err
- }
- // The key version number may also be zero,
- // and the fingerprint omitted
- if e.KeyVersion != 0 {
- _, err = w.Write(e.KeyFingerprint)
- if err != nil {
- return err
- }
- }
- } else {
- // Write KeyID
- err = binary.Write(w, binary.BigEndian, e.KeyId)
- if err != nil {
- return err
- }
- }
- _, err = w.Write([]byte{byte(e.Algo)})
- if err != nil {
- return err
- }
-
- switch e.Algo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- _, err := w.Write(e.encryptedMPI1.EncodedBytes())
- return err
- case PubKeyAlgoElGamal:
- if _, err := w.Write(e.encryptedMPI1.EncodedBytes()); err != nil {
- return err
- }
- _, err := w.Write(e.encryptedMPI2.EncodedBytes())
- return err
- case PubKeyAlgoECDH:
- if _, err := w.Write(e.encryptedMPI1.EncodedBytes()); err != nil {
- return err
- }
- _, err := w.Write(e.encryptedMPI2.EncodedBytes())
- return err
- case PubKeyAlgoX25519:
- err := x25519.EncodeFields(w, e.ephemeralPublicX25519, e.encryptedSession, byte(e.CipherFunc), e.Version == 6)
- return err
- case PubKeyAlgoX448:
- err := x448.EncodeFields(w, e.ephemeralPublicX448, e.encryptedSession, byte(e.CipherFunc), e.Version == 6)
- return err
- default:
- panic("internal error")
- }
-}
-
-// SerializeEncryptedKeyAEAD serializes an encrypted key packet to w that contains
-// key, encrypted to pub.
-// If aeadSupported is set, PKESK v6 is used else v4.
-// If config is nil, sensible defaults will be used.
-func SerializeEncryptedKeyAEAD(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, aeadSupported bool, key []byte, config *Config) error {
- return SerializeEncryptedKeyAEADwithHiddenOption(w, pub, cipherFunc, aeadSupported, key, false, config)
-}
-
-// SerializeEncryptedKeyAEADwithHiddenOption serializes an encrypted key packet to w that contains
-// key, encrypted to pub.
-// Offers the hidden flag option to indicated if the PKESK packet should include a wildcard KeyID.
-// If aeadSupported is set, PKESK v6 is used else v4.
-// If config is nil, sensible defaults will be used.
-func SerializeEncryptedKeyAEADwithHiddenOption(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, aeadSupported bool, key []byte, hidden bool, config *Config) error {
- var buf [36]byte // max possible header size is v6
- lenHeaderWritten := versionSize
- version := 3
-
- if aeadSupported {
- version = 6
- }
- // An implementation MUST NOT generate ElGamal v6 PKESKs.
- if version == 6 && pub.PubKeyAlgo == PubKeyAlgoElGamal {
- return errors.InvalidArgumentError("ElGamal v6 PKESK are not allowed")
- }
- // In v3 PKESKs, for x25519 and x448, mandate using AES
- if version == 3 && (pub.PubKeyAlgo == PubKeyAlgoX25519 || pub.PubKeyAlgo == PubKeyAlgoX448) {
- switch cipherFunc {
- case CipherAES128, CipherAES192, CipherAES256:
- break
- default:
- return errors.InvalidArgumentError("v3 PKESK mandates AES for x25519 and x448")
- }
- }
-
- buf[0] = byte(version)
-
- // If hidden is set, the key should be hidden
- // An implementation MAY accept or use a Key ID of all zeros,
- // or a key version of zero and no key fingerprint, to hide the intended decryption key.
- // See Section 5.1.8. in the open pgp crypto refresh
- if version == 6 {
- if !hidden {
- // A one-octet size of the following two fields.
- buf[1] = byte(keyVersionSize + len(pub.Fingerprint))
- // A one octet key version number.
- buf[2] = byte(pub.Version)
- lenHeaderWritten += keyVersionSize + 1
- // The fingerprint of the public key
- copy(buf[lenHeaderWritten:lenHeaderWritten+len(pub.Fingerprint)], pub.Fingerprint)
- lenHeaderWritten += len(pub.Fingerprint)
- } else {
- // The size may also be zero, and the key version
- // and fingerprint omitted for an "anonymous recipient"
- buf[1] = 0
- lenHeaderWritten += 1
- }
- } else {
- if !hidden {
- binary.BigEndian.PutUint64(buf[versionSize:(versionSize+keyIdSize)], pub.KeyId)
- }
- lenHeaderWritten += keyIdSize
- }
- buf[lenHeaderWritten] = byte(pub.PubKeyAlgo)
- lenHeaderWritten += algorithmSize
-
- var keyBlock []byte
- switch pub.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH, ExperimentalPubKeyAlgoAEAD:
- lenKeyBlock := len(key) + 2
- if version < 6 {
- lenKeyBlock += 1 // cipher type included
- }
- keyBlock = make([]byte, lenKeyBlock)
- keyOffset := 0
- if version < 6 {
- keyBlock[0] = byte(cipherFunc)
- keyOffset = 1
- }
- encodeChecksumKey(keyBlock[keyOffset:], key)
- case PubKeyAlgoX25519, PubKeyAlgoX448:
- // algorithm is added in plaintext below
- keyBlock = key
- }
-
- switch pub.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly:
- return serializeEncryptedKeyRSA(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*rsa.PublicKey), keyBlock)
- case PubKeyAlgoElGamal:
- return serializeEncryptedKeyElGamal(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*elgamal.PublicKey), keyBlock)
- case PubKeyAlgoECDH:
- return serializeEncryptedKeyECDH(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*ecdh.PublicKey), keyBlock, pub.oid, pub.Fingerprint)
- case PubKeyAlgoX25519:
- return serializeEncryptedKeyX25519(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*x25519.PublicKey), keyBlock, byte(cipherFunc), version)
- case PubKeyAlgoX448:
- return serializeEncryptedKeyX448(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*x448.PublicKey), keyBlock, byte(cipherFunc), version)
- case ExperimentalPubKeyAlgoAEAD:
- return serializeEncryptedKeyAEAD(w, config.Random(), buf[:lenHeaderWritten], pub.PublicKey.(*symmetric.AEADPublicKey), keyBlock, config.AEAD())
- case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly, ExperimentalPubKeyAlgoHMAC:
- return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
- }
-
- return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo)))
-}
-
-// SerializeEncryptedKey serializes an encrypted key packet to w that contains
-// key, encrypted to pub.
-// PKESKv6 is used if config.AEAD() is not nil.
-// If config is nil, sensible defaults will be used.
-func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error {
- return SerializeEncryptedKeyAEAD(w, pub, cipherFunc, config.AEAD() != nil, key, config)
-}
-
-// SerializeEncryptedKeyWithHiddenOption serializes an encrypted key packet to w that contains
-// key, encrypted to pub. PKESKv6 is used if config.AEAD() is not nil.
-// The hidden option controls if the packet should be anonymous, i.e., omit key metadata.
-// If config is nil, sensible defaults will be used.
-func SerializeEncryptedKeyWithHiddenOption(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, hidden bool, config *Config) error {
- return SerializeEncryptedKeyAEADwithHiddenOption(w, pub, cipherFunc, config.AEAD() != nil, key, hidden, config)
-}
-
-func (e *EncryptedKey) ProxyTransform(instance ForwardingInstance) (transformed *EncryptedKey, err error) {
- if e.Algo != PubKeyAlgoECDH {
- return nil, errors.InvalidArgumentError("invalid PKESK")
- }
-
- if e.KeyId != 0 && e.KeyId != instance.GetForwarderKeyId() {
- return nil, errors.InvalidArgumentError("invalid key id in PKESK")
- }
-
- ephemeral := e.encryptedMPI1.Bytes()
- transformedEphemeral, err := ecdh.ProxyTransform(ephemeral, instance.ProxyParameter)
- if err != nil {
- return nil, err
- }
-
- wrappedKey := e.encryptedMPI2.Bytes()
- copiedWrappedKey := make([]byte, len(wrappedKey))
- copy(copiedWrappedKey, wrappedKey)
-
- transformed = &EncryptedKey{
- Version: e.Version,
- KeyId: instance.getForwardeeKeyIdOrZero(e.KeyId),
- Algo: e.Algo,
- encryptedMPI1: encoding.NewMPI(transformedEphemeral),
- encryptedMPI2: encoding.NewOID(copiedWrappedKey),
- }
-
- return transformed, nil
-}
-
-func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header []byte, pub *rsa.PublicKey, keyBlock []byte) error {
- cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock)
- if err != nil {
- return errors.InvalidArgumentError("RSA encryption failed: " + err.Error())
- }
-
- cipherMPI := encoding.NewMPI(cipherText)
- packetLen := len(header) /* header length */ + int(cipherMPI.EncodedLength())
-
- err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
- if err != nil {
- return err
- }
- _, err = w.Write(header[:])
- if err != nil {
- return err
- }
- _, err = w.Write(cipherMPI.EncodedBytes())
- return err
-}
-
-func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header []byte, pub *elgamal.PublicKey, keyBlock []byte) error {
- c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock)
- if err != nil {
- return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error())
- }
-
- packetLen := len(header) /* header length */
- packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8
- packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8
-
- err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
- if err != nil {
- return err
- }
- _, err = w.Write(header[:])
- if err != nil {
- return err
- }
- if _, err = w.Write(new(encoding.MPI).SetBig(c1).EncodedBytes()); err != nil {
- return err
- }
- _, err = w.Write(new(encoding.MPI).SetBig(c2).EncodedBytes())
- return err
-}
-
-func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header []byte, pub *ecdh.PublicKey, keyBlock []byte, oid encoding.Field, fingerprint []byte) error {
- vsG, c, err := ecdh.Encrypt(rand, pub, keyBlock, oid.EncodedBytes(), fingerprint)
- if err != nil {
- return errors.InvalidArgumentError("ECDH encryption failed: " + err.Error())
- }
-
- g := encoding.NewMPI(vsG)
- m := encoding.NewOID(c)
-
- packetLen := len(header) /* header length */
- packetLen += int(g.EncodedLength()) + int(m.EncodedLength())
-
- err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
- if err != nil {
- return err
- }
-
- _, err = w.Write(header[:])
- if err != nil {
- return err
- }
- if _, err = w.Write(g.EncodedBytes()); err != nil {
- return err
- }
- _, err = w.Write(m.EncodedBytes())
- return err
-}
-
-func serializeEncryptedKeyX25519(w io.Writer, rand io.Reader, header []byte, pub *x25519.PublicKey, keyBlock []byte, cipherFunc byte, version int) error {
- ephemeralPublicX25519, ciphertext, err := x25519.Encrypt(rand, pub, keyBlock)
- if err != nil {
- return errors.InvalidArgumentError("x25519 encryption failed: " + err.Error())
- }
-
- packetLen := len(header) /* header length */
- packetLen += x25519.EncodedFieldsLength(ciphertext, version == 6)
-
- err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
- if err != nil {
- return err
- }
-
- _, err = w.Write(header[:])
- if err != nil {
- return err
- }
- return x25519.EncodeFields(w, ephemeralPublicX25519, ciphertext, cipherFunc, version == 6)
-}
-
-func serializeEncryptedKeyX448(w io.Writer, rand io.Reader, header []byte, pub *x448.PublicKey, keyBlock []byte, cipherFunc byte, version int) error {
- ephemeralPublicX448, ciphertext, err := x448.Encrypt(rand, pub, keyBlock)
- if err != nil {
- return errors.InvalidArgumentError("x448 encryption failed: " + err.Error())
- }
-
- packetLen := len(header) /* header length */
- packetLen += x448.EncodedFieldsLength(ciphertext, version == 6)
-
- err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
- if err != nil {
- return err
- }
-
- _, err = w.Write(header[:])
- if err != nil {
- return err
- }
- return x448.EncodeFields(w, ephemeralPublicX448, ciphertext, cipherFunc, version == 6)
-}
-
-func serializeEncryptedKeyAEAD(w io.Writer, rand io.Reader, header []byte, pub *symmetric.AEADPublicKey, keyBlock []byte, config *AEADConfig) error {
- mode := algorithm.AEADMode(config.Mode())
- iv, ciphertextRaw, err := pub.Encrypt(rand, keyBlock, mode)
- if err != nil {
- return errors.InvalidArgumentError("AEAD encryption failed: " + err.Error())
- }
-
- ciphertextShortByteString := encoding.NewShortByteString(ciphertextRaw)
-
- buffer := append([]byte{byte(mode)}, iv...)
- buffer = append(buffer, ciphertextShortByteString.EncodedBytes()...)
-
- packetLen := len(header) /* header length */
- packetLen += int(len(buffer))
-
- err = serializeHeader(w, packetTypeEncryptedKey, packetLen)
- if err != nil {
- return err
- }
-
- _, err = w.Write(header[:])
- if err != nil {
- return err
- }
-
- _, err = w.Write(buffer)
- return err
-}
-
-func checksumKeyMaterial(key []byte) uint16 {
- var checksum uint16
- for _, v := range key {
- checksum += uint16(v)
- }
- return checksum
-}
-
-func decodeChecksumKey(msg []byte) (key []byte, err error) {
- key = msg[:len(msg)-2]
- expectedChecksum := uint16(msg[len(msg)-2])<<8 | uint16(msg[len(msg)-1])
- checksum := checksumKeyMaterial(key)
- if checksum != expectedChecksum {
- err = errors.StructuralError("session key checksum is incorrect")
- }
- return
-}
-
-func encodeChecksumKey(buffer []byte, key []byte) {
- copy(buffer, key)
- checksum := checksumKeyMaterial(key)
- buffer[len(key)] = byte(checksum >> 8)
- buffer[len(key)+1] = byte(checksum)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/forwarding.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/forwarding.go
deleted file mode 100644
index 50b4de44..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/forwarding.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package packet
-
-import "encoding/binary"
-
-// ForwardingInstance represents a single forwarding instance (mapping IDs to a Proxy Param)
-type ForwardingInstance struct {
- KeyVersion int
- ForwarderFingerprint []byte
- ForwardeeFingerprint []byte
- ProxyParameter []byte
-}
-
-func (f *ForwardingInstance) GetForwarderKeyId() uint64 {
- return computeForwardingKeyId(f.ForwarderFingerprint, f.KeyVersion)
-}
-
-func (f *ForwardingInstance) GetForwardeeKeyId() uint64 {
- return computeForwardingKeyId(f.ForwardeeFingerprint, f.KeyVersion)
-}
-
-func (f *ForwardingInstance) getForwardeeKeyIdOrZero(originalKeyId uint64) uint64 {
- if originalKeyId == 0 {
- return 0
- }
-
- return f.GetForwardeeKeyId()
-}
-
-func computeForwardingKeyId(fingerprint []byte, version int) uint64 {
- switch version {
- case 4:
- return binary.BigEndian.Uint64(fingerprint[12:20])
- default:
- panic("invalid pgp key version")
- }
-}
\ No newline at end of file
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go
deleted file mode 100644
index 8a028c8a..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "encoding/binary"
- "io"
-)
-
-// LiteralData represents an encrypted file. See RFC 4880, section 5.9.
-type LiteralData struct {
- Format uint8
- IsBinary bool
- FileName string
- Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined.
- Body io.Reader
-}
-
-// ForEyesOnly returns whether the contents of the LiteralData have been marked
-// as especially sensitive.
-func (l *LiteralData) ForEyesOnly() bool {
- return l.FileName == "_CONSOLE"
-}
-
-func (l *LiteralData) parse(r io.Reader) (err error) {
- var buf [256]byte
-
- _, err = readFull(r, buf[:2])
- if err != nil {
- return
- }
-
- l.Format = buf[0]
- l.IsBinary = l.Format == 'b'
- fileNameLen := int(buf[1])
-
- _, err = readFull(r, buf[:fileNameLen])
- if err != nil {
- return
- }
-
- l.FileName = string(buf[:fileNameLen])
-
- _, err = readFull(r, buf[:4])
- if err != nil {
- return
- }
-
- l.Time = binary.BigEndian.Uint32(buf[:4])
- l.Body = r
- return
-}
-
-// SerializeLiteral serializes a literal data packet to w and returns a
-// WriteCloser to which the data itself can be written and which MUST be closed
-// on completion. The fileName is truncated to 255 bytes.
-func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) {
- var buf [4]byte
- buf[0] = 'b'
- if !isBinary {
- buf[0] = 'u'
- }
- if len(fileName) > 255 {
- fileName = fileName[:255]
- }
- buf[1] = byte(len(fileName))
-
- inner, err := serializeStreamHeader(w, packetTypeLiteralData)
- if err != nil {
- return
- }
-
- _, err = inner.Write(buf[:2])
- if err != nil {
- return
- }
- _, err = inner.Write([]byte(fileName))
- if err != nil {
- return
- }
- binary.BigEndian.PutUint32(buf[:], time)
- _, err = inner.Write(buf[:])
- if err != nil {
- return
- }
-
- plaintext = inner
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/marker.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/marker.go
deleted file mode 100644
index 1ee378ba..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/marker.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package packet
-
-import (
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-type Marker struct{}
-
-const markerString = "PGP"
-
-// parse just checks if the packet contains "PGP".
-func (m *Marker) parse(reader io.Reader) error {
- var buffer [3]byte
- if _, err := io.ReadFull(reader, buffer[:]); err != nil {
- return err
- }
- if string(buffer[:]) != markerString {
- return errors.StructuralError("invalid marker packet")
- }
- return nil
-}
-
-// SerializeMarker writes a marker packet to writer.
-func SerializeMarker(writer io.Writer) error {
- err := serializeHeader(writer, packetTypeMarker, len(markerString))
- if err != nil {
- return err
- }
- _, err = writer.Write([]byte(markerString))
- return err
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go
deleted file mode 100644
index 2c3e3f50..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/notation.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package packet
-
-// Notation type represents a Notation Data subpacket
-// see https://tools.ietf.org/html/rfc4880#section-5.2.3.16
-type Notation struct {
- Name string
- Value []byte
- IsCritical bool
- IsHumanReadable bool
-}
-
-func (notation *Notation) getData() []byte {
- nameData := []byte(notation.Name)
- nameLen := len(nameData)
- valueLen := len(notation.Value)
-
- data := make([]byte, 8+nameLen+valueLen)
- if notation.IsHumanReadable {
- data[0] = 0x80
- }
-
- data[4] = byte(nameLen >> 8)
- data[5] = byte(nameLen)
- data[6] = byte(valueLen >> 8)
- data[7] = byte(valueLen)
- copy(data[8:8+nameLen], nameData)
- copy(data[8+nameLen:], notation.Value)
- return data
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go
deleted file mode 100644
index 4f26d0a0..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9
-
-package packet
-
-import (
- "crypto/cipher"
-)
-
-type ocfbEncrypter struct {
- b cipher.Block
- fre []byte
- outUsed int
-}
-
-// An OCFBResyncOption determines if the "resynchronization step" of OCFB is
-// performed.
-type OCFBResyncOption bool
-
-const (
- OCFBResync OCFBResyncOption = true
- OCFBNoResync OCFBResyncOption = false
-)
-
-// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's
-// cipher feedback mode using the given cipher.Block, and an initial amount of
-// ciphertext. randData must be random bytes and be the same length as the
-// cipher.Block's block size. Resync determines if the "resynchronization step"
-// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on
-// this point.
-func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) {
- blockSize := block.BlockSize()
- if len(randData) != blockSize {
- return nil, nil
- }
-
- x := &ocfbEncrypter{
- b: block,
- fre: make([]byte, blockSize),
- outUsed: 0,
- }
- prefix := make([]byte, blockSize+2)
-
- block.Encrypt(x.fre, x.fre)
- for i := 0; i < blockSize; i++ {
- prefix[i] = randData[i] ^ x.fre[i]
- }
-
- block.Encrypt(x.fre, prefix[:blockSize])
- prefix[blockSize] = x.fre[0] ^ randData[blockSize-2]
- prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1]
-
- if resync {
- block.Encrypt(x.fre, prefix[2:])
- } else {
- x.fre[0] = prefix[blockSize]
- x.fre[1] = prefix[blockSize+1]
- x.outUsed = 2
- }
- return x, prefix
-}
-
-func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) {
- for i := 0; i < len(src); i++ {
- if x.outUsed == len(x.fre) {
- x.b.Encrypt(x.fre, x.fre)
- x.outUsed = 0
- }
-
- x.fre[x.outUsed] ^= src[i]
- dst[i] = x.fre[x.outUsed]
- x.outUsed++
- }
-}
-
-type ocfbDecrypter struct {
- b cipher.Block
- fre []byte
- outUsed int
-}
-
-// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's
-// cipher feedback mode using the given cipher.Block. Prefix must be the first
-// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's
-// block size. On successful exit, blockSize+2 bytes of decrypted data are written into
-// prefix. Resync determines if the "resynchronization step" from RFC 4880,
-// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point.
-func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream {
- blockSize := block.BlockSize()
- if len(prefix) != blockSize+2 {
- return nil
- }
-
- x := &ocfbDecrypter{
- b: block,
- fre: make([]byte, blockSize),
- outUsed: 0,
- }
- prefixCopy := make([]byte, len(prefix))
- copy(prefixCopy, prefix)
-
- block.Encrypt(x.fre, x.fre)
- for i := 0; i < blockSize; i++ {
- prefixCopy[i] ^= x.fre[i]
- }
-
- block.Encrypt(x.fre, prefix[:blockSize])
- prefixCopy[blockSize] ^= x.fre[0]
- prefixCopy[blockSize+1] ^= x.fre[1]
-
- if resync {
- block.Encrypt(x.fre, prefix[2:])
- } else {
- x.fre[0] = prefix[blockSize]
- x.fre[1] = prefix[blockSize+1]
- x.outUsed = 2
- }
- copy(prefix, prefixCopy)
- return x
-}
-
-func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) {
- for i := 0; i < len(src); i++ {
- if x.outUsed == len(x.fre) {
- x.b.Encrypt(x.fre, x.fre)
- x.outUsed = 0
- }
-
- c := src[i]
- dst[i] = x.fre[x.outUsed] ^ src[i]
- x.fre[x.outUsed] = c
- x.outUsed++
- }
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go
deleted file mode 100644
index f393c406..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "crypto"
- "encoding/binary"
- "io"
- "strconv"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
-)
-
-// OnePassSignature represents a one-pass signature packet. See RFC 4880,
-// section 5.4.
-type OnePassSignature struct {
- Version int
- SigType SignatureType
- Hash crypto.Hash
- PubKeyAlgo PublicKeyAlgorithm
- KeyId uint64
- IsLast bool
- Salt []byte // v6 only
- KeyFingerprint []byte // v6 only
-}
-
-func (ops *OnePassSignature) parse(r io.Reader) (err error) {
- var buf [8]byte
- // Read: version | signature type | hash algorithm | public-key algorithm
- _, err = readFull(r, buf[:4])
- if err != nil {
- return
- }
- if buf[0] != 3 && buf[0] != 6 {
- return errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0])))
- }
- ops.Version = int(buf[0])
-
- var ok bool
- ops.Hash, ok = algorithm.HashIdToHashWithSha1(buf[2])
- if !ok {
- return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2])))
- }
-
- ops.SigType = SignatureType(buf[1])
- ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3])
-
- if ops.Version == 6 {
- // Only for v6, a variable-length field containing the salt
- _, err = readFull(r, buf[:1])
- if err != nil {
- return
- }
- saltLength := int(buf[0])
- var expectedSaltLength int
- expectedSaltLength, err = SaltLengthForHash(ops.Hash)
- if err != nil {
- return
- }
- if saltLength != expectedSaltLength {
- err = errors.StructuralError("unexpected salt size for the given hash algorithm")
- return
- }
- salt := make([]byte, expectedSaltLength)
- _, err = readFull(r, salt)
- if err != nil {
- return
- }
- ops.Salt = salt
-
- // Only for v6 packets, 32 octets of the fingerprint of the signing key.
- fingerprint := make([]byte, 32)
- _, err = readFull(r, fingerprint)
- if err != nil {
- return
- }
- ops.KeyFingerprint = fingerprint
- ops.KeyId = binary.BigEndian.Uint64(ops.KeyFingerprint[:8])
- } else {
- _, err = readFull(r, buf[:8])
- if err != nil {
- return
- }
- ops.KeyId = binary.BigEndian.Uint64(buf[:8])
- }
-
- _, err = readFull(r, buf[:1])
- if err != nil {
- return
- }
- ops.IsLast = buf[0] != 0
- return
-}
-
-// Serialize marshals the given OnePassSignature to w.
-func (ops *OnePassSignature) Serialize(w io.Writer) error {
- //v3 length 1+1+1+1+8+1 =
- packetLength := 13
- if ops.Version == 6 {
- // v6 length 1+1+1+1+1+len(salt)+32+1 =
- packetLength = 38 + len(ops.Salt)
- }
-
- if err := serializeHeader(w, packetTypeOnePassSignature, packetLength); err != nil {
- return err
- }
-
- var buf [8]byte
- buf[0] = byte(ops.Version)
- buf[1] = uint8(ops.SigType)
- var ok bool
- buf[2], ok = algorithm.HashToHashIdWithSha1(ops.Hash)
- if !ok {
- return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash)))
- }
- buf[3] = uint8(ops.PubKeyAlgo)
-
- _, err := w.Write(buf[:4])
- if err != nil {
- return err
- }
-
- if ops.Version == 6 {
- // write salt for v6 signatures
- _, err := w.Write([]byte{uint8(len(ops.Salt))})
- if err != nil {
- return err
- }
- _, err = w.Write(ops.Salt)
- if err != nil {
- return err
- }
-
- // write fingerprint v6 signatures
- _, err = w.Write(ops.KeyFingerprint)
- if err != nil {
- return err
- }
- } else {
- binary.BigEndian.PutUint64(buf[:8], ops.KeyId)
- _, err := w.Write(buf[:8])
- if err != nil {
- return err
- }
- }
-
- isLast := []byte{byte(0)}
- if ops.IsLast {
- isLast[0] = 1
- }
-
- _, err = w.Write(isLast)
- return err
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go
deleted file mode 100644
index cef7c661..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is
-// useful for splitting and storing the original packet contents separately,
-// handling unsupported packet types or accessing parts of the packet not yet
-// implemented by this package.
-type OpaquePacket struct {
- // Packet type
- Tag uint8
- // Reason why the packet was parsed opaquely
- Reason error
- // Binary contents of the packet data
- Contents []byte
-}
-
-func (op *OpaquePacket) parse(r io.Reader) (err error) {
- op.Contents, err = io.ReadAll(r)
- return
-}
-
-// Serialize marshals the packet to a writer in its original form, including
-// the packet header.
-func (op *OpaquePacket) Serialize(w io.Writer) (err error) {
- err = serializeHeader(w, packetType(op.Tag), len(op.Contents))
- if err == nil {
- _, err = w.Write(op.Contents)
- }
- return
-}
-
-// Parse attempts to parse the opaque contents into a structure supported by
-// this package. If the packet is not known then the result will be another
-// OpaquePacket.
-func (op *OpaquePacket) Parse() (p Packet, err error) {
- hdr := bytes.NewBuffer(nil)
- err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents))
- if err != nil {
- op.Reason = err
- return op, err
- }
- p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents)))
- if err != nil {
- op.Reason = err
- p = op
- }
- return
-}
-
-// OpaqueReader reads OpaquePackets from an io.Reader.
-type OpaqueReader struct {
- r io.Reader
-}
-
-func NewOpaqueReader(r io.Reader) *OpaqueReader {
- return &OpaqueReader{r: r}
-}
-
-// Read the next OpaquePacket.
-func (or *OpaqueReader) Next() (op *OpaquePacket, err error) {
- tag, _, contents, err := readHeader(or.r)
- if err != nil {
- return
- }
- op = &OpaquePacket{Tag: uint8(tag), Reason: err}
- err = op.parse(contents)
- if err != nil {
- consumeAll(contents)
- }
- return
-}
-
-// OpaqueSubpacket represents an unparsed OpenPGP subpacket,
-// as found in signature and user attribute packets.
-type OpaqueSubpacket struct {
- SubType uint8
- EncodedLength []byte // Store the original encoded length for signature verifications.
- Contents []byte
-}
-
-// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from
-// their byte representation.
-func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) {
- var (
- subHeaderLen int
- subPacket *OpaqueSubpacket
- )
- for len(contents) > 0 {
- subHeaderLen, subPacket, err = nextSubpacket(contents)
- if err != nil {
- break
- }
- result = append(result, subPacket)
- contents = contents[subHeaderLen+len(subPacket.Contents):]
- }
- return
-}
-
-func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) {
- // RFC 4880, section 5.2.3.1
- var subLen uint32
- var encodedLength []byte
- if len(contents) < 1 {
- goto Truncated
- }
- subPacket = &OpaqueSubpacket{}
- switch {
- case contents[0] < 192:
- subHeaderLen = 2 // 1 length byte, 1 subtype byte
- if len(contents) < subHeaderLen {
- goto Truncated
- }
- encodedLength = contents[0:1]
- subLen = uint32(contents[0])
- contents = contents[1:]
- case contents[0] < 255:
- subHeaderLen = 3 // 2 length bytes, 1 subtype
- if len(contents) < subHeaderLen {
- goto Truncated
- }
- encodedLength = contents[0:2]
- subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192
- contents = contents[2:]
- default:
- subHeaderLen = 6 // 5 length bytes, 1 subtype
- if len(contents) < subHeaderLen {
- goto Truncated
- }
- encodedLength = contents[0:5]
- subLen = uint32(contents[1])<<24 |
- uint32(contents[2])<<16 |
- uint32(contents[3])<<8 |
- uint32(contents[4])
- contents = contents[5:]
-
- }
- if subLen > uint32(len(contents)) || subLen == 0 {
- goto Truncated
- }
- subPacket.SubType = contents[0]
- subPacket.EncodedLength = encodedLength
- subPacket.Contents = contents[1:subLen]
- return
-Truncated:
- err = errors.StructuralError("subpacket truncated")
- return
-}
-
-func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) {
- buf := make([]byte, 6)
- copy(buf, osp.EncodedLength)
- n := len(osp.EncodedLength)
-
- buf[n] = osp.SubType
- if _, err = w.Write(buf[:n+1]); err != nil {
- return
- }
- _, err = w.Write(osp.Contents)
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go
deleted file mode 100644
index decc8691..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go
+++ /dev/null
@@ -1,678 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package packet implements parsing and serialization of OpenPGP packets, as
-// specified in RFC 4880.
-package packet // import "github.com/ProtonMail/go-crypto/v2/openpgp/packet"
-
-import (
- "bytes"
- "crypto/cipher"
- "crypto/rsa"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
-)
-
-// readFull is the same as io.ReadFull except that reading zero bytes returns
-// ErrUnexpectedEOF rather than EOF.
-func readFull(r io.Reader, buf []byte) (n int, err error) {
- n, err = io.ReadFull(r, buf)
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return
-}
-
-// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2.
-func readLength(r io.Reader) (length int64, isPartial bool, err error) {
- var buf [4]byte
- _, err = readFull(r, buf[:1])
- if err != nil {
- return
- }
- switch {
- case buf[0] < 192:
- length = int64(buf[0])
- case buf[0] < 224:
- length = int64(buf[0]-192) << 8
- _, err = readFull(r, buf[0:1])
- if err != nil {
- return
- }
- length += int64(buf[0]) + 192
- case buf[0] < 255:
- length = int64(1) << (buf[0] & 0x1f)
- isPartial = true
- default:
- _, err = readFull(r, buf[0:4])
- if err != nil {
- return
- }
- length = int64(buf[0])<<24 |
- int64(buf[1])<<16 |
- int64(buf[2])<<8 |
- int64(buf[3])
- }
- return
-}
-
-// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths.
-// The continuation lengths are parsed and removed from the stream and EOF is
-// returned at the end of the packet. See RFC 4880, section 4.2.2.4.
-type partialLengthReader struct {
- r io.Reader
- remaining int64
- isPartial bool
-}
-
-func (r *partialLengthReader) Read(p []byte) (n int, err error) {
- for r.remaining == 0 {
- if !r.isPartial {
- return 0, io.EOF
- }
- r.remaining, r.isPartial, err = readLength(r.r)
- if err != nil {
- return 0, err
- }
- }
-
- toRead := int64(len(p))
- if toRead > r.remaining {
- toRead = r.remaining
- }
-
- n, err = r.r.Read(p[:int(toRead)])
- r.remaining -= int64(n)
- if n < int(toRead) && err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return
-}
-
-// partialLengthWriter writes a stream of data using OpenPGP partial lengths.
-// See RFC 4880, section 4.2.2.4.
-type partialLengthWriter struct {
- w io.WriteCloser
- buf bytes.Buffer
- lengthByte [1]byte
-}
-
-func (w *partialLengthWriter) Write(p []byte) (n int, err error) {
- bufLen := w.buf.Len()
- if bufLen > 512 {
- for power := uint(30); ; power-- {
- l := 1 << power
- if bufLen >= l {
- w.lengthByte[0] = 224 + uint8(power)
- _, err = w.w.Write(w.lengthByte[:])
- if err != nil {
- return
- }
- var m int
- m, err = w.w.Write(w.buf.Next(l))
- if err != nil {
- return
- }
- if m != l {
- return 0, io.ErrShortWrite
- }
- break
- }
- }
- }
- return w.buf.Write(p)
-}
-
-func (w *partialLengthWriter) Close() (err error) {
- len := w.buf.Len()
- err = serializeLength(w.w, len)
- if err != nil {
- return err
- }
- _, err = w.buf.WriteTo(w.w)
- if err != nil {
- return err
- }
- return w.w.Close()
-}
-
-// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the
-// underlying Reader returns EOF before the limit has been reached.
-type spanReader struct {
- r io.Reader
- n int64
-}
-
-func (l *spanReader) Read(p []byte) (n int, err error) {
- if l.n <= 0 {
- return 0, io.EOF
- }
- if int64(len(p)) > l.n {
- p = p[0:l.n]
- }
- n, err = l.r.Read(p)
- l.n -= int64(n)
- if l.n > 0 && err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return
-}
-
-// readHeader parses a packet header and returns an io.Reader which will return
-// the contents of the packet. See RFC 4880, section 4.2.
-func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) {
- var buf [4]byte
- _, err = io.ReadFull(r, buf[:1])
- if err != nil {
- return
- }
- if buf[0]&0x80 == 0 {
- err = errors.StructuralError("tag byte does not have MSB set")
- return
- }
- if buf[0]&0x40 == 0 {
- // Old format packet
- tag = packetType((buf[0] & 0x3f) >> 2)
- lengthType := buf[0] & 3
- if lengthType == 3 {
- length = -1
- contents = r
- return
- }
- lengthBytes := 1 << lengthType
- _, err = readFull(r, buf[0:lengthBytes])
- if err != nil {
- return
- }
- for i := 0; i < lengthBytes; i++ {
- length <<= 8
- length |= int64(buf[i])
- }
- contents = &spanReader{r, length}
- return
- }
-
- // New format packet
- tag = packetType(buf[0] & 0x3f)
- length, isPartial, err := readLength(r)
- if err != nil {
- return
- }
- if isPartial {
- contents = &partialLengthReader{
- remaining: length,
- isPartial: true,
- r: r,
- }
- length = -1
- } else {
- contents = &spanReader{r, length}
- }
- return
-}
-
-// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section
-// 4.2.
-func serializeHeader(w io.Writer, ptype packetType, length int) (err error) {
- err = serializeType(w, ptype)
- if err != nil {
- return
- }
- return serializeLength(w, length)
-}
-
-// serializeType writes an OpenPGP packet type to w. See RFC 4880, section
-// 4.2.
-func serializeType(w io.Writer, ptype packetType) (err error) {
- var buf [1]byte
- buf[0] = 0x80 | 0x40 | byte(ptype)
- _, err = w.Write(buf[:])
- return
-}
-
-// serializeLength writes an OpenPGP packet length to w. See RFC 4880, section
-// 4.2.2.
-func serializeLength(w io.Writer, length int) (err error) {
- var buf [5]byte
- var n int
-
- if length < 192 {
- buf[0] = byte(length)
- n = 1
- } else if length < 8384 {
- length -= 192
- buf[0] = 192 + byte(length>>8)
- buf[1] = byte(length)
- n = 2
- } else {
- buf[0] = 255
- buf[1] = byte(length >> 24)
- buf[2] = byte(length >> 16)
- buf[3] = byte(length >> 8)
- buf[4] = byte(length)
- n = 5
- }
-
- _, err = w.Write(buf[:n])
- return
-}
-
-// serializeStreamHeader writes an OpenPGP packet header to w where the
-// length of the packet is unknown. It returns a io.WriteCloser which can be
-// used to write the contents of the packet. See RFC 4880, section 4.2.
-func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) {
- err = serializeType(w, ptype)
- if err != nil {
- return
- }
- out = &partialLengthWriter{w: w}
- return
-}
-
-// Packet represents an OpenPGP packet. Users are expected to try casting
-// instances of this interface to specific packet types.
-type Packet interface {
- parse(io.Reader) error
-}
-
-// consumeAll reads from the given Reader until error, returning the number of
-// bytes read.
-func consumeAll(r io.Reader) (n int64, err error) {
- var m int
- var buf [1024]byte
-
- for {
- m, err = r.Read(buf[:])
- n += int64(m)
- if err == io.EOF {
- err = nil
- return
- }
- if err != nil {
- return
- }
- }
-}
-
-// packetType represents the numeric ids of the different OpenPGP packet types. See
-// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2
-type packetType uint8
-
-const (
- packetTypeEncryptedKey packetType = 1
- packetTypeSignature packetType = 2
- packetTypeSymmetricKeyEncrypted packetType = 3
- packetTypeOnePassSignature packetType = 4
- packetTypePrivateKey packetType = 5
- packetTypePublicKey packetType = 6
- packetTypePrivateSubkey packetType = 7
- packetTypeCompressed packetType = 8
- packetTypeSymmetricallyEncrypted packetType = 9
- packetTypeMarker packetType = 10
- packetTypeLiteralData packetType = 11
- packetTypeTrust packetType = 12
- packetTypeUserId packetType = 13
- packetTypePublicSubkey packetType = 14
- packetTypeUserAttribute packetType = 17
- packetTypeSymmetricallyEncryptedIntegrityProtected packetType = 18
- packetTypeAEADEncrypted packetType = 20
- packetPadding packetType = 21
-)
-
-// EncryptedDataPacket holds encrypted data. It is currently implemented by
-// SymmetricallyEncrypted and AEADEncrypted.
-type EncryptedDataPacket interface {
- Decrypt(CipherFunction, []byte) (io.ReadCloser, error)
-}
-
-// Read reads a single OpenPGP packet from the given io.Reader. If there is an
-// error parsing a packet, the whole packet is consumed from the input.
-func Read(r io.Reader) (p Packet, err error) {
- tag, len, contents, err := readHeader(r)
- if err != nil {
- return
- }
-
- switch tag {
- case packetTypeEncryptedKey:
- p = new(EncryptedKey)
- case packetTypeSignature:
- p = new(Signature)
- case packetTypeSymmetricKeyEncrypted:
- p = new(SymmetricKeyEncrypted)
- case packetTypeOnePassSignature:
- p = new(OnePassSignature)
- case packetTypePrivateKey, packetTypePrivateSubkey:
- pk := new(PrivateKey)
- if tag == packetTypePrivateSubkey {
- pk.IsSubkey = true
- }
- p = pk
- case packetTypePublicKey, packetTypePublicSubkey:
- isSubkey := tag == packetTypePublicSubkey
- p = &PublicKey{IsSubkey: isSubkey}
- case packetTypeCompressed:
- p = new(Compressed)
- case packetTypeSymmetricallyEncrypted:
- p = new(SymmetricallyEncrypted)
- case packetTypeLiteralData:
- p = new(LiteralData)
- case packetTypeUserId:
- p = new(UserId)
- case packetTypeUserAttribute:
- p = new(UserAttribute)
- case packetTypeSymmetricallyEncryptedIntegrityProtected:
- se := new(SymmetricallyEncrypted)
- se.IntegrityProtected = true
- p = se
- case packetTypeAEADEncrypted:
- p = new(AEADEncrypted)
- case packetPadding:
- p = Padding(len)
- case packetTypeMarker:
- p = new(Marker)
- case packetTypeTrust:
- // Not implemented, just consume
- err = errors.UnknownPacketTypeError(tag)
- default:
- // Packet Tags from 0 to 39 are critical.
- // Packet Tags from 40 to 63 are non-critical.
- if tag < 40 {
- err = errors.CriticalUnknownPacketTypeError(tag)
- } else {
- err = errors.UnknownPacketTypeError(tag)
- }
- }
- if p != nil {
- err = p.parse(contents)
- }
- if err != nil {
- consumeAll(contents)
- }
- return
-}
-
-// ReadWithCheck reads a single OpenPGP message packet from the given io.Reader. If there is an
-// error parsing a packet, the whole packet is consumed from the input.
-// ReadWithCheck additionally checks if the OpenPGP message packet sequence adheres
-// to the packet composition rules in rfc4880, if not throws an error.
-func ReadWithCheck(r io.Reader, sequence *SequenceVerifier) (p Packet, msgErr error, err error) {
- tag, len, contents, err := readHeader(r)
- if err != nil {
- return
- }
- switch tag {
- case packetTypeEncryptedKey:
- msgErr = sequence.Next(ESKSymbol)
- p = new(EncryptedKey)
- case packetTypeSignature:
- msgErr = sequence.Next(SigSymbol)
- p = new(Signature)
- case packetTypeSymmetricKeyEncrypted:
- msgErr = sequence.Next(ESKSymbol)
- p = new(SymmetricKeyEncrypted)
- case packetTypeOnePassSignature:
- msgErr = sequence.Next(OPSSymbol)
- p = new(OnePassSignature)
- case packetTypeCompressed:
- msgErr = sequence.Next(CompSymbol)
- p = new(Compressed)
- case packetTypeSymmetricallyEncrypted:
- msgErr = sequence.Next(EncSymbol)
- p = new(SymmetricallyEncrypted)
- case packetTypeLiteralData:
- msgErr = sequence.Next(LDSymbol)
- p = new(LiteralData)
- case packetTypeSymmetricallyEncryptedIntegrityProtected:
- msgErr = sequence.Next(EncSymbol)
- se := new(SymmetricallyEncrypted)
- se.IntegrityProtected = true
- p = se
- case packetTypeAEADEncrypted:
- msgErr = sequence.Next(EncSymbol)
- p = new(AEADEncrypted)
- case packetPadding:
- p = Padding(len)
- case packetTypeMarker:
- p = new(Marker)
- case packetTypeTrust:
- // Not implemented, just consume
- err = errors.UnknownPacketTypeError(tag)
- case packetTypePrivateKey,
- packetTypePrivateSubkey,
- packetTypePublicKey,
- packetTypePublicSubkey,
- packetTypeUserId,
- packetTypeUserAttribute:
- msgErr = sequence.Next(UnknownSymbol)
- consumeAll(contents)
- default:
- // Packet Tags from 0 to 39 are critical.
- // Packet Tags from 40 to 63 are non-critical.
- if tag < 40 {
- err = errors.CriticalUnknownPacketTypeError(tag)
- } else {
- err = errors.UnknownPacketTypeError(tag)
- }
- }
- if p != nil {
- err = p.parse(contents)
- }
- if err != nil {
- consumeAll(contents)
- }
- return
-}
-
-// SignatureType represents the different semantic meanings of an OpenPGP
-// signature. See RFC 4880, section 5.2.1.
-type SignatureType uint8
-
-const (
- SigTypeBinary SignatureType = 0x00
- SigTypeText SignatureType = 0x01
- SigTypeGenericCert SignatureType = 0x10
- SigTypePersonaCert SignatureType = 0x11
- SigTypeCasualCert SignatureType = 0x12
- SigTypePositiveCert SignatureType = 0x13
- SigTypeSubkeyBinding SignatureType = 0x18
- SigTypePrimaryKeyBinding SignatureType = 0x19
- SigTypeDirectSignature SignatureType = 0x1F
- SigTypeKeyRevocation SignatureType = 0x20
- SigTypeSubkeyRevocation SignatureType = 0x28
- SigTypeCertificationRevocation SignatureType = 0x30
-)
-
-// PublicKeyAlgorithm represents the different public key system specified for
-// OpenPGP. See
-// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12
-type PublicKeyAlgorithm uint8
-
-const (
- PubKeyAlgoRSA PublicKeyAlgorithm = 1
- PubKeyAlgoElGamal PublicKeyAlgorithm = 16
- PubKeyAlgoDSA PublicKeyAlgorithm = 17
- // RFC 6637, Section 5.
- PubKeyAlgoECDH PublicKeyAlgorithm = 18
- PubKeyAlgoECDSA PublicKeyAlgorithm = 19
- // https://www.ietf.org/archive/id/draft-koch-eddsa-for-openpgp-04.txt
- PubKeyAlgoEdDSA PublicKeyAlgorithm = 22
- // https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh
- PubKeyAlgoX25519 PublicKeyAlgorithm = 25
- PubKeyAlgoX448 PublicKeyAlgorithm = 26
- PubKeyAlgoEd25519 PublicKeyAlgorithm = 27
- PubKeyAlgoEd448 PublicKeyAlgorithm = 28
-
- ExperimentalPubKeyAlgoAEAD PublicKeyAlgorithm = 100
- ExperimentalPubKeyAlgoHMAC PublicKeyAlgorithm = 101
-
- // Deprecated in RFC 4880, Section 13.5. Use key flags instead.
- PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
- PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3
-)
-
-// CanEncrypt returns true if it's possible to encrypt a message to a public
-// key of the given type.
-func (pka PublicKeyAlgorithm) CanEncrypt() bool {
- switch pka {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH, PubKeyAlgoX25519, PubKeyAlgoX448, ExperimentalPubKeyAlgoAEAD:
- return true
- }
- return false
-}
-
-// CanSign returns true if it's possible for a public key of the given type to
-// sign a message.
-func (pka PublicKeyAlgorithm) CanSign() bool {
- switch pka {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA, PubKeyAlgoEd25519, PubKeyAlgoEd448, ExperimentalPubKeyAlgoHMAC:
- return true
- }
- return false
-}
-
-// CipherFunction represents the different block ciphers specified for OpenPGP. See
-// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13
-type CipherFunction algorithm.CipherFunction
-
-const (
- Cipher3DES CipherFunction = 2
- CipherCAST5 CipherFunction = 3
- CipherAES128 CipherFunction = 7
- CipherAES192 CipherFunction = 8
- CipherAES256 CipherFunction = 9
-)
-
-// KeySize returns the key size, in bytes, of cipher.
-func (cipher CipherFunction) KeySize() int {
- return algorithm.CipherFunction(cipher).KeySize()
-}
-
-// IsSupported returns true if the cipher is supported from the library
-func (cipher CipherFunction) IsSupported() bool {
- return algorithm.CipherFunction(cipher).KeySize() > 0
-}
-
-// blockSize returns the block size, in bytes, of cipher.
-func (cipher CipherFunction) blockSize() int {
- return algorithm.CipherFunction(cipher).BlockSize()
-}
-
-// new returns a fresh instance of the given cipher.
-func (cipher CipherFunction) new(key []byte) (block cipher.Block) {
- return algorithm.CipherFunction(cipher).New(key)
-}
-
-// padToKeySize left-pads a MPI with zeroes to match the length of the
-// specified RSA public.
-func padToKeySize(pub *rsa.PublicKey, b []byte) []byte {
- k := (pub.N.BitLen() + 7) / 8
- if len(b) >= k {
- return b
- }
- bb := make([]byte, k)
- copy(bb[len(bb)-len(b):], b)
- return bb
-}
-
-// CompressionAlgo Represents the different compression algorithms
-// supported by OpenPGP (except for BZIP2, which is not currently
-// supported). See Section 9.3 of RFC 4880.
-type CompressionAlgo uint8
-
-const (
- CompressionNone CompressionAlgo = 0
- CompressionZIP CompressionAlgo = 1
- CompressionZLIB CompressionAlgo = 2
-)
-
-// AEADMode represents the different Authenticated Encryption with Associated
-// Data specified for OpenPGP.
-// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.6
-type AEADMode algorithm.AEADMode
-
-const (
- AEADModeEAX AEADMode = 1
- AEADModeOCB AEADMode = 2
- AEADModeGCM AEADMode = 3
-)
-
-func (mode AEADMode) IvLength() int {
- return algorithm.AEADMode(mode).NonceLength()
-}
-
-func (mode AEADMode) TagLength() int {
- return algorithm.AEADMode(mode).TagLength()
-}
-
-// IsSupported returns true if the aead mode is supported from the library
-func (mode AEADMode) IsSupported() bool {
- return algorithm.AEADMode(mode).TagLength() > 0
-}
-
-// new returns a fresh instance of the given mode.
-func (mode AEADMode) new(block cipher.Block) cipher.AEAD {
- return algorithm.AEADMode(mode).New(block)
-}
-
-// ReasonForRevocation represents a revocation reason code as per RFC4880
-// section 5.2.3.23.
-type ReasonForRevocation uint8
-
-const (
- NoReason ReasonForRevocation = 0
- KeySuperseded ReasonForRevocation = 1
- KeyCompromised ReasonForRevocation = 2
- KeyRetired ReasonForRevocation = 3
- UserIDNotValid ReasonForRevocation = 32
- Unknown ReasonForRevocation = 200
-)
-
-func NewReasonForRevocation(value byte) ReasonForRevocation {
- if value < 4 || value == 32 {
- return ReasonForRevocation(value)
- }
- return Unknown
-}
-
-// Curve is a mapping to supported ECC curves for key generation.
-// See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-06.html#name-curve-specific-wire-formats
-type Curve string
-
-const (
- Curve25519 Curve = "Curve25519"
- Curve448 Curve = "Curve448"
- CurveNistP256 Curve = "P256"
- CurveNistP384 Curve = "P384"
- CurveNistP521 Curve = "P521"
- CurveSecP256k1 Curve = "SecP256k1"
- CurveBrainpoolP256 Curve = "BrainpoolP256"
- CurveBrainpoolP384 Curve = "BrainpoolP384"
- CurveBrainpoolP512 Curve = "BrainpoolP512"
-)
-
-// TrustLevel represents a trust level per RFC4880 5.2.3.13
-type TrustLevel uint8
-
-// TrustAmount represents a trust amount per RFC4880 5.2.3.13
-type TrustAmount uint8
-
-const (
- // versionSize is the length in bytes of the version value.
- versionSize = 1
- // algorithmSize is the length in bytes of the key algorithm value.
- algorithmSize = 1
- // keyVersionSize is the length in bytes of the key version value
- keyVersionSize = 1
- // keyIdSize is the length in bytes of the key identifier value.
- keyIdSize = 8
- // timestampSize is the length in bytes of encoded timestamps.
- timestampSize = 4
- // fingerprintSizeV6 is the length in bytes of the key fingerprint in v6.
- fingerprintSizeV6 = 32
- // fingerprintSize is the length in bytes of the key fingerprint.
- fingerprintSize = 20
-)
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_sequence.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_sequence.go
deleted file mode 100644
index 55a8a56c..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_sequence.go
+++ /dev/null
@@ -1,222 +0,0 @@
-package packet
-
-// This file implements the pushdown automata (PDA) from PGPainless (Paul Schaub)
-// to verify pgp packet sequences. See Paul's blogpost for more details:
-// https://blog.jabberhead.tk/2022/10/26/implementing-packet-sequence-validation-using-pushdown-automata/
-import (
- "fmt"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-func NewErrMalformedMessage(from State, input InputSymbol, stackSymbol StackSymbol) errors.ErrMalformedMessage {
- return errors.ErrMalformedMessage(fmt.Sprintf("state %d, input symbol %d, stack symbol %d ", from, input, stackSymbol))
-}
-
-// InputSymbol defines the input alphabet of the PDA
-type InputSymbol uint8
-
-const (
- LDSymbol InputSymbol = iota
- SigSymbol
- OPSSymbol
- CompSymbol
- ESKSymbol
- EncSymbol
- EOSSymbol
- UnknownSymbol
-)
-
-// StackSymbol defines the stack alphabet of the PDA
-type StackSymbol int8
-
-const (
- MsgStackSymbol StackSymbol = iota
- OpsStackSymbol
- KeyStackSymbol
- EndStackSymbol
- EmptyStackSymbol
-)
-
-// State defines the states of the PDA
-type State int8
-
-const (
- OpenPGPMessage State = iota
- ESKMessage
- LiteralMessage
- CompressedMessage
- EncryptedMessage
- ValidMessage
-)
-
-// transition represents a state transition in the PDA
-type transition func(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error)
-
-// SequenceVerifier is a pushdown automata to verify
-// PGP messages packet sequences according to rfc4880.
-type SequenceVerifier struct {
- stack []StackSymbol
- state State
-}
-
-// Next performs a state transition with the given input symbol.
-// If the transition fails a ErrMalformedMessage is returned.
-func (sv *SequenceVerifier) Next(input InputSymbol) error {
- for {
- stackSymbol := sv.popStack()
- transitionFunc := getTransition(sv.state)
- nextState, newStackSymbols, redo, err := transitionFunc(input, stackSymbol)
- if err != nil {
- return err
- }
- if redo {
- sv.pushStack(stackSymbol)
- }
- for _, newStackSymbol := range newStackSymbols {
- sv.pushStack(newStackSymbol)
- }
- sv.state = nextState
- if !redo {
- break
- }
- }
- return nil
-}
-
-// Valid returns true if RDA is in a valid state.
-func (sv *SequenceVerifier) Valid() bool {
- return sv.state == ValidMessage && len(sv.stack) == 0
-}
-
-func (sv *SequenceVerifier) AssertValid() error {
- if !sv.Valid() {
- return errors.ErrMalformedMessage("invalid message")
- }
- return nil
-}
-
-func NewSequenceVerifier() *SequenceVerifier {
- return &SequenceVerifier{
- stack: []StackSymbol{EndStackSymbol, MsgStackSymbol},
- state: OpenPGPMessage,
- }
-}
-
-func (sv *SequenceVerifier) popStack() StackSymbol {
- if len(sv.stack) == 0 {
- return EmptyStackSymbol
- }
- elemIndex := len(sv.stack) - 1
- stackSymbol := sv.stack[elemIndex]
- sv.stack = sv.stack[:elemIndex]
- return stackSymbol
-}
-
-func (sv *SequenceVerifier) pushStack(stackSymbol StackSymbol) {
- sv.stack = append(sv.stack, stackSymbol)
-}
-
-func getTransition(from State) transition {
- switch from {
- case OpenPGPMessage:
- return fromOpenPGPMessage
- case LiteralMessage:
- return fromLiteralMessage
- case CompressedMessage:
- return fromCompressedMessage
- case EncryptedMessage:
- return fromEncryptedMessage
- case ESKMessage:
- return fromESKMessage
- case ValidMessage:
- return fromValidMessage
- }
- return nil
-}
-
-// fromOpenPGPMessage is the transition for the state OpenPGPMessage.
-func fromOpenPGPMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) {
- if stackSymbol != MsgStackSymbol {
- return 0, nil, false, NewErrMalformedMessage(OpenPGPMessage, input, stackSymbol)
- }
- switch input {
- case LDSymbol:
- return LiteralMessage, nil, false, nil
- case SigSymbol:
- return OpenPGPMessage, []StackSymbol{MsgStackSymbol}, false, nil
- case OPSSymbol:
- return OpenPGPMessage, []StackSymbol{OpsStackSymbol, MsgStackSymbol}, false, nil
- case CompSymbol:
- return CompressedMessage, nil, false, nil
- case ESKSymbol:
- return ESKMessage, []StackSymbol{KeyStackSymbol}, false, nil
- case EncSymbol:
- return EncryptedMessage, nil, false, nil
- }
- return 0, nil, false, NewErrMalformedMessage(OpenPGPMessage, input, stackSymbol)
-}
-
-// fromESKMessage is the transition for the state ESKMessage.
-func fromESKMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) {
- if stackSymbol != KeyStackSymbol {
- return 0, nil, false, NewErrMalformedMessage(ESKMessage, input, stackSymbol)
- }
- switch input {
- case ESKSymbol:
- return ESKMessage, []StackSymbol{KeyStackSymbol}, false, nil
- case EncSymbol:
- return EncryptedMessage, nil, false, nil
- }
- return 0, nil, false, NewErrMalformedMessage(ESKMessage, input, stackSymbol)
-}
-
-// fromLiteralMessage is the transition for the state LiteralMessage.
-func fromLiteralMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) {
- switch input {
- case SigSymbol:
- if stackSymbol == OpsStackSymbol {
- return LiteralMessage, nil, false, nil
- }
- case EOSSymbol:
- if stackSymbol == EndStackSymbol {
- return ValidMessage, nil, false, nil
- }
- }
- return 0, nil, false, NewErrMalformedMessage(LiteralMessage, input, stackSymbol)
-}
-
-// fromLiteralMessage is the transition for the state CompressedMessage.
-func fromCompressedMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) {
- switch input {
- case SigSymbol:
- if stackSymbol == OpsStackSymbol {
- return CompressedMessage, nil, false, nil
- }
- case EOSSymbol:
- if stackSymbol == EndStackSymbol {
- return ValidMessage, nil, false, nil
- }
- }
- return OpenPGPMessage, []StackSymbol{MsgStackSymbol}, true, nil
-}
-
-// fromEncryptedMessage is the transition for the state EncryptedMessage.
-func fromEncryptedMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) {
- switch input {
- case SigSymbol:
- if stackSymbol == OpsStackSymbol {
- return EncryptedMessage, nil, false, nil
- }
- case EOSSymbol:
- if stackSymbol == EndStackSymbol {
- return ValidMessage, nil, false, nil
- }
- }
- return OpenPGPMessage, []StackSymbol{MsgStackSymbol}, true, nil
-}
-
-// fromValidMessage is the transition for the state ValidMessage.
-func fromValidMessage(input InputSymbol, stackSymbol StackSymbol) (State, []StackSymbol, bool, error) {
- return 0, nil, false, NewErrMalformedMessage(ValidMessage, input, stackSymbol)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_unsupported.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_unsupported.go
deleted file mode 100644
index 2d714723..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet_unsupported.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package packet
-
-import (
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-// UnsupportedPackage represents a OpenPGP packet with a known packet type
-// but with unsupported content.
-type UnsupportedPacket struct {
- IncompletePacket Packet
- Error errors.UnsupportedError
-}
-
-// Implements the Packet interface
-func (up *UnsupportedPacket) parse(read io.Reader) error {
- err := up.IncompletePacket.parse(read)
- if castedErr, ok := err.(errors.UnsupportedError); ok {
- up.Error = castedErr
- return nil
- }
- return err
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/padding.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/padding.go
deleted file mode 100644
index 06fa8374..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/padding.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package packet
-
-import (
- "io"
- "io/ioutil"
-)
-
-// Padding type represents a Padding Packet (Tag 21).
-// The padding type is represented by the length of its padding.
-// see https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#name-padding-packet-tag-21
-type Padding int
-
-// parse just ignores the padding content.
-func (pad Padding) parse(reader io.Reader) error {
- _, err := io.CopyN(ioutil.Discard, reader, int64(pad))
- return err
-}
-
-// SerializePadding writes the padding to writer.
-func (pad Padding) SerializePadding(writer io.Writer, rand io.Reader) error {
- err := serializeHeader(writer, packetPadding, int(pad))
- if err != nil {
- return err
- }
- _, err = io.CopyN(writer, rand, int64(pad))
- return err
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go
deleted file mode 100644
index 485abd81..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go
+++ /dev/null
@@ -1,1264 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "crypto"
- "crypto/cipher"
- "crypto/dsa"
- "crypto/rsa"
- "crypto/sha1"
- "crypto/sha256"
- "crypto/subtle"
- "fmt"
- "io"
- "math/big"
- "strconv"
- "time"
-
- "github.com/ProtonMail/go-crypto/openpgp/ecdh"
- "github.com/ProtonMail/go-crypto/openpgp/ecdsa"
- "github.com/ProtonMail/go-crypto/openpgp/ed25519"
- "github.com/ProtonMail/go-crypto/openpgp/ed448"
- "github.com/ProtonMail/go-crypto/openpgp/eddsa"
- "github.com/ProtonMail/go-crypto/openpgp/elgamal"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
- "github.com/ProtonMail/go-crypto/openpgp/s2k"
- "github.com/ProtonMail/go-crypto/openpgp/symmetric"
- "github.com/ProtonMail/go-crypto/openpgp/x25519"
- "github.com/ProtonMail/go-crypto/openpgp/x448"
- "golang.org/x/crypto/hkdf"
-)
-
-// PrivateKey represents a possibly encrypted private key. See RFC 4880,
-// section 5.5.3.
-type PrivateKey struct {
- PublicKey
- Encrypted bool // if true then the private key is unavailable until Decrypt has been called.
- encryptedData []byte
- cipher CipherFunction
- s2k func(out, in []byte)
- aead AEADMode // only relevant if S2KAEAD is enabled
- // An *{rsa|dsa|elgamal|ecdh|ecdsa|ed25519|ed448}.PrivateKey or
- // crypto.Signer/crypto.Decrypter (Decryptor RSA only).
- PrivateKey interface{}
- iv []byte
-
- // Type of encryption of the S2K packet
- // Allowed values are 0 (Not encrypted), 253 (AEAD), 254 (SHA1), or
- // 255 (2-byte checksum)
- s2kType S2KType
- // Full parameters of the S2K packet
- s2kParams *s2k.Params
-}
-
-// S2KType s2k packet type
-type S2KType uint8
-
-const (
- // S2KNON unencrypt
- S2KNON S2KType = 0
- // S2KAEAD use authenticated encryption
- S2KAEAD S2KType = 253
- // S2KSHA1 sha1 sum check
- S2KSHA1 S2KType = 254
- // S2KCHECKSUM sum check
- S2KCHECKSUM S2KType = 255
-)
-
-func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewEdDSAPrivateKey(creationTime time.Time, priv *eddsa.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewEdDSAPublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewECDHPrivateKey(creationTime time.Time, priv *ecdh.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewX25519PrivateKey(creationTime time.Time, priv *x25519.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewX25519PublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewX448PrivateKey(creationTime time.Time, priv *x448.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewX448PublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewEd25519PrivateKey(creationTime time.Time, priv *ed25519.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewEd25519PublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-func NewEd448PrivateKey(creationTime time.Time, priv *ed448.PrivateKey) *PrivateKey {
- pk := new(PrivateKey)
- pk.PublicKey = *NewEd448PublicKey(creationTime, &priv.PublicKey)
- pk.PrivateKey = priv
- return pk
-}
-
-// NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that
-// implements RSA, ECDSA or EdDSA.
-func NewSignerPrivateKey(creationTime time.Time, signer interface{}) *PrivateKey {
- pk := new(PrivateKey)
- // In general, the public Keys should be used as pointers. We still
- // type-switch on the values, for backwards-compatibility.
- switch pubkey := signer.(type) {
- case *rsa.PrivateKey:
- pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey.PublicKey)
- case rsa.PrivateKey:
- pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey.PublicKey)
- case *ecdsa.PrivateKey:
- pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey.PublicKey)
- case ecdsa.PrivateKey:
- pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey.PublicKey)
- case *eddsa.PrivateKey:
- pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey)
- case eddsa.PrivateKey:
- pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey.PublicKey)
- case *ed25519.PrivateKey:
- pk.PublicKey = *NewEd25519PublicKey(creationTime, &pubkey.PublicKey)
- case ed25519.PrivateKey:
- pk.PublicKey = *NewEd25519PublicKey(creationTime, &pubkey.PublicKey)
- case *ed448.PrivateKey:
- pk.PublicKey = *NewEd448PublicKey(creationTime, &pubkey.PublicKey)
- case ed448.PrivateKey:
- pk.PublicKey = *NewEd448PublicKey(creationTime, &pubkey.PublicKey)
- case *symmetric.HMACPrivateKey:
- pk.PublicKey = *NewHMACPublicKey(creationTime, &pubkey.PublicKey)
- default:
- panic("openpgp: unknown signer type in NewSignerPrivateKey")
- }
- pk.PrivateKey = signer
- return pk
-}
-
-// NewDecrypterPrivateKey creates a PrivateKey from a *{rsa|elgamal|ecdh|x25519|x448}.PrivateKey.
-func NewDecrypterPrivateKey(creationTime time.Time, decrypter interface{}) *PrivateKey {
- pk := new(PrivateKey)
- switch priv := decrypter.(type) {
- case *rsa.PrivateKey:
- pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey)
- case *elgamal.PrivateKey:
- pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey)
- case *ecdh.PrivateKey:
- pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey)
- case *x25519.PrivateKey:
- pk.PublicKey = *NewX25519PublicKey(creationTime, &priv.PublicKey)
- case *x448.PrivateKey:
- pk.PublicKey = *NewX448PublicKey(creationTime, &priv.PublicKey)
- case *symmetric.AEADPrivateKey:
- pk.PublicKey = *NewAEADPublicKey(creationTime, &priv.PublicKey)
- default:
- panic("openpgp: unknown decrypter type in NewDecrypterPrivateKey")
- }
- pk.PrivateKey = decrypter
- return pk
-}
-
-func (pk *PrivateKey) parse(r io.Reader) (err error) {
- err = (&pk.PublicKey).parse(r)
- if err != nil {
- return
- }
- v5 := pk.PublicKey.Version == 5
- v6 := pk.PublicKey.Version == 6
-
- var buf [1]byte
- _, err = readFull(r, buf[:])
- if err != nil {
- return
- }
- pk.s2kType = S2KType(buf[0])
- var optCount [1]byte
- if v5 || (v6 && pk.s2kType != S2KNON) {
- if _, err = readFull(r, optCount[:]); err != nil {
- return
- }
- }
-
- switch pk.s2kType {
- case S2KNON:
- pk.s2k = nil
- pk.Encrypted = false
- case S2KSHA1, S2KCHECKSUM, S2KAEAD:
- if (v5 || v6) && pk.s2kType == S2KCHECKSUM {
- return errors.StructuralError(fmt.Sprintf("wrong s2k identifier for version %d", pk.Version))
- }
- _, err = readFull(r, buf[:])
- if err != nil {
- return
- }
- pk.cipher = CipherFunction(buf[0])
- if pk.cipher != 0 && !pk.cipher.IsSupported() {
- return errors.UnsupportedError("unsupported cipher function in private key")
- }
- // [Optional] If string-to-key usage octet was 253,
- // a one-octet AEAD algorithm.
- if pk.s2kType == S2KAEAD {
- _, err = readFull(r, buf[:])
- if err != nil {
- return
- }
- pk.aead = AEADMode(buf[0])
- if !pk.aead.IsSupported() {
- return errors.UnsupportedError("unsupported aead mode in private key")
- }
- }
-
- // [Optional] Only for a version 6 packet,
- // and if string-to-key usage octet was 255, 254, or 253,
- // an one-octet count of the following field.
- if v6 {
- _, err = readFull(r, buf[:])
- if err != nil {
- return
- }
- }
-
- pk.s2kParams, err = s2k.ParseIntoParams(r)
- if err != nil {
- return
- }
- if pk.s2kParams.Dummy() {
- return
- }
- pk.s2k, err = pk.s2kParams.Function()
- if err != nil {
- return
- }
- pk.Encrypted = true
- default:
- return errors.UnsupportedError("deprecated s2k function in private key")
- }
-
- if pk.Encrypted {
- var ivSize int
- // If the S2K usage octet was 253, the IV is of the size expected by the AEAD mode,
- // unless it's a version 5 key, in which case it's the size of the symmetric cipher's block size.
- // For all other S2K modes, it's always the block size.
- if !v5 && pk.s2kType == S2KAEAD {
- ivSize = pk.aead.IvLength()
- } else {
- ivSize = pk.cipher.blockSize()
- }
-
- if ivSize == 0 {
- return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher)))
- }
- pk.iv = make([]byte, ivSize)
- _, err = readFull(r, pk.iv)
- if err != nil {
- return
- }
- if v5 && pk.s2kType == S2KAEAD {
- pk.iv = pk.iv[:pk.aead.IvLength()]
- }
- }
-
- var privateKeyData []byte
- if v5 {
- var n [4]byte /* secret material four octet count */
- _, err = readFull(r, n[:])
- if err != nil {
- return
- }
- count := uint32(uint32(n[0])<<24 | uint32(n[1])<<16 | uint32(n[2])<<8 | uint32(n[3]))
- if !pk.Encrypted {
- count = count + 2 /* two octet checksum */
- }
- privateKeyData = make([]byte, count)
- _, err = readFull(r, privateKeyData)
- if err != nil {
- return
- }
- } else {
- privateKeyData, err = io.ReadAll(r)
- if err != nil {
- return
- }
- }
- if !pk.Encrypted {
- if len(privateKeyData) < 2 {
- return errors.StructuralError("truncated private key data")
- }
- if pk.Version != 6 {
- // checksum
- var sum uint16
- for i := 0; i < len(privateKeyData)-2; i++ {
- sum += uint16(privateKeyData[i])
- }
- if privateKeyData[len(privateKeyData)-2] != uint8(sum>>8) ||
- privateKeyData[len(privateKeyData)-1] != uint8(sum) {
- return errors.StructuralError("private key checksum failure")
- }
- privateKeyData = privateKeyData[:len(privateKeyData)-2]
- return pk.parsePrivateKey(privateKeyData)
- } else {
- // No checksum
- return pk.parsePrivateKey(privateKeyData)
- }
- }
-
- pk.encryptedData = privateKeyData
- return
-}
-
-// Dummy returns true if the private key is a dummy key. This is a GNU extension.
-func (pk *PrivateKey) Dummy() bool {
- return pk.s2kParams.Dummy()
-}
-
-func mod64kHash(d []byte) uint16 {
- var h uint16
- for _, b := range d {
- h += uint16(b)
- }
- return h
-}
-
-func (pk *PrivateKey) Serialize(w io.Writer) (err error) {
- contents := bytes.NewBuffer(nil)
- err = pk.PublicKey.serializeWithoutHeaders(contents)
- if err != nil {
- return
- }
- if _, err = contents.Write([]byte{uint8(pk.s2kType)}); err != nil {
- return
- }
-
- optional := bytes.NewBuffer(nil)
- if pk.Encrypted || pk.Dummy() {
- // [Optional] If string-to-key usage octet was 255, 254, or 253,
- // a one-octet symmetric encryption algorithm.
- if _, err = optional.Write([]byte{uint8(pk.cipher)}); err != nil {
- return
- }
- // [Optional] If string-to-key usage octet was 253,
- // a one-octet AEAD algorithm.
- if pk.s2kType == S2KAEAD {
- if _, err = optional.Write([]byte{uint8(pk.aead)}); err != nil {
- return
- }
- }
-
- s2kBuffer := bytes.NewBuffer(nil)
- if err := pk.s2kParams.Serialize(s2kBuffer); err != nil {
- return err
- }
- // [Optional] Only for a version 6 packet, and if string-to-key
- // usage octet was 255, 254, or 253, an one-octet
- // count of the following field.
- if pk.Version == 6 {
- if _, err = optional.Write([]byte{uint8(s2kBuffer.Len())}); err != nil {
- return
- }
- }
- // [Optional] If string-to-key usage octet was 255, 254, or 253,
- // a string-to-key (S2K) specifier. The length of the string-to-key specifier
- // depends on its type
- if _, err = io.Copy(optional, s2kBuffer); err != nil {
- return
- }
-
- // IV
- if pk.Encrypted {
- if _, err = optional.Write(pk.iv); err != nil {
- return
- }
- if pk.Version == 5 && pk.s2kType == S2KAEAD {
- // Add padding for version 5
- padding := make([]byte, pk.cipher.blockSize()-len(pk.iv))
- if _, err = optional.Write(padding); err != nil {
- return
- }
- }
- }
- }
- if pk.Version == 5 || (pk.Version == 6 && pk.s2kType != S2KNON) {
- contents.Write([]byte{uint8(optional.Len())})
- }
-
- if _, err := io.Copy(contents, optional); err != nil {
- return err
- }
-
- if !pk.Dummy() {
- l := 0
- var priv []byte
- if !pk.Encrypted {
- buf := bytes.NewBuffer(nil)
- err = pk.serializePrivateKey(buf)
- if err != nil {
- return err
- }
- l = buf.Len()
- if pk.Version != 6 {
- checksum := mod64kHash(buf.Bytes())
- buf.Write([]byte{byte(checksum >> 8), byte(checksum)})
- }
- priv = buf.Bytes()
- } else {
- priv, l = pk.encryptedData, len(pk.encryptedData)
- }
-
- if pk.Version == 5 {
- contents.Write([]byte{byte(l >> 24), byte(l >> 16), byte(l >> 8), byte(l)})
- }
- contents.Write(priv)
- }
-
- ptype := packetTypePrivateKey
- if pk.IsSubkey {
- ptype = packetTypePrivateSubkey
- }
- err = serializeHeader(w, ptype, contents.Len())
- if err != nil {
- return
- }
- _, err = io.Copy(w, contents)
- if err != nil {
- return
- }
- return
-}
-
-func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error {
- if _, err := w.Write(new(encoding.MPI).SetBig(priv.D).EncodedBytes()); err != nil {
- return err
- }
- if _, err := w.Write(new(encoding.MPI).SetBig(priv.Primes[1]).EncodedBytes()); err != nil {
- return err
- }
- if _, err := w.Write(new(encoding.MPI).SetBig(priv.Primes[0]).EncodedBytes()); err != nil {
- return err
- }
- _, err := w.Write(new(encoding.MPI).SetBig(priv.Precomputed.Qinv).EncodedBytes())
- return err
-}
-
-func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error {
- _, err := w.Write(new(encoding.MPI).SetBig(priv.X).EncodedBytes())
- return err
-}
-
-func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error {
- _, err := w.Write(new(encoding.MPI).SetBig(priv.X).EncodedBytes())
- return err
-}
-
-func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error {
- _, err := w.Write(encoding.NewMPI(priv.MarshalIntegerSecret()).EncodedBytes())
- return err
-}
-
-func serializeEdDSAPrivateKey(w io.Writer, priv *eddsa.PrivateKey) error {
- _, err := w.Write(encoding.NewMPI(priv.MarshalByteSecret()).EncodedBytes())
- return err
-}
-
-func serializeECDHPrivateKey(w io.Writer, priv *ecdh.PrivateKey) error {
- _, err := w.Write(encoding.NewMPI(priv.MarshalByteSecret()).EncodedBytes())
- return err
-}
-
-func serializeX25519PrivateKey(w io.Writer, priv *x25519.PrivateKey) error {
- _, err := w.Write(priv.Secret)
- return err
-}
-
-func serializeX448PrivateKey(w io.Writer, priv *x448.PrivateKey) error {
- _, err := w.Write(priv.Secret)
- return err
-}
-
-func serializeEd25519PrivateKey(w io.Writer, priv *ed25519.PrivateKey) error {
- _, err := w.Write(priv.MarshalByteSecret())
- return err
-}
-
-func serializeEd448PrivateKey(w io.Writer, priv *ed448.PrivateKey) error {
- _, err := w.Write(priv.MarshalByteSecret())
- return err
-}
-
-func serializeAEADPrivateKey(w io.Writer, priv *symmetric.AEADPrivateKey) (err error) {
- _, err = w.Write(priv.HashSeed[:])
- if err != nil {
- return
- }
- _, err = w.Write(priv.Key)
- return
-}
-
-func serializeHMACPrivateKey(w io.Writer, priv *symmetric.HMACPrivateKey) (err error) {
- _, err = w.Write(priv.HashSeed[:])
- if err != nil {
- return
- }
- _, err = w.Write(priv.Key)
- return
-}
-
-// decrypt decrypts an encrypted private key using a decryption key.
-func (pk *PrivateKey) decrypt(decryptionKey []byte) error {
- if pk.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
- if !pk.Encrypted {
- return nil
- }
- block := pk.cipher.new(decryptionKey)
- var data []byte
- switch pk.s2kType {
- case S2KAEAD:
- aead := pk.aead.new(block)
- additionalData, err := pk.additionalData()
- if err != nil {
- return err
- }
- // Decrypt the encrypted key material with aead
- data, err = aead.Open(nil, pk.iv, pk.encryptedData, additionalData)
- if err != nil {
- return err
- }
- case S2KSHA1, S2KCHECKSUM:
- cfb := cipher.NewCFBDecrypter(block, pk.iv)
- data = make([]byte, len(pk.encryptedData))
- cfb.XORKeyStream(data, pk.encryptedData)
- if pk.s2kType == S2KSHA1 {
- if len(data) < sha1.Size {
- return errors.StructuralError("truncated private key data")
- }
- h := sha1.New()
- h.Write(data[:len(data)-sha1.Size])
- sum := h.Sum(nil)
- if !bytes.Equal(sum, data[len(data)-sha1.Size:]) {
- return errors.StructuralError("private key checksum failure")
- }
- data = data[:len(data)-sha1.Size]
- } else {
- if len(data) < 2 {
- return errors.StructuralError("truncated private key data")
- }
- var sum uint16
- for i := 0; i < len(data)-2; i++ {
- sum += uint16(data[i])
- }
- if data[len(data)-2] != uint8(sum>>8) ||
- data[len(data)-1] != uint8(sum) {
- return errors.StructuralError("private key checksum failure")
- }
- data = data[:len(data)-2]
- }
- default:
- return errors.InvalidArgumentError("invalid s2k type")
- }
-
- err := pk.parsePrivateKey(data)
- if _, ok := err.(errors.KeyInvalidError); ok {
- return errors.KeyInvalidError("invalid key parameters")
- }
- if err != nil {
- return err
- }
-
- // Mark key as unencrypted
- pk.s2kType = S2KNON
- pk.s2k = nil
- pk.Encrypted = false
- pk.encryptedData = nil
- return nil
-}
-
-func (pk *PrivateKey) decryptWithCache(passphrase []byte, keyCache *s2k.Cache) error {
- if pk.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
- if !pk.Encrypted {
- return nil
- }
-
- key, err := keyCache.GetOrComputeDerivedKey(passphrase, pk.s2kParams, pk.cipher.KeySize())
- if err != nil {
- return err
- }
- if pk.s2kType == S2KAEAD {
- key = pk.applyHKDF(key)
- }
- return pk.decrypt(key)
-}
-
-// Decrypt decrypts an encrypted private key using a passphrase.
-func (pk *PrivateKey) Decrypt(passphrase []byte) error {
- if pk.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
- if !pk.Encrypted {
- return nil
- }
-
- key := make([]byte, pk.cipher.KeySize())
- pk.s2k(key, passphrase)
- if pk.s2kType == S2KAEAD {
- key = pk.applyHKDF(key)
- }
- return pk.decrypt(key)
-}
-
-// DecryptPrivateKeys decrypts all encrypted keys with the given config and passphrase.
-// Avoids recomputation of similar s2k key derivations.
-func DecryptPrivateKeys(keys []*PrivateKey, passphrase []byte) error {
- // Create a cache to avoid recomputation of key derviations for the same passphrase.
- s2kCache := &s2k.Cache{}
- for _, key := range keys {
- if key != nil && !key.Dummy() && key.Encrypted {
- err := key.decryptWithCache(passphrase, s2kCache)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// encrypt encrypts an unencrypted private key.
-func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, s2kType S2KType, cipherFunction CipherFunction, rand io.Reader) error {
- if pk.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
- if pk.Encrypted {
- return nil
- }
- // check if encryptionKey has the correct size
- if len(key) != cipherFunction.KeySize() {
- return errors.InvalidArgumentError("supplied encryption key has the wrong size")
- }
-
- priv := bytes.NewBuffer(nil)
- err := pk.serializePrivateKey(priv)
- if err != nil {
- return err
- }
-
- pk.cipher = cipherFunction
- pk.s2kParams = params
- pk.s2k, err = pk.s2kParams.Function()
- if err != nil {
- return err
- }
-
- privateKeyBytes := priv.Bytes()
- pk.s2kType = s2kType
- block := pk.cipher.new(key)
- switch s2kType {
- case S2KAEAD:
- if pk.aead == 0 {
- return errors.StructuralError("aead mode is not set on key")
- }
- aead := pk.aead.new(block)
- additionalData, err := pk.additionalData()
- if err != nil {
- return err
- }
- pk.iv = make([]byte, aead.NonceSize())
- _, err = io.ReadFull(rand, pk.iv)
- if err != nil {
- return err
- }
- // Decrypt the encrypted key material with aead
- pk.encryptedData = aead.Seal(nil, pk.iv, privateKeyBytes, additionalData)
- case S2KSHA1, S2KCHECKSUM:
- pk.iv = make([]byte, pk.cipher.blockSize())
- _, err = io.ReadFull(rand, pk.iv)
- if err != nil {
- return err
- }
- cfb := cipher.NewCFBEncrypter(block, pk.iv)
- if s2kType == S2KSHA1 {
- h := sha1.New()
- h.Write(privateKeyBytes)
- sum := h.Sum(nil)
- privateKeyBytes = append(privateKeyBytes, sum...)
- } else {
- var sum uint16
- for _, b := range privateKeyBytes {
- sum += uint16(b)
- }
- privateKeyBytes = append(privateKeyBytes, []byte{uint8(sum >> 8), uint8(sum)}...)
- }
- pk.encryptedData = make([]byte, len(privateKeyBytes))
- cfb.XORKeyStream(pk.encryptedData, privateKeyBytes)
- default:
- return errors.InvalidArgumentError("invalid s2k type for encryption")
- }
-
- pk.Encrypted = true
- pk.PrivateKey = nil
- return err
-}
-
-// EncryptWithConfig encrypts an unencrypted private key using the passphrase and the config.
-func (pk *PrivateKey) EncryptWithConfig(passphrase []byte, config *Config) error {
- params, err := s2k.Generate(config.Random(), config.S2K())
- if err != nil {
- return err
- }
- // Derive an encryption key with the configured s2k function.
- key := make([]byte, config.Cipher().KeySize())
- s2k, err := params.Function()
- if err != nil {
- return err
- }
- s2k(key, passphrase)
- s2kType := S2KSHA1
- if config.AEAD() != nil {
- s2kType = S2KAEAD
- pk.aead = config.AEAD().Mode()
- pk.cipher = config.Cipher()
- key = pk.applyHKDF(key)
- }
- // Encrypt the private key with the derived encryption key.
- return pk.encrypt(key, params, s2kType, config.Cipher(), config.Random())
-}
-
-// EncryptPrivateKeys encrypts all unencrypted keys with the given config and passphrase.
-// Only derives one key from the passphrase, which is then used to encrypt each key.
-func EncryptPrivateKeys(keys []*PrivateKey, passphrase []byte, config *Config) error {
- params, err := s2k.Generate(config.Random(), config.S2K())
- if err != nil {
- return err
- }
- // Derive an encryption key with the configured s2k function.
- encryptionKey := make([]byte, config.Cipher().KeySize())
- s2k, err := params.Function()
- if err != nil {
- return err
- }
- s2k(encryptionKey, passphrase)
- for _, key := range keys {
- if key != nil && !key.Dummy() && !key.Encrypted {
- s2kType := S2KSHA1
- if config.AEAD() != nil {
- s2kType = S2KAEAD
- key.aead = config.AEAD().Mode()
- key.cipher = config.Cipher()
- derivedKey := key.applyHKDF(encryptionKey)
- err = key.encrypt(derivedKey, params, s2kType, config.Cipher(), config.Random())
- } else {
- err = key.encrypt(encryptionKey, params, s2kType, config.Cipher(), config.Random())
- }
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// Encrypt encrypts an unencrypted private key using a passphrase.
-func (pk *PrivateKey) Encrypt(passphrase []byte) error {
- // Default config of private key encryption
- config := &Config{
- S2KConfig: &s2k.Config{
- S2KMode: s2k.IteratedSaltedS2K,
- S2KCount: 65536,
- Hash: crypto.SHA256,
- },
- DefaultCipher: CipherAES256,
- }
- return pk.EncryptWithConfig(passphrase, config)
-}
-
-func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) {
- switch priv := pk.PrivateKey.(type) {
- case *rsa.PrivateKey:
- err = serializeRSAPrivateKey(w, priv)
- case *dsa.PrivateKey:
- err = serializeDSAPrivateKey(w, priv)
- case *elgamal.PrivateKey:
- err = serializeElGamalPrivateKey(w, priv)
- case *ecdsa.PrivateKey:
- err = serializeECDSAPrivateKey(w, priv)
- case *eddsa.PrivateKey:
- err = serializeEdDSAPrivateKey(w, priv)
- case *ecdh.PrivateKey:
- err = serializeECDHPrivateKey(w, priv)
- case *x25519.PrivateKey:
- err = serializeX25519PrivateKey(w, priv)
- case *x448.PrivateKey:
- err = serializeX448PrivateKey(w, priv)
- case *ed25519.PrivateKey:
- err = serializeEd25519PrivateKey(w, priv)
- case *ed448.PrivateKey:
- err = serializeEd448PrivateKey(w, priv)
- case *symmetric.AEADPrivateKey:
- err = serializeAEADPrivateKey(w, priv)
- case *symmetric.HMACPrivateKey:
- err = serializeHMACPrivateKey(w, priv)
- default:
- err = errors.InvalidArgumentError("unknown private key type")
- }
- return
-}
-
-func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) {
- switch pk.PublicKey.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly:
- return pk.parseRSAPrivateKey(data)
- case PubKeyAlgoDSA:
- return pk.parseDSAPrivateKey(data)
- case PubKeyAlgoElGamal:
- return pk.parseElGamalPrivateKey(data)
- case PubKeyAlgoECDSA:
- return pk.parseECDSAPrivateKey(data)
- case PubKeyAlgoECDH:
- return pk.parseECDHPrivateKey(data)
- case PubKeyAlgoEdDSA:
- return pk.parseEdDSAPrivateKey(data)
- case PubKeyAlgoX25519:
- return pk.parseX25519PrivateKey(data)
- case PubKeyAlgoX448:
- return pk.parseX448PrivateKey(data)
- case PubKeyAlgoEd25519:
- return pk.parseEd25519PrivateKey(data)
- case PubKeyAlgoEd448:
- return pk.parseEd448PrivateKey(data)
- default:
- err = errors.StructuralError("unknown private key type")
- return
- case ExperimentalPubKeyAlgoAEAD:
- return pk.parseAEADPrivateKey(data)
- case ExperimentalPubKeyAlgoHMAC:
- return pk.parseHMACPrivateKey(data)
- }
-}
-
-func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) {
- rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey)
- rsaPriv := new(rsa.PrivateKey)
- rsaPriv.PublicKey = *rsaPub
-
- buf := bytes.NewBuffer(data)
- d := new(encoding.MPI)
- if _, err := d.ReadFrom(buf); err != nil {
- return err
- }
-
- p := new(encoding.MPI)
- if _, err := p.ReadFrom(buf); err != nil {
- return err
- }
-
- q := new(encoding.MPI)
- if _, err := q.ReadFrom(buf); err != nil {
- return err
- }
-
- rsaPriv.D = new(big.Int).SetBytes(d.Bytes())
- rsaPriv.Primes = make([]*big.Int, 2)
- rsaPriv.Primes[0] = new(big.Int).SetBytes(p.Bytes())
- rsaPriv.Primes[1] = new(big.Int).SetBytes(q.Bytes())
- if err := rsaPriv.Validate(); err != nil {
- return errors.KeyInvalidError(err.Error())
- }
- rsaPriv.Precompute()
- pk.PrivateKey = rsaPriv
-
- return nil
-}
-
-func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) {
- dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey)
- dsaPriv := new(dsa.PrivateKey)
- dsaPriv.PublicKey = *dsaPub
-
- buf := bytes.NewBuffer(data)
- x := new(encoding.MPI)
- if _, err := x.ReadFrom(buf); err != nil {
- return err
- }
-
- dsaPriv.X = new(big.Int).SetBytes(x.Bytes())
- if err := validateDSAParameters(dsaPriv); err != nil {
- return err
- }
- pk.PrivateKey = dsaPriv
-
- return nil
-}
-
-func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) {
- pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey)
- priv := new(elgamal.PrivateKey)
- priv.PublicKey = *pub
-
- buf := bytes.NewBuffer(data)
- x := new(encoding.MPI)
- if _, err := x.ReadFrom(buf); err != nil {
- return err
- }
-
- priv.X = new(big.Int).SetBytes(x.Bytes())
- if err := validateElGamalParameters(priv); err != nil {
- return err
- }
- pk.PrivateKey = priv
-
- return nil
-}
-
-func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) {
- ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey)
- ecdsaPriv := ecdsa.NewPrivateKey(*ecdsaPub)
-
- buf := bytes.NewBuffer(data)
- d := new(encoding.MPI)
- if _, err := d.ReadFrom(buf); err != nil {
- return err
- }
-
- if err := ecdsaPriv.UnmarshalIntegerSecret(d.Bytes()); err != nil {
- return err
- }
- if err := ecdsa.Validate(ecdsaPriv); err != nil {
- return err
- }
- pk.PrivateKey = ecdsaPriv
-
- return nil
-}
-
-func (pk *PrivateKey) parseECDHPrivateKey(data []byte) (err error) {
- ecdhPub := pk.PublicKey.PublicKey.(*ecdh.PublicKey)
- ecdhPriv := ecdh.NewPrivateKey(*ecdhPub)
-
- buf := bytes.NewBuffer(data)
- d := new(encoding.MPI)
- if _, err := d.ReadFrom(buf); err != nil {
- return err
- }
-
- if err := ecdhPriv.UnmarshalByteSecret(d.Bytes()); err != nil {
- return err
- }
-
- if err := ecdh.Validate(ecdhPriv); err != nil {
- return err
- }
-
- pk.PrivateKey = ecdhPriv
-
- return nil
-}
-
-func (pk *PrivateKey) parseX25519PrivateKey(data []byte) (err error) {
- publicKey := pk.PublicKey.PublicKey.(*x25519.PublicKey)
- privateKey := x25519.NewPrivateKey(*publicKey)
- privateKey.PublicKey = *publicKey
-
- privateKey.Secret = make([]byte, x25519.KeySize)
-
- if len(data) != x25519.KeySize {
- err = errors.StructuralError("wrong x25519 key size")
- return err
- }
- subtle.ConstantTimeCopy(1, privateKey.Secret, data)
- if err = x25519.Validate(privateKey); err != nil {
- return err
- }
- pk.PrivateKey = privateKey
- return nil
-}
-
-func (pk *PrivateKey) parseX448PrivateKey(data []byte) (err error) {
- publicKey := pk.PublicKey.PublicKey.(*x448.PublicKey)
- privateKey := x448.NewPrivateKey(*publicKey)
- privateKey.PublicKey = *publicKey
-
- privateKey.Secret = make([]byte, x448.KeySize)
-
- if len(data) != x448.KeySize {
- err = errors.StructuralError("wrong x448 key size")
- return err
- }
- subtle.ConstantTimeCopy(1, privateKey.Secret, data)
- if err = x448.Validate(privateKey); err != nil {
- return err
- }
- pk.PrivateKey = privateKey
- return nil
-}
-
-func (pk *PrivateKey) parseEd25519PrivateKey(data []byte) (err error) {
- publicKey := pk.PublicKey.PublicKey.(*ed25519.PublicKey)
- privateKey := ed25519.NewPrivateKey(*publicKey)
- privateKey.PublicKey = *publicKey
-
- if len(data) != ed25519.SeedSize {
- err = errors.StructuralError("wrong ed25519 key size")
- return err
- }
- err = privateKey.UnmarshalByteSecret(data)
- if err != nil {
- return err
- }
- err = ed25519.Validate(privateKey)
- if err != nil {
- return err
- }
- pk.PrivateKey = privateKey
- return nil
-}
-
-func (pk *PrivateKey) parseEd448PrivateKey(data []byte) (err error) {
- publicKey := pk.PublicKey.PublicKey.(*ed448.PublicKey)
- privateKey := ed448.NewPrivateKey(*publicKey)
- privateKey.PublicKey = *publicKey
-
- if len(data) != ed448.SeedSize {
- err = errors.StructuralError("wrong ed448 key size")
- return err
- }
- err = privateKey.UnmarshalByteSecret(data)
- if err != nil {
- return err
- }
- err = ed448.Validate(privateKey)
- if err != nil {
- return err
- }
- pk.PrivateKey = privateKey
- return nil
-}
-
-func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) {
- eddsaPub := pk.PublicKey.PublicKey.(*eddsa.PublicKey)
- eddsaPriv := eddsa.NewPrivateKey(*eddsaPub)
- eddsaPriv.PublicKey = *eddsaPub
-
- buf := bytes.NewBuffer(data)
- d := new(encoding.MPI)
- if _, err := d.ReadFrom(buf); err != nil {
- return err
- }
-
- if err = eddsaPriv.UnmarshalByteSecret(d.Bytes()); err != nil {
- return err
- }
-
- if err := eddsa.Validate(eddsaPriv); err != nil {
- return err
- }
-
- pk.PrivateKey = eddsaPriv
-
- return nil
-}
-
-func (pk *PrivateKey) additionalData() ([]byte, error) {
- additionalData := bytes.NewBuffer(nil)
- // Write additional data prefix based on packet type
- var packetByte byte
- if pk.PublicKey.IsSubkey {
- packetByte = 0xc7
- } else {
- packetByte = 0xc5
- }
- // Write public key to additional data
- _, err := additionalData.Write([]byte{packetByte})
- if err != nil {
- return nil, err
- }
- err = pk.PublicKey.serializeWithoutHeaders(additionalData)
- if err != nil {
- return nil, err
- }
- return additionalData.Bytes(), nil
-}
-
-func (pk *PrivateKey) applyHKDF(inputKey []byte) []byte {
- var packetByte byte
- if pk.PublicKey.IsSubkey {
- packetByte = 0xc7
- } else {
- packetByte = 0xc5
- }
- associatedData := []byte{packetByte, byte(pk.Version), byte(pk.cipher), byte(pk.aead)}
- hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, associatedData)
- encryptionKey := make([]byte, pk.cipher.KeySize())
- _, _ = readFull(hkdfReader, encryptionKey)
- return encryptionKey
-}
-
-func (pk *PrivateKey) parseAEADPrivateKey(data []byte) (err error) {
- pubKey := pk.PublicKey.PublicKey.(*symmetric.AEADPublicKey)
-
- aeadPriv := new(symmetric.AEADPrivateKey)
- aeadPriv.PublicKey = *pubKey
-
- copy(aeadPriv.HashSeed[:], data[:32])
-
- priv := make([]byte, pubKey.Cipher.KeySize())
- copy(priv, data[32:])
- aeadPriv.Key = priv
- aeadPriv.PublicKey.Key = aeadPriv.Key
-
- if err = validateAEADParameters(aeadPriv); err != nil {
- return
- }
-
- pk.PrivateKey = aeadPriv
- pk.PublicKey.PublicKey = &aeadPriv.PublicKey
- return
-}
-
-func (pk *PrivateKey) parseHMACPrivateKey(data []byte) (err error) {
- pubKey := pk.PublicKey.PublicKey.(*symmetric.HMACPublicKey)
-
- hmacPriv := new(symmetric.HMACPrivateKey)
- hmacPriv.PublicKey = *pubKey
-
- copy(hmacPriv.HashSeed[:], data[:32])
-
- priv := make([]byte, pubKey.Hash.Size())
- copy(priv, data[32:])
- hmacPriv.Key = data[32:]
- hmacPriv.PublicKey.Key = hmacPriv.Key
-
- if err = validateHMACParameters(hmacPriv); err != nil {
- return
- }
-
- pk.PrivateKey = hmacPriv
- pk.PublicKey.PublicKey = &hmacPriv.PublicKey
- return
-}
-
-func validateAEADParameters(priv *symmetric.AEADPrivateKey) error {
- return validateCommonSymmetric(priv.HashSeed, priv.PublicKey.BindingHash)
-}
-
-func validateHMACParameters(priv *symmetric.HMACPrivateKey) error {
- return validateCommonSymmetric(priv.HashSeed, priv.PublicKey.BindingHash)
-}
-
-func validateCommonSymmetric(seed [32]byte, bindingHash [32]byte) error {
- expectedBindingHash := symmetric.ComputeBindingHash(seed)
- if !bytes.Equal(expectedBindingHash, bindingHash[:]) {
- return errors.KeyInvalidError("symmetric: wrong binding hash")
- }
- return nil
-}
-
-func validateDSAParameters(priv *dsa.PrivateKey) error {
- p := priv.P // group prime
- q := priv.Q // subgroup order
- g := priv.G // g has order q mod p
- x := priv.X // secret
- y := priv.Y // y == g**x mod p
- one := big.NewInt(1)
- // expect g, y >= 2 and g < p
- if g.Cmp(one) <= 0 || y.Cmp(one) <= 0 || g.Cmp(p) > 0 {
- return errors.KeyInvalidError("dsa: invalid group")
- }
- // expect p > q
- if p.Cmp(q) <= 0 {
- return errors.KeyInvalidError("dsa: invalid group prime")
- }
- // q should be large enough and divide p-1
- pSub1 := new(big.Int).Sub(p, one)
- if q.BitLen() < 150 || new(big.Int).Mod(pSub1, q).Cmp(big.NewInt(0)) != 0 {
- return errors.KeyInvalidError("dsa: invalid order")
- }
- // confirm that g has order q mod p
- if !q.ProbablyPrime(32) || new(big.Int).Exp(g, q, p).Cmp(one) != 0 {
- return errors.KeyInvalidError("dsa: invalid order")
- }
- // check y
- if new(big.Int).Exp(g, x, p).Cmp(y) != 0 {
- return errors.KeyInvalidError("dsa: mismatching values")
- }
-
- return nil
-}
-
-func validateElGamalParameters(priv *elgamal.PrivateKey) error {
- p := priv.P // group prime
- g := priv.G // g has order p-1 mod p
- x := priv.X // secret
- y := priv.Y // y == g**x mod p
- one := big.NewInt(1)
- // Expect g, y >= 2 and g < p
- if g.Cmp(one) <= 0 || y.Cmp(one) <= 0 || g.Cmp(p) > 0 {
- return errors.KeyInvalidError("elgamal: invalid group")
- }
- if p.BitLen() < 1024 {
- return errors.KeyInvalidError("elgamal: group order too small")
- }
- pSub1 := new(big.Int).Sub(p, one)
- if new(big.Int).Exp(g, pSub1, p).Cmp(one) != 0 {
- return errors.KeyInvalidError("elgamal: invalid group")
- }
- // Since p-1 is not prime, g might have a smaller order that divides p-1.
- // We cannot confirm the exact order of g, but we make sure it is not too small.
- gExpI := new(big.Int).Set(g)
- i := 1
- threshold := 2 << 17 // we want order > threshold
- for i < threshold {
- i++ // we check every order to make sure key validation is not easily bypassed by guessing y'
- gExpI.Mod(new(big.Int).Mul(gExpI, g), p)
- if gExpI.Cmp(one) == 0 {
- return errors.KeyInvalidError("elgamal: order too small")
- }
- }
- // Check y
- if new(big.Int).Exp(g, x, p).Cmp(y) != 0 {
- return errors.KeyInvalidError("elgamal: mismatching values")
- }
-
- return nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go
deleted file mode 100644
index 029b8f1a..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package packet
-
-// Generated with `gpg --export-secret-keys "Test Key 2"`
-const privKeyRSAHex = "9501fe044cc349a8010400b70ca0010e98c090008d45d1ee8f9113bd5861fd57b88bacb7c68658747663f1e1a3b5a98f32fda6472373c024b97359cd2efc88ff60f77751adfbf6af5e615e6a1408cfad8bf0cea30b0d5f53aa27ad59089ba9b15b7ebc2777a25d7b436144027e3bcd203909f147d0e332b240cf63d3395f5dfe0df0a6c04e8655af7eacdf0011010001fe0303024a252e7d475fd445607de39a265472aa74a9320ba2dac395faa687e9e0336aeb7e9a7397e511b5afd9dc84557c80ac0f3d4d7bfec5ae16f20d41c8c84a04552a33870b930420e230e179564f6d19bb153145e76c33ae993886c388832b0fa042ddda7f133924f3854481533e0ede31d51278c0519b29abc3bf53da673e13e3e1214b52413d179d7f66deee35cac8eacb060f78379d70ef4af8607e68131ff529439668fc39c9ce6dfef8a5ac234d234802cbfb749a26107db26406213ae5c06d4673253a3cbee1fcbae58d6ab77e38d6e2c0e7c6317c48e054edadb5a40d0d48acb44643d998139a8a66bb820be1f3f80185bc777d14b5954b60effe2448a036d565c6bc0b915fcea518acdd20ab07bc1529f561c58cd044f723109b93f6fd99f876ff891d64306b5d08f48bab59f38695e9109c4dec34013ba3153488ce070268381ba923ee1eb77125b36afcb4347ec3478c8f2735b06ef17351d872e577fa95d0c397c88c71b59629a36aec"
-
-// Generated by `gpg --export-secret-keys` followed by a manual extraction of
-// the ElGamal subkey from the packets.
-const privKeyElGamalHex = "9d0157044df9ee1a100400eb8e136a58ec39b582629cdadf830bc64e0a94ed8103ca8bb247b27b11b46d1d25297ef4bcc3071785ba0c0bedfe89eabc5287fcc0edf81ab5896c1c8e4b20d27d79813c7aede75320b33eaeeaa586edc00fd1036c10133e6ba0ff277245d0d59d04b2b3421b7244aca5f4a8d870c6f1c1fbff9e1c26699a860b9504f35ca1d700030503fd1ededd3b840795be6d9ccbe3c51ee42e2f39233c432b831ddd9c4e72b7025a819317e47bf94f9ee316d7273b05d5fcf2999c3a681f519b1234bbfa6d359b4752bd9c3f77d6b6456cde152464763414ca130f4e91d91041432f90620fec0e6d6b5116076c2985d5aeaae13be492b9b329efcaf7ee25120159a0a30cd976b42d7afe030302dae7eb80db744d4960c4df930d57e87fe81412eaace9f900e6c839817a614ddb75ba6603b9417c33ea7b6c93967dfa2bcff3fa3c74a5ce2c962db65b03aece14c96cbd0038fc"
-
-// pkcs1PrivKeyHex is a PKCS#1, RSA private key.
-// Generated by `openssl genrsa 1024 | openssl rsa -outform DER | xxd -p`
-const pkcs1PrivKeyHex = "3082025d02010002818100e98edfa1c3b35884a54d0b36a6a603b0290fa85e49e30fa23fc94fef9c6790bc4849928607aa48d809da326fb42a969d06ad756b98b9c1a90f5d4a2b6d0ac05953c97f4da3120164a21a679793ce181c906dc01d235cc085ddcdf6ea06c389b6ab8885dfd685959e693138856a68a7e5db263337ff82a088d583a897cf2d59e9020301000102818100b6d5c9eb70b02d5369b3ee5b520a14490b5bde8a317d36f7e4c74b7460141311d1e5067735f8f01d6f5908b2b96fbd881f7a1ab9a84d82753e39e19e2d36856be960d05ac9ef8e8782ea1b6d65aee28fdfe1d61451e8cff0adfe84322f12cf455028b581cf60eb9e0e140ba5d21aeba6c2634d7c65318b9a665fc01c3191ca21024100fa5e818da3705b0fa33278bb28d4b6f6050388af2d4b75ec9375dd91ccf2e7d7068086a8b82a8f6282e4fbbdb8a7f2622eb97295249d87acea7f5f816f54d347024100eecf9406d7dc49cdfb95ab1eff4064de84c7a30f64b2798936a0d2018ba9eb52e4b636f82e96c49cc63b80b675e91e40d1b2e4017d4b9adaf33ab3d9cf1c214f024100c173704ace742c082323066226a4655226819a85304c542b9dacbeacbf5d1881ee863485fcf6f59f3a604f9b42289282067447f2b13dfeed3eab7851fc81e0550240741fc41f3fc002b382eed8730e33c5d8de40256e4accee846667f536832f711ab1d4590e7db91a8a116ac5bff3be13d3f9243ff2e976662aa9b395d907f8e9c9024046a5696c9ef882363e06c9fa4e2f5b580906452befba03f4a99d0f873697ef1f851d2226ca7934b30b7c3e80cb634a67172bbbf4781735fe3e09263e2dd723e7"
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go
deleted file mode 100644
index d14dd662..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go
+++ /dev/null
@@ -1,1192 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "crypto/dsa"
- "crypto/rsa"
- "crypto/sha1"
- "crypto/sha256"
- _ "crypto/sha512"
- "encoding/binary"
- goerrors "errors"
- "fmt"
- "hash"
- "io"
- "math/big"
- "strconv"
- "time"
-
- "github.com/ProtonMail/go-crypto/openpgp/ecdh"
- "github.com/ProtonMail/go-crypto/openpgp/ecdsa"
- "github.com/ProtonMail/go-crypto/openpgp/ed25519"
- "github.com/ProtonMail/go-crypto/openpgp/ed448"
- "github.com/ProtonMail/go-crypto/openpgp/eddsa"
- "github.com/ProtonMail/go-crypto/openpgp/elgamal"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
- "github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
- "github.com/ProtonMail/go-crypto/openpgp/symmetric"
- "github.com/ProtonMail/go-crypto/openpgp/x25519"
- "github.com/ProtonMail/go-crypto/openpgp/x448"
-)
-
-// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2.
-type PublicKey struct {
- Version int
- CreationTime time.Time
- PubKeyAlgo PublicKeyAlgorithm
- PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey or *eddsa.PublicKey, *x25519.PublicKey, *x448.PublicKey, *ed25519.PublicKey, *ed448.PublicKey
- Fingerprint []byte
- KeyId uint64
- IsSubkey bool
-
- // RFC 4880 fields
- n, e, p, q, g, y encoding.Field
-
- // RFC 6637 fields
- // oid contains the OID byte sequence identifying the elliptic curve used
- oid encoding.Field
-
- // kdf stores key derivation function parameters
- // used for ECDH encryption. See RFC 6637, Section 9.
- kdf encoding.Field
-}
-
-// UpgradeToV5 updates the version of the key to v5, and updates all necessary
-// fields.
-func (pk *PublicKey) UpgradeToV5() {
- pk.Version = 5
- pk.setFingerprintAndKeyId()
-}
-
-// UpgradeToV6 updates the version of the key to v6, and updates all necessary
-// fields.
-func (pk *PublicKey) UpgradeToV6() {
- pk.Version = 6
- pk.setFingerprintAndKeyId()
-}
-
-// ReplaceKDF replaces the KDF instance, and updates all necessary fields.
-func (pk *PublicKey) ReplaceKDF(kdf ecdh.KDF) error {
- ecdhKey, ok := pk.PublicKey.(*ecdh.PublicKey)
- if !ok {
- return goerrors.New("wrong forwarding sub key generation")
- }
-
- ecdhKey.KDF = kdf
- byteBuffer := new(bytes.Buffer)
- err := kdf.Serialize(byteBuffer)
- if err != nil {
- return err
- }
-
- pk.kdf = encoding.NewOID(byteBuffer.Bytes()[1:])
- pk.setFingerprintAndKeyId()
-
- return nil
-}
-
-// signingKey provides a convenient abstraction over signature verification
-// for v3 and v4 public keys.
-type signingKey interface {
- SerializeForHash(io.Writer) error
- SerializeSignaturePrefix(io.Writer) error
- serializeWithoutHeaders(io.Writer) error
-}
-
-// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey.
-func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey {
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoRSA,
- PublicKey: pub,
- n: new(encoding.MPI).SetBig(pub.N),
- e: new(encoding.MPI).SetBig(big.NewInt(int64(pub.E))),
- }
-
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey.
-func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey {
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoDSA,
- PublicKey: pub,
- p: new(encoding.MPI).SetBig(pub.P),
- q: new(encoding.MPI).SetBig(pub.Q),
- g: new(encoding.MPI).SetBig(pub.G),
- y: new(encoding.MPI).SetBig(pub.Y),
- }
-
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey.
-func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey {
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoElGamal,
- PublicKey: pub,
- p: new(encoding.MPI).SetBig(pub.P),
- g: new(encoding.MPI).SetBig(pub.G),
- y: new(encoding.MPI).SetBig(pub.Y),
- }
-
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey {
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoECDSA,
- PublicKey: pub,
- p: encoding.NewMPI(pub.MarshalPoint()),
- }
-
- curveInfo := ecc.FindByCurve(pub.GetCurve())
- if curveInfo == nil {
- panic("unknown elliptic curve")
- }
- pk.oid = curveInfo.Oid
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-func NewECDHPublicKey(creationTime time.Time, pub *ecdh.PublicKey) *PublicKey {
- var pk *PublicKey
- var kdf = encoding.NewOID([]byte{0x1, pub.Hash.Id(), pub.Cipher.Id()})
- pk = &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoECDH,
- PublicKey: pub,
- p: encoding.NewMPI(pub.MarshalPoint()),
- kdf: kdf,
- }
-
- curveInfo := ecc.FindByCurve(pub.GetCurve())
-
- if curveInfo == nil {
- panic("unknown elliptic curve")
- }
-
- pk.oid = curveInfo.Oid
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-func NewEdDSAPublicKey(creationTime time.Time, pub *eddsa.PublicKey) *PublicKey {
- curveInfo := ecc.FindByCurve(pub.GetCurve())
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoEdDSA,
- PublicKey: pub,
- oid: curveInfo.Oid,
- // Native point format, see draft-koch-eddsa-for-openpgp-04, Appendix B
- p: encoding.NewMPI(pub.MarshalPoint()),
- }
-
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-func NewX25519PublicKey(creationTime time.Time, pub *x25519.PublicKey) *PublicKey {
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoX25519,
- PublicKey: pub,
- }
-
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-func NewX448PublicKey(creationTime time.Time, pub *x448.PublicKey) *PublicKey {
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoX448,
- PublicKey: pub,
- }
-
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-func NewEd25519PublicKey(creationTime time.Time, pub *ed25519.PublicKey) *PublicKey {
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoEd25519,
- PublicKey: pub,
- }
-
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-func NewEd448PublicKey(creationTime time.Time, pub *ed448.PublicKey) *PublicKey {
- pk := &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: PubKeyAlgoEd448,
- PublicKey: pub,
- }
-
- pk.setFingerprintAndKeyId()
- return pk
-}
-
-func NewAEADPublicKey(creationTime time.Time, pub *symmetric.AEADPublicKey) *PublicKey {
- var pk *PublicKey
- pk = &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: ExperimentalPubKeyAlgoAEAD,
- PublicKey: pub,
- }
-
- return pk
-}
-
-func NewHMACPublicKey(creationTime time.Time, pub *symmetric.HMACPublicKey) *PublicKey {
- var pk *PublicKey
- pk = &PublicKey{
- Version: 4,
- CreationTime: creationTime,
- PubKeyAlgo: ExperimentalPubKeyAlgoHMAC,
- PublicKey: pub,
- }
-
- return pk
-}
-
-func (pk *PublicKey) parse(r io.Reader) (err error) {
- // RFC 4880, section 5.5.2
- var buf [6]byte
- _, err = readFull(r, buf[:])
- if err != nil {
- return
- }
- if buf[0] != 4 && buf[0] != 5 && buf[0] != 6 {
- return errors.UnsupportedError("public key version " + strconv.Itoa(int(buf[0])))
- }
-
- pk.Version = int(buf[0])
- if pk.Version >= 5 {
- // Read the four-octet scalar octet count
- // The count is not used in this implementation
- var n [4]byte
- _, err = readFull(r, n[:])
- if err != nil {
- return
- }
- }
- pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0)
- pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5])
- // Ignore four-ocet length
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
- err = pk.parseRSA(r)
- case PubKeyAlgoDSA:
- err = pk.parseDSA(r)
- case PubKeyAlgoElGamal:
- err = pk.parseElGamal(r)
- case PubKeyAlgoECDSA:
- err = pk.parseECDSA(r)
- case PubKeyAlgoECDH:
- err = pk.parseECDH(r)
- case PubKeyAlgoEdDSA:
- err = pk.parseEdDSA(r)
- case PubKeyAlgoX25519:
- err = pk.parseX25519(r)
- case PubKeyAlgoX448:
- err = pk.parseX448(r)
- case PubKeyAlgoEd25519:
- err = pk.parseEd25519(r)
- case PubKeyAlgoEd448:
- err = pk.parseEd448(r)
- case ExperimentalPubKeyAlgoAEAD:
- err = pk.parseAEAD(r)
- case ExperimentalPubKeyAlgoHMAC:
- err = pk.parseHMAC(r)
- default:
- err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo)))
- }
- if err != nil {
- return
- }
-
- pk.setFingerprintAndKeyId()
- return
-}
-
-func (pk *PublicKey) setFingerprintAndKeyId() {
- // RFC 4880, section 12.2
- if pk.Version >= 5 {
- fingerprint := sha256.New()
- if err := pk.SerializeForHash(fingerprint); err != nil {
- // Should not happen for a hash.
- panic(err)
- }
- pk.Fingerprint = make([]byte, 32)
- copy(pk.Fingerprint, fingerprint.Sum(nil))
- pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[:8])
- } else {
- fingerprint := sha1.New()
- if err := pk.SerializeForHash(fingerprint); err != nil {
- // Should not happen for a hash.
- panic(err)
- }
- pk.Fingerprint = make([]byte, 20)
- copy(pk.Fingerprint, fingerprint.Sum(nil))
- pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20])
- }
-}
-
-// parseRSA parses RSA public key material from the given Reader. See RFC 4880,
-// section 5.5.2.
-func (pk *PublicKey) parseRSA(r io.Reader) (err error) {
- pk.n = new(encoding.MPI)
- if _, err = pk.n.ReadFrom(r); err != nil {
- return
- }
- pk.e = new(encoding.MPI)
- if _, err = pk.e.ReadFrom(r); err != nil {
- return
- }
-
- if len(pk.e.Bytes()) > 3 {
- err = errors.UnsupportedError("large public exponent")
- return
- }
- rsa := &rsa.PublicKey{
- N: new(big.Int).SetBytes(pk.n.Bytes()),
- E: 0,
- }
- for i := 0; i < len(pk.e.Bytes()); i++ {
- rsa.E <<= 8
- rsa.E |= int(pk.e.Bytes()[i])
- }
- pk.PublicKey = rsa
- return
-}
-
-// parseDSA parses DSA public key material from the given Reader. See RFC 4880,
-// section 5.5.2.
-func (pk *PublicKey) parseDSA(r io.Reader) (err error) {
- pk.p = new(encoding.MPI)
- if _, err = pk.p.ReadFrom(r); err != nil {
- return
- }
- pk.q = new(encoding.MPI)
- if _, err = pk.q.ReadFrom(r); err != nil {
- return
- }
- pk.g = new(encoding.MPI)
- if _, err = pk.g.ReadFrom(r); err != nil {
- return
- }
- pk.y = new(encoding.MPI)
- if _, err = pk.y.ReadFrom(r); err != nil {
- return
- }
-
- dsa := new(dsa.PublicKey)
- dsa.P = new(big.Int).SetBytes(pk.p.Bytes())
- dsa.Q = new(big.Int).SetBytes(pk.q.Bytes())
- dsa.G = new(big.Int).SetBytes(pk.g.Bytes())
- dsa.Y = new(big.Int).SetBytes(pk.y.Bytes())
- pk.PublicKey = dsa
- return
-}
-
-// parseElGamal parses ElGamal public key material from the given Reader. See
-// RFC 4880, section 5.5.2.
-func (pk *PublicKey) parseElGamal(r io.Reader) (err error) {
- pk.p = new(encoding.MPI)
- if _, err = pk.p.ReadFrom(r); err != nil {
- return
- }
- pk.g = new(encoding.MPI)
- if _, err = pk.g.ReadFrom(r); err != nil {
- return
- }
- pk.y = new(encoding.MPI)
- if _, err = pk.y.ReadFrom(r); err != nil {
- return
- }
-
- elgamal := new(elgamal.PublicKey)
- elgamal.P = new(big.Int).SetBytes(pk.p.Bytes())
- elgamal.G = new(big.Int).SetBytes(pk.g.Bytes())
- elgamal.Y = new(big.Int).SetBytes(pk.y.Bytes())
- pk.PublicKey = elgamal
- return
-}
-
-// parseECDSA parses ECDSA public key material from the given Reader. See
-// RFC 6637, Section 9.
-func (pk *PublicKey) parseECDSA(r io.Reader) (err error) {
- pk.oid = new(encoding.OID)
- if _, err = pk.oid.ReadFrom(r); err != nil {
- return
- }
-
- curveInfo := ecc.FindByOid(pk.oid)
- if curveInfo == nil {
- return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid))
- }
-
- pk.p = new(encoding.MPI)
- if _, err = pk.p.ReadFrom(r); err != nil {
- return
- }
-
- c, ok := curveInfo.Curve.(ecc.ECDSACurve)
- if !ok {
- return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid))
- }
-
- ecdsaKey := ecdsa.NewPublicKey(c)
- err = ecdsaKey.UnmarshalPoint(pk.p.Bytes())
- pk.PublicKey = ecdsaKey
-
- return
-}
-
-// parseECDH parses ECDH public key material from the given Reader. See
-// RFC 6637, Section 9.
-func (pk *PublicKey) parseECDH(r io.Reader) (err error) {
- pk.oid = new(encoding.OID)
- if _, err = pk.oid.ReadFrom(r); err != nil {
- return
- }
-
- curveInfo := ecc.FindByOid(pk.oid)
- if curveInfo == nil {
- return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid))
- }
-
- pk.p = new(encoding.MPI)
- if _, err = pk.p.ReadFrom(r); err != nil {
- return
- }
- pk.kdf = new(encoding.OID)
- if _, err = pk.kdf.ReadFrom(r); err != nil {
- return
- }
-
- c, ok := curveInfo.Curve.(ecc.ECDHCurve)
- if !ok {
- return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid))
- }
-
- kdfLen := len(pk.kdf.Bytes())
- if kdfLen < 3 {
- return errors.UnsupportedError("unsupported ECDH KDF length: " + strconv.Itoa(kdfLen))
- }
- kdfVersion := int(pk.kdf.Bytes()[0])
- if kdfVersion != ecdh.KDFVersion1 && kdfVersion != ecdh.KDFVersionForwarding {
- return errors.UnsupportedError("unsupported ECDH KDF version: " + strconv.Itoa(kdfVersion))
- }
- kdfHash, ok := algorithm.HashById[pk.kdf.Bytes()[1]]
- if !ok {
- return errors.UnsupportedError("unsupported ECDH KDF hash: " + strconv.Itoa(int(pk.kdf.Bytes()[1])))
- }
- kdfCipher, ok := algorithm.CipherById[pk.kdf.Bytes()[2]]
- if !ok {
- return errors.UnsupportedError("unsupported ECDH KDF cipher: " + strconv.Itoa(int(pk.kdf.Bytes()[2])))
- }
-
- kdf := ecdh.KDF{
- Version: kdfVersion,
- Hash: kdfHash,
- Cipher: kdfCipher,
- }
-
- if kdfVersion == ecdh.KDFVersionForwarding {
- if pk.Version != 4 || kdfLen != 23 {
- return errors.UnsupportedError("unsupported ECDH KDF v2 length: " + strconv.Itoa(kdfLen))
- }
-
- kdf.ReplacementFingerprint = pk.kdf.Bytes()[3:23]
- }
-
- ecdhKey := ecdh.NewPublicKey(c, kdf)
- err = ecdhKey.UnmarshalPoint(pk.p.Bytes())
- pk.PublicKey = ecdhKey
- return
-}
-
-func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) {
- pk.oid = new(encoding.OID)
- if _, err = pk.oid.ReadFrom(r); err != nil {
- return
- }
-
- curveInfo := ecc.FindByOid(pk.oid)
- if curveInfo == nil {
- return errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid))
- }
-
- c, ok := curveInfo.Curve.(ecc.EdDSACurve)
- if !ok {
- return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid))
- }
-
- pk.p = new(encoding.MPI)
- if _, err = pk.p.ReadFrom(r); err != nil {
- return
- }
-
- if len(pk.p.Bytes()) == 0 {
- return errors.StructuralError("empty EdDSA public key")
- }
-
- pub := eddsa.NewPublicKey(c)
-
- switch flag := pk.p.Bytes()[0]; flag {
- case 0x04:
- // TODO: see _grcy_ecc_eddsa_ensure_compact in grcypt
- return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag)))
- case 0x40:
- err = pub.UnmarshalPoint(pk.p.Bytes())
- default:
- return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag)))
- }
-
- pk.PublicKey = pub
- return
-}
-
-func (pk *PublicKey) parseX25519(r io.Reader) (err error) {
- point := make([]byte, x25519.KeySize)
- _, err = io.ReadFull(r, point)
- if err != nil {
- return
- }
- pub := &x25519.PublicKey{
- Point: point,
- }
- pk.PublicKey = pub
- return
-}
-
-func (pk *PublicKey) parseX448(r io.Reader) (err error) {
- point := make([]byte, x448.KeySize)
- _, err = io.ReadFull(r, point)
- if err != nil {
- return
- }
- pub := &x448.PublicKey{
- Point: point,
- }
- pk.PublicKey = pub
- return
-}
-
-func (pk *PublicKey) parseEd25519(r io.Reader) (err error) {
- point := make([]byte, ed25519.PublicKeySize)
- _, err = io.ReadFull(r, point)
- if err != nil {
- return
- }
- pub := &ed25519.PublicKey{
- Point: point,
- }
- pk.PublicKey = pub
- return
-}
-
-func (pk *PublicKey) parseEd448(r io.Reader) (err error) {
- point := make([]byte, ed448.PublicKeySize)
- _, err = io.ReadFull(r, point)
- if err != nil {
- return
- }
- pub := &ed448.PublicKey{
- Point: point,
- }
- pk.PublicKey = pub
- return
-}
-
-func (pk *PublicKey) parseAEAD(r io.Reader) (err error) {
- var cipher [1]byte
- _, err = readFull(r, cipher[:])
- if err != nil {
- return
- }
-
- var bindingHash [32]byte
- _, err = readFull(r, bindingHash[:])
- if err != nil {
- return
- }
-
- symmetric := &symmetric.AEADPublicKey{
- Cipher: algorithm.CipherFunction(cipher[0]),
- BindingHash: bindingHash,
- }
-
- pk.PublicKey = symmetric
- return
-}
-
-func (pk *PublicKey) parseHMAC(r io.Reader) (err error) {
- var hash [1]byte
- _, err = readFull(r, hash[:])
- if err != nil {
- return
- }
- bindingHash, err := readBindingHash(r)
- if err != nil {
- return
- }
-
- hmacHash, ok := algorithm.HashById[hash[0]]
- if !ok {
- return errors.UnsupportedError("unsupported HMAC hash: " + strconv.Itoa(int(hash[0])))
- }
-
- symmetric := &symmetric.HMACPublicKey{
- Hash: hmacHash,
- BindingHash: bindingHash,
- }
-
- pk.PublicKey = symmetric
- return
-}
-
-func readBindingHash(r io.Reader) (bindingHash [32]byte, err error) {
- _, err = readFull(r, bindingHash[:])
- return
-}
-
-// SerializeForHash serializes the PublicKey to w with the special packet
-// header format needed for hashing.
-func (pk *PublicKey) SerializeForHash(w io.Writer) error {
- if err := pk.SerializeSignaturePrefix(w); err != nil {
- return err
- }
- return pk.serializeWithoutHeaders(w)
-}
-
-// SerializeSignaturePrefix writes the prefix for this public key to the given Writer.
-// The prefix is used when calculating a signature over this public key. See
-// RFC 4880, section 5.2.4.
-func (pk *PublicKey) SerializeSignaturePrefix(w io.Writer) error {
- var pLength = pk.algorithmSpecificByteCount()
- // version, timestamp, algorithm
- pLength += versionSize + timestampSize + algorithmSize
- if pk.Version >= 5 {
- // key octet count (4).
- pLength += 4
- _, err := w.Write([]byte{
- // When a v4 signature is made over a key, the hash data starts with the octet 0x99, followed by a two-octet length
- // of the key, and then the body of the key packet. When a v6 signature is made over a key, the hash data starts
- // with the salt, then octet 0x9B, followed by a four-octet length of the key, and then the body of the key packet.
- 0x95 + byte(pk.Version),
- byte(pLength >> 24),
- byte(pLength >> 16),
- byte(pLength >> 8),
- byte(pLength),
- })
- if err != nil {
- return err
- }
- return nil
- }
- if _, err := w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}); err != nil {
- return err
- }
- return nil
-}
-
-func (pk *PublicKey) Serialize(w io.Writer) (err error) {
- length := uint32(versionSize + timestampSize + algorithmSize) // 6 byte header
- length += pk.algorithmSpecificByteCount()
- if pk.Version >= 5 {
- length += 4 // octet key count
- }
- packetType := packetTypePublicKey
- if pk.IsSubkey {
- packetType = packetTypePublicSubkey
- }
- err = serializeHeader(w, packetType, int(length))
- if err != nil {
- return
- }
- return pk.serializeWithoutHeaders(w)
-}
-
-func (pk *PublicKey) algorithmSpecificByteCount() uint32 {
- length := uint32(0)
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
- length += uint32(pk.n.EncodedLength())
- length += uint32(pk.e.EncodedLength())
- case PubKeyAlgoDSA:
- length += uint32(pk.p.EncodedLength())
- length += uint32(pk.q.EncodedLength())
- length += uint32(pk.g.EncodedLength())
- length += uint32(pk.y.EncodedLength())
- case PubKeyAlgoElGamal:
- length += uint32(pk.p.EncodedLength())
- length += uint32(pk.g.EncodedLength())
- length += uint32(pk.y.EncodedLength())
- case PubKeyAlgoECDSA:
- length += uint32(pk.oid.EncodedLength())
- length += uint32(pk.p.EncodedLength())
- case PubKeyAlgoECDH:
- length += uint32(pk.oid.EncodedLength())
- length += uint32(pk.p.EncodedLength())
- length += uint32(pk.kdf.EncodedLength())
- case PubKeyAlgoEdDSA:
- length += uint32(pk.oid.EncodedLength())
- length += uint32(pk.p.EncodedLength())
- case PubKeyAlgoX25519:
- length += x25519.KeySize
- case PubKeyAlgoX448:
- length += x448.KeySize
- case PubKeyAlgoEd25519:
- length += ed25519.PublicKeySize
- case PubKeyAlgoEd448:
- length += ed448.PublicKeySize
- case ExperimentalPubKeyAlgoAEAD, ExperimentalPubKeyAlgoHMAC:
- length += 1 // Hash octet
- length += 32 // Binding hash
- default:
- panic("unknown public key algorithm")
- }
- return length
-}
-
-// serializeWithoutHeaders marshals the PublicKey to w in the form of an
-// OpenPGP public key packet, not including the packet header.
-func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) {
- t := uint32(pk.CreationTime.Unix())
- if _, err = w.Write([]byte{
- byte(pk.Version),
- byte(t >> 24), byte(t >> 16), byte(t >> 8), byte(t),
- byte(pk.PubKeyAlgo),
- }); err != nil {
- return
- }
-
- if pk.Version >= 5 {
- n := pk.algorithmSpecificByteCount()
- if _, err = w.Write([]byte{
- byte(n >> 24), byte(n >> 16), byte(n >> 8), byte(n),
- }); err != nil {
- return
- }
- }
-
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
- if _, err = w.Write(pk.n.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(pk.e.EncodedBytes())
- return
- case PubKeyAlgoDSA:
- if _, err = w.Write(pk.p.EncodedBytes()); err != nil {
- return
- }
- if _, err = w.Write(pk.q.EncodedBytes()); err != nil {
- return
- }
- if _, err = w.Write(pk.g.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(pk.y.EncodedBytes())
- return
- case PubKeyAlgoElGamal:
- if _, err = w.Write(pk.p.EncodedBytes()); err != nil {
- return
- }
- if _, err = w.Write(pk.g.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(pk.y.EncodedBytes())
- return
- case PubKeyAlgoECDSA:
- if _, err = w.Write(pk.oid.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(pk.p.EncodedBytes())
- return
- case PubKeyAlgoECDH:
- if _, err = w.Write(pk.oid.EncodedBytes()); err != nil {
- return
- }
- if _, err = w.Write(pk.p.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(pk.kdf.EncodedBytes())
- return
- case PubKeyAlgoEdDSA:
- if _, err = w.Write(pk.oid.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(pk.p.EncodedBytes())
- return
- case PubKeyAlgoX25519:
- publicKey := pk.PublicKey.(*x25519.PublicKey)
- _, err = w.Write(publicKey.Point)
- return
- case PubKeyAlgoX448:
- publicKey := pk.PublicKey.(*x448.PublicKey)
- _, err = w.Write(publicKey.Point)
- return
- case PubKeyAlgoEd25519:
- publicKey := pk.PublicKey.(*ed25519.PublicKey)
- _, err = w.Write(publicKey.Point)
- return
- case PubKeyAlgoEd448:
- publicKey := pk.PublicKey.(*ed448.PublicKey)
- _, err = w.Write(publicKey.Point)
- return
- case ExperimentalPubKeyAlgoAEAD:
- symmKey := pk.PublicKey.(*symmetric.AEADPublicKey)
- cipherOctet := [1]byte{symmKey.Cipher.Id()}
- if _, err = w.Write(cipherOctet[:]); err != nil {
- return
- }
- _, err = w.Write(symmKey.BindingHash[:])
- return
- case ExperimentalPubKeyAlgoHMAC:
- symmKey := pk.PublicKey.(*symmetric.HMACPublicKey)
- hashOctet := [1]byte{symmKey.Hash.Id()}
- if _, err = w.Write(hashOctet[:]); err != nil {
- return
- }
- _, err = w.Write(symmKey.BindingHash[:])
- return
- }
- return errors.InvalidArgumentError("bad public-key algorithm")
-}
-
-// CanSign returns true iff this public key can generate signatures
-func (pk *PublicKey) CanSign() bool {
- return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal && pk.PubKeyAlgo != PubKeyAlgoECDH
-}
-
-// VerifySignature returns nil iff sig is a valid signature, made by this
-// public key, of the data hashed into signed. signed is mutated by this call.
-func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) {
- if !pk.CanSign() {
- return errors.InvalidArgumentError("public key cannot generate signatures")
- }
- if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) {
- sig.AddMetadataToHashSuffix()
- }
- signed.Write(sig.HashSuffix)
- hashBytes := signed.Sum(nil)
- // see discussion https://github.com/ProtonMail/go-crypto/issues/107
- if sig.Version >= 5 && (hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1]) {
- return errors.SignatureError("hash tag doesn't match")
- }
-
- if pk.PubKeyAlgo != sig.PubKeyAlgo {
- return errors.InvalidArgumentError("public key and signature use different algorithms")
- }
-
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey)
- err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.Bytes()))
- if err != nil {
- return errors.SignatureError("RSA verification failure")
- }
- return nil
- case PubKeyAlgoDSA:
- dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey)
- // Need to truncate hashBytes to match FIPS 186-3 section 4.6.
- subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8
- if len(hashBytes) > subgroupSize {
- hashBytes = hashBytes[:subgroupSize]
- }
- if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.Bytes()), new(big.Int).SetBytes(sig.DSASigS.Bytes())) {
- return errors.SignatureError("DSA verification failure")
- }
- return nil
- case PubKeyAlgoECDSA:
- ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey)
- if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.Bytes()), new(big.Int).SetBytes(sig.ECDSASigS.Bytes())) {
- return errors.SignatureError("ECDSA verification failure")
- }
- return nil
- case PubKeyAlgoEdDSA:
- eddsaPublicKey := pk.PublicKey.(*eddsa.PublicKey)
- if !eddsa.Verify(eddsaPublicKey, hashBytes, sig.EdDSASigR.Bytes(), sig.EdDSASigS.Bytes()) {
- return errors.SignatureError("EdDSA verification failure")
- }
- return nil
- case PubKeyAlgoEd25519:
- ed25519PublicKey := pk.PublicKey.(*ed25519.PublicKey)
- if !ed25519.Verify(ed25519PublicKey, hashBytes, sig.EdSig) {
- return errors.SignatureError("Ed25519 verification failure")
- }
- return nil
- case PubKeyAlgoEd448:
- ed448PublicKey := pk.PublicKey.(*ed448.PublicKey)
- if !ed448.Verify(ed448PublicKey, hashBytes, sig.EdSig) {
- return errors.SignatureError("ed448 verification failure")
- }
- return nil
- case ExperimentalPubKeyAlgoHMAC:
- HMACKey := pk.PublicKey.(*symmetric.HMACPublicKey)
-
- result, err := HMACKey.Verify(hashBytes, sig.HMAC.Bytes())
- if err != nil {
- return err
- }
- if !result {
- return errors.SignatureError("HMAC verification failure")
- }
- return nil
- default:
- return errors.SignatureError("Unsupported public key algorithm used in signature")
- }
-}
-
-// keySignatureHash returns a Hash of the message that needs to be signed for
-// pk to assert a subkey relationship to signed.
-func keySignatureHash(pk, signed signingKey, hashFunc hash.Hash) (h hash.Hash, err error) {
- h = hashFunc
-
- // RFC 4880, section 5.2.4
- err = pk.SerializeForHash(h)
- if err != nil {
- return nil, err
- }
-
- err = signed.SerializeForHash(h)
- return
-}
-
-// VerifyKeySignature returns nil iff sig is a valid signature, made by this
-// public key, of signed.
-func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error {
- preparedHash, err := sig.PrepareVerify()
- if err != nil {
- return err
- }
- h, err := keySignatureHash(pk, signed, preparedHash)
- if err != nil {
- return err
- }
- if err = pk.VerifySignature(h, sig); err != nil {
- return err
- }
-
- if sig.FlagSign {
- // Signing subkeys must be cross-signed. See
- // https://www.gnupg.org/faq/subkey-cross-certify.html.
- if sig.EmbeddedSignature == nil {
- return errors.StructuralError("signing subkey is missing cross-signature")
- }
- preparedHashEmbedded, err := sig.EmbeddedSignature.PrepareVerify()
- if err != nil {
- return err
- }
- // Verify the cross-signature. This is calculated over the same
- // data as the main signature, so we cannot just recursively
- // call signed.VerifyKeySignature(...)
- if h, err = keySignatureHash(pk, signed, preparedHashEmbedded); err != nil {
- return errors.StructuralError("error while hashing for cross-signature: " + err.Error())
- }
- if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil {
- return errors.StructuralError("error while verifying cross-signature: " + err.Error())
- }
- }
-
- // Keys having this flag MUST have the forwarding KDF parameters version 2 defined in Section 5.1.
- if sig.FlagForward && (signed.PubKeyAlgo != PubKeyAlgoECDH ||
- signed.kdf == nil ||
- signed.kdf.Bytes()[0] != ecdh.KDFVersionForwarding) {
- return errors.StructuralError("forwarding key with wrong ecdh kdf version")
- }
-
- return nil
-}
-
-func keyRevocationHash(pk signingKey, hashFunc hash.Hash) (err error) {
- return pk.SerializeForHash(hashFunc)
-}
-
-// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this
-// public key.
-func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) {
- preparedHash, err := sig.PrepareVerify()
- if err != nil {
- return err
- }
- if keyRevocationHash(pk, preparedHash); err != nil {
- return err
- }
- return pk.VerifySignature(preparedHash, sig)
-}
-
-// VerifySubkeyRevocationSignature returns nil iff sig is a valid subkey revocation signature,
-// made by this public key, of signed.
-func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signed *PublicKey) (err error) {
- preparedHash, err := sig.PrepareVerify()
- if err != nil {
- return err
- }
- h, err := keySignatureHash(pk, signed, preparedHash)
- if err != nil {
- return err
- }
- return pk.VerifySignature(h, sig)
-}
-
-// userIdSignatureHash returns a Hash of the message that needs to be signed
-// to assert that pk is a valid key for id.
-func userIdSignatureHash(id string, pk *PublicKey, h hash.Hash) (err error) {
-
- // RFC 4880, section 5.2.4
- if err := pk.SerializeSignaturePrefix(h); err != nil {
- return err
- }
- if err := pk.serializeWithoutHeaders(h); err != nil {
- return err
- }
-
- var buf [5]byte
- buf[0] = 0xb4
- buf[1] = byte(len(id) >> 24)
- buf[2] = byte(len(id) >> 16)
- buf[3] = byte(len(id) >> 8)
- buf[4] = byte(len(id))
- h.Write(buf[:])
- h.Write([]byte(id))
-
- return nil
-}
-
-// directKeySignatureHash returns a Hash of the message that needs to be signed.
-func directKeySignatureHash(pk *PublicKey, h hash.Hash) (err error) {
- return pk.SerializeForHash(h)
-}
-
-// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this
-// public key, that id is the identity of pub.
-func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) {
- h, err := sig.PrepareVerify()
- if err != nil {
- return err
- }
- if err := userIdSignatureHash(id, pub, h); err != nil {
- return err
- }
- return pk.VerifySignature(h, sig)
-}
-
-// VerifyDirectKeySignature returns nil iff sig is a valid signature, made by this
-// public key.
-func (pk *PublicKey) VerifyDirectKeySignature(sig *Signature) (err error) {
- h, err := sig.PrepareVerify()
- if err != nil {
- return err
- }
- if err := directKeySignatureHash(pk, h); err != nil {
- return err
- }
- return pk.VerifySignature(h, sig)
-}
-
-// KeyIdString returns the public key's fingerprint in capital hex
-// (e.g. "6C7EE1B8621CC013").
-func (pk *PublicKey) KeyIdString() string {
- return fmt.Sprintf("%X", pk.Fingerprint[12:20])
-}
-
-// KeyIdShortString returns the short form of public key's fingerprint
-// in capital hex, as shown by gpg --list-keys (e.g. "621CC013").
-func (pk *PublicKey) KeyIdShortString() string {
- return fmt.Sprintf("%X", pk.Fingerprint[16:20])
-}
-
-// BitLength returns the bit length for the given public key.
-func (pk *PublicKey) BitLength() (bitLength uint16, err error) {
- switch pk.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly:
- bitLength = pk.n.BitLength()
- case PubKeyAlgoDSA:
- bitLength = pk.p.BitLength()
- case PubKeyAlgoElGamal:
- bitLength = pk.p.BitLength()
- case PubKeyAlgoECDSA:
- bitLength = pk.p.BitLength()
- case PubKeyAlgoECDH:
- bitLength = pk.p.BitLength()
- case PubKeyAlgoEdDSA:
- bitLength = pk.p.BitLength()
- case PubKeyAlgoX25519:
- bitLength = x25519.KeySize * 8
- case PubKeyAlgoX448:
- bitLength = x448.KeySize * 8
- case PubKeyAlgoEd25519:
- bitLength = ed25519.PublicKeySize * 8
- case PubKeyAlgoEd448:
- bitLength = ed448.PublicKeySize * 8
- case ExperimentalPubKeyAlgoAEAD:
- bitLength = 32
- default:
- err = errors.InvalidArgumentError("bad public-key algorithm")
- }
- return
-}
-
-// Curve returns the used elliptic curve of this public key.
-// Returns an error if no elliptic curve is used.
-func (pk *PublicKey) Curve() (curve Curve, err error) {
- switch pk.PubKeyAlgo {
- case PubKeyAlgoECDSA, PubKeyAlgoECDH, PubKeyAlgoEdDSA:
- curveInfo := ecc.FindByOid(pk.oid)
- if curveInfo == nil {
- return "", errors.UnsupportedError(fmt.Sprintf("unknown oid: %x", pk.oid))
- }
- curve = Curve(curveInfo.GenName)
- case PubKeyAlgoEd25519, PubKeyAlgoX25519:
- curve = Curve25519
- case PubKeyAlgoEd448, PubKeyAlgoX448:
- curve = Curve448
- default:
- err = errors.InvalidArgumentError("public key does not operate with an elliptic curve")
- }
- return
-}
-
-// KeyExpired returns whether sig is a self-signature of a key that has
-// expired or is created in the future.
-func (pk *PublicKey) KeyExpired(sig *Signature, currentTime time.Time) bool {
- if pk.CreationTime.Unix() > currentTime.Unix() {
- return true
- }
- if sig.KeyLifetimeSecs == nil || *sig.KeyLifetimeSecs == 0 {
- return false
- }
- expiry := pk.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second)
- return currentTime.Unix() > expiry.Unix()
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go
deleted file mode 100644
index b255f1f6..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package packet
-
-const rsaFingerprintHex = "5fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb"
-
-const rsaPkDataHex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001"
-
-const dsaFingerprintHex = "eece4c094db002103714c63c8e8fbe54062f19ed"
-
-const dsaPkDataHex = "9901a2044d432f89110400cd581334f0d7a1e1bdc8b9d6d8c0baf68793632735d2bb0903224cbaa1dfbf35a60ee7a13b92643421e1eb41aa8d79bea19a115a677f6b8ba3c7818ce53a6c2a24a1608bd8b8d6e55c5090cbde09dd26e356267465ae25e69ec8bdd57c7bbb2623e4d73336f73a0a9098f7f16da2e25252130fd694c0e8070c55a812a423ae7f00a0ebf50e70c2f19c3520a551bd4b08d30f23530d3d03ff7d0bf4a53a64a09dc5e6e6e35854b7d70c882b0c60293401958b1bd9e40abec3ea05ba87cf64899299d4bd6aa7f459c201d3fbbd6c82004bdc5e8a9eb8082d12054cc90fa9d4ec251a843236a588bf49552441817436c4f43326966fe85447d4e6d0acf8fa1ef0f014730770603ad7634c3088dc52501c237328417c31c89ed70400b2f1a98b0bf42f11fefc430704bebbaa41d9f355600c3facee1e490f64208e0e094ea55e3a598a219a58500bf78ac677b670a14f4e47e9cf8eab4f368cc1ddcaa18cc59309d4cc62dd4f680e73e6cc3e1ce87a84d0925efbcb26c575c093fc42eecf45135fabf6403a25c2016e1774c0484e440a18319072c617cc97ac0a3bb0"
-
-const ecdsaFingerprintHex = "9892270b38b8980b05c8d56d43fe956c542ca00b"
-
-const ecdsaPkDataHex = "9893045071c29413052b8104002304230401f4867769cedfa52c325018896245443968e52e51d0c2df8d939949cb5b330f2921711fbee1c9b9dddb95d15cb0255e99badeddda7cc23d9ddcaacbc290969b9f24019375d61c2e4e3b36953a28d8b2bc95f78c3f1d592fb24499be348656a7b17e3963187b4361afe497bc5f9f81213f04069f8e1fb9e6a6290ae295ca1a92b894396cb4"
-
-const ecdhFingerprintHex = "722354df2475a42164d1d49faa8b938f9a201946"
-
-const ecdhPkDataHex = "b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec91803010909"
-
-const eddsaFingerprintHex = "b2d5e5ec0e6deca6bc8eeeb00907e75e1dd99ad8"
-
-const eddsaPkDataHex = "98330456e2132b16092b06010401da470f01010740bbda39266affa511a8c2d02edf690fb784b0499c4406185811a163539ef11dc1b41d74657374696e67203c74657374696e674074657374696e672e636f6d3e8879041316080021050256e2132b021b03050b09080702061508090a0b020416020301021e01021780000a09100907e75e1dd99ad86d0c00fe39d2008359352782bc9b61ac382584cd8eff3f57a18c2287e3afeeb05d1f04ba00fe2d0bc1ddf3ff8adb9afa3e7d9287244b4ec567f3db4d60b74a9b5465ed528203"
-
-// Source: https://sites.google.com/site/brainhub/pgpecckeys#TOC-ECC-NIST-P-384-key
-const ecc384PubHex = `99006f044d53059213052b81040022030304f6b8c5aced5b84ef9f4a209db2e4a9dfb70d28cb8c10ecd57674a9fa5a67389942b62d5e51367df4c7bfd3f8e500feecf07ed265a621a8ebbbe53e947ec78c677eba143bd1533c2b350e1c29f82313e1e1108eba063be1e64b10e6950e799c2db42465635f6473615f64685f333834203c6f70656e70677040627261696e6875622e6f72673e8900cb04101309005305024d530592301480000000002000077072656665727265642d656d61696c2d656e636f64696e67407067702e636f6d7067706d696d65040b090807021901051b03000000021602051e010000000415090a08000a0910098033880f54719fca2b0180aa37350968bd5f115afd8ce7bc7b103822152dbff06d0afcda835329510905b98cb469ba208faab87c7412b799e7b633017f58364ea480e8a1a3f253a0c5f22c446e8be9a9fce6210136ee30811abbd49139de28b5bdf8dc36d06ae748579e9ff503b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec9180301090989008404181309000c05024d530592051b0c000000000a0910098033880f54719f80970180eee7a6d8fcee41ee4f9289df17f9bcf9d955dca25c583b94336f3a2b2d4986dc5cf417b8d2dc86f741a9e1a6d236c0e3017d1c76575458a0cfb93ae8a2b274fcc65ceecd7a91eec83656ba13219969f06945b48c56bd04152c3a0553c5f2f4bd1267`
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go
deleted file mode 100644
index dd840923..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go
+++ /dev/null
@@ -1,209 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-type PacketReader interface {
- Next() (p Packet, err error)
- Push(reader io.Reader) (err error)
- Unread(p Packet)
-}
-
-// Reader reads packets from an io.Reader and allows packets to be 'unread' so
-// that they result from the next call to Next.
-type Reader struct {
- q []Packet
- readers []io.Reader
-}
-
-// New io.Readers are pushed when a compressed or encrypted packet is processed
-// and recursively treated as a new source of packets. However, a carefully
-// crafted packet can trigger an infinite recursive sequence of packets. See
-// http://mumble.net/~campbell/misc/pgp-quine
-// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402
-// This constant limits the number of recursive packets that may be pushed.
-const maxReaders = 32
-
-// Next returns the most recently unread Packet, or reads another packet from
-// the top-most io.Reader. Unknown/unsupported/Marker packet types are skipped.
-func (r *Reader) Next() (p Packet, err error) {
- for {
- p, err := r.read()
- if err == io.EOF {
- break
- } else if err != nil {
- if _, ok := err.(errors.UnknownPacketTypeError); ok {
- continue
- }
- if _, ok := err.(errors.UnsupportedError); ok {
- switch p.(type) {
- case *SymmetricallyEncrypted, *AEADEncrypted, *Compressed, *LiteralData:
- return nil, err
- }
- continue
- }
- return nil, err
- } else {
- //A marker packet MUST be ignored when received
- switch p.(type) {
- case *Marker:
- continue
- }
- return p, nil
- }
- }
- return nil, io.EOF
-}
-
-// Next returns the most recently unread Packet, or reads another packet from
-// the top-most io.Reader. Unknown/Marker packet types are skipped while unsupported
-// packets are returned as UnsupportedPacket type.
-func (r *Reader) NextWithUnsupported() (p Packet, err error) {
- for {
- p, err = r.read()
- if err == io.EOF {
- break
- } else if err != nil {
- if _, ok := err.(errors.UnknownPacketTypeError); ok {
- continue
- }
- if casteErr, ok := err.(errors.UnsupportedError); ok {
- return &UnsupportedPacket{
- IncompletePacket: p,
- Error: casteErr,
- }, nil
- }
- return
- } else {
- //A marker packet MUST be ignored when received
- switch p.(type) {
- case *Marker:
- continue
- }
- return
- }
- }
- return nil, io.EOF
-}
-
-func (r *Reader) read() (p Packet, err error) {
- if len(r.q) > 0 {
- p = r.q[len(r.q)-1]
- r.q = r.q[:len(r.q)-1]
- return
- }
- for len(r.readers) > 0 {
- p, err = Read(r.readers[len(r.readers)-1])
- if err == io.EOF {
- r.readers = r.readers[:len(r.readers)-1]
- continue
- }
- return p, err
- }
- return nil, io.EOF
-}
-
-// Push causes the Reader to start reading from a new io.Reader. When an EOF
-// error is seen from the new io.Reader, it is popped and the Reader continues
-// to read from the next most recent io.Reader. Push returns a StructuralError
-// if pushing the reader would exceed the maximum recursion level, otherwise it
-// returns nil.
-func (r *Reader) Push(reader io.Reader) (err error) {
- if len(r.readers) >= maxReaders {
- return errors.StructuralError("too many layers of packets")
- }
- r.readers = append(r.readers, reader)
- return nil
-}
-
-// Unread causes the given Packet to be returned from the next call to Next.
-func (r *Reader) Unread(p Packet) {
- r.q = append(r.q, p)
-}
-
-func NewReader(r io.Reader) *Reader {
- return &Reader{
- q: nil,
- readers: []io.Reader{r},
- }
-}
-
-// CheckReader is similar to Reader but additionally
-// uses the pushdown automata to verify the read packet sequence.
-type CheckReader struct {
- Reader
- verifier *SequenceVerifier
- fullyRead bool
-}
-
-// Next returns the most recently unread Packet, or reads another packet from
-// the top-most io.Reader. Unknown packet types are skipped.
-// If the read packet sequence does not conform to the packet composition
-// rules in rfc4880, it returns an error.
-func (r *CheckReader) Next() (p Packet, err error) {
- if r.fullyRead {
- return nil, io.EOF
- }
- if len(r.q) > 0 {
- p = r.q[len(r.q)-1]
- r.q = r.q[:len(r.q)-1]
- return
- }
- var errMsg error
- for len(r.readers) > 0 {
- p, errMsg, err = ReadWithCheck(r.readers[len(r.readers)-1], r.verifier)
- if errMsg != nil {
- err = errMsg
- return
- }
- if err == nil {
- return
- }
- if err == io.EOF {
- r.readers = r.readers[:len(r.readers)-1]
- continue
- }
- //A marker packet MUST be ignored when received
- switch p.(type) {
- case *Marker:
- continue
- }
- if _, ok := err.(errors.UnknownPacketTypeError); ok {
- continue
- }
- if _, ok := err.(errors.UnsupportedError); ok {
- switch p.(type) {
- case *SymmetricallyEncrypted, *AEADEncrypted, *Compressed, *LiteralData:
- return nil, err
- }
- continue
- }
- return nil, err
- }
- if errMsg = r.verifier.Next(EOSSymbol); errMsg != nil {
- return nil, errMsg
- }
- if errMsg = r.verifier.AssertValid(); errMsg != nil {
- return nil, errMsg
- }
- r.fullyRead = true
- return nil, io.EOF
-}
-
-func NewCheckReader(r io.Reader) *CheckReader {
- return &CheckReader{
- Reader: Reader{
- q: nil,
- readers: []io.Reader{r},
- },
- verifier: NewSequenceVerifier(),
- fullyRead: false,
- }
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/recipient.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/recipient.go
deleted file mode 100644
index fb2e362e..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/recipient.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package packet
-
-// Recipient type represents a Intended Recipient Fingerprint subpacket
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#name-intended-recipient-fingerpr
-type Recipient struct {
- KeyVersion int
- Fingerprint []byte
-}
-
-func (r *Recipient) Serialize() []byte {
- packet := make([]byte, len(r.Fingerprint)+1)
- packet[0] = byte(r.KeyVersion)
- copy(packet[1:], r.Fingerprint)
- return packet
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go
deleted file mode 100644
index 5b7d8ec9..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go
+++ /dev/null
@@ -1,1424 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "crypto"
- "crypto/dsa"
- "encoding/binary"
- "hash"
- "io"
- "strconv"
- "time"
-
- "github.com/ProtonMail/go-crypto/openpgp/ecdsa"
- "github.com/ProtonMail/go-crypto/openpgp/ed25519"
- "github.com/ProtonMail/go-crypto/openpgp/ed448"
- "github.com/ProtonMail/go-crypto/openpgp/eddsa"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "github.com/ProtonMail/go-crypto/openpgp/internal/encoding"
-)
-
-const (
- // See RFC 4880, section 5.2.3.21 for details.
- KeyFlagCertify = 1 << iota
- KeyFlagSign
- KeyFlagEncryptCommunications
- KeyFlagEncryptStorage
- KeyFlagSplitKey
- KeyFlagAuthenticate
- KeyFlagForward
- KeyFlagGroupKey
-)
-
-// Signature represents a signature. See RFC 4880, section 5.2.
-type Signature struct {
- Version int
- SigType SignatureType
- PubKeyAlgo PublicKeyAlgorithm
- Hash crypto.Hash
- // salt contains a random salt value for v6 signatures
- // See RFC the crypto refresh Section 5.2.3.
- salt []byte
-
- // HashSuffix is extra data that is hashed in after the signed data.
- HashSuffix []byte
- // HashTag contains the first two bytes of the hash for fast rejection
- // of bad signed data.
- HashTag [2]byte
-
- // Metadata includes format, filename and time, and is protected by v5
- // signatures of type 0x00 or 0x01. This metadata is included into the hash
- // computation; if nil, six 0x00 bytes are used instead. See section 5.2.4.
- Metadata *LiteralData
-
- CreationTime time.Time
-
- RSASignature encoding.Field
- DSASigR, DSASigS encoding.Field
- ECDSASigR, ECDSASigS encoding.Field
- EdDSASigR, EdDSASigS encoding.Field
- EdSig []byte
- HMAC encoding.Field
-
- // rawSubpackets contains the unparsed subpackets, in order.
- rawSubpackets []outputSubpacket
-
- // The following are optional so are nil when not included in the
- // signature.
-
- SigLifetimeSecs, KeyLifetimeSecs *uint32
- PreferredSymmetric, PreferredHash, PreferredCompression []uint8
- PreferredCipherSuites [][2]uint8
- IssuerKeyId *uint64
- IssuerFingerprint []byte
- SignerUserId *string
- IsPrimaryId *bool
- Notations []*Notation
- IntendedRecipients []*Recipient
-
- // TrustLevel and TrustAmount can be set by the signer to assert that
- // the key is not only valid but also trustworthy at the specified
- // level.
- // See RFC 4880, section 5.2.3.13 for details.
- TrustLevel TrustLevel
- TrustAmount TrustAmount
-
- // TrustRegularExpression can be used in conjunction with trust Signature
- // packets to limit the scope of the trust that is extended.
- // See RFC 4880, section 5.2.3.14 for details.
- TrustRegularExpression *string
-
- // PolicyURI can be set to the URI of a document that describes the
- // policy under which the signature was issued. See RFC 4880, section
- // 5.2.3.20 for details.
- PolicyURI string
-
- // FlagsValid is set if any flags were given. See RFC 4880, section
- // 5.2.3.21 for details.
- FlagsValid bool
- FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool
- FlagSplitKey, FlagAuthenticate, FlagForward, FlagGroupKey bool
-
- // RevocationReason is set if this signature has been revoked.
- // See RFC 4880, section 5.2.3.23 for details.
- RevocationReason *ReasonForRevocation
- RevocationReasonText string
-
- // In a self-signature, these flags are set there is a features subpacket
- // indicating that the issuer implementation supports these features
- // see https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#features-subpacket
- SEIPDv1, SEIPDv2 bool
-
- // EmbeddedSignature, if non-nil, is a signature of the parent key, by
- // this key. This prevents an attacker from claiming another's signing
- // subkey as their own.
- EmbeddedSignature *Signature
-
- outSubpackets []outputSubpacket
-}
-
-// VerifiableSignature internally keeps state if the
-// the signature has been verified before.
-type VerifiableSignature struct {
- Valid *bool // nil if it has not been verified yet
- Packet *Signature
-}
-
-// NewVerifiableSig returns a struct of type VerifiableSignature referencing the input signature.
-func NewVerifiableSig(signature *Signature) *VerifiableSignature {
- return &VerifiableSignature{
- Packet: signature,
- }
-}
-
-// Salt returns the signature salt for v6 signatures.
-func (sig *Signature) Salt() []byte {
- if sig == nil {
- return nil
- }
- return sig.salt
-}
-
-func (sig *Signature) parse(r io.Reader) (err error) {
- // RFC 4880, section 5.2.3
- var buf [7]byte
- _, err = readFull(r, buf[:1])
- if err != nil {
- return
- }
- if buf[0] != 4 && buf[0] != 5 && buf[0] != 6 {
- err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0])))
- return
- }
- sig.Version = int(buf[0])
- if sig.Version == 6 {
- _, err = readFull(r, buf[:7])
- } else {
- _, err = readFull(r, buf[:5])
- }
- if err != nil {
- return
- }
- sig.SigType = SignatureType(buf[0])
- sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1])
- switch sig.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA, PubKeyAlgoEd25519, PubKeyAlgoEd448, ExperimentalPubKeyAlgoHMAC:
- default:
- err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
- return
- }
-
- var ok bool
-
- if sig.Version < 5 {
- sig.Hash, ok = algorithm.HashIdToHashWithSha1(buf[2])
- } else {
- sig.Hash, ok = algorithm.HashIdToHash(buf[2])
- }
-
- if !ok {
- return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2])))
- }
-
- var hashedSubpacketsLength int
- if sig.Version == 6 {
- // For a v6 signature, a four-octet length is used.
- hashedSubpacketsLength =
- int(buf[3])<<24 |
- int(buf[4])<<16 |
- int(buf[5])<<8 |
- int(buf[6])
- } else {
- hashedSubpacketsLength = int(buf[3])<<8 | int(buf[4])
- }
- hashedSubpackets := make([]byte, hashedSubpacketsLength)
- _, err = readFull(r, hashedSubpackets)
- if err != nil {
- return
- }
- err = sig.buildHashSuffix(hashedSubpackets)
- if err != nil {
- return
- }
-
- err = parseSignatureSubpackets(sig, hashedSubpackets, true)
- if err != nil {
- return
- }
-
- if sig.Version == 6 {
- _, err = readFull(r, buf[:4])
- } else {
- _, err = readFull(r, buf[:2])
- }
-
- if err != nil {
- return
- }
- var unhashedSubpacketsLength uint32
- if sig.Version == 6 {
- unhashedSubpacketsLength = uint32(buf[0])<<24 | uint32(buf[1])<<16 | uint32(buf[2])<<8 | uint32(buf[3])
- } else {
- unhashedSubpacketsLength = uint32(buf[0])<<8 | uint32(buf[1])
- }
- unhashedSubpackets := make([]byte, unhashedSubpacketsLength)
- _, err = readFull(r, unhashedSubpackets)
- if err != nil {
- return
- }
- err = parseSignatureSubpackets(sig, unhashedSubpackets, false)
- if err != nil {
- return
- }
-
- _, err = readFull(r, sig.HashTag[:2])
- if err != nil {
- return
- }
-
- if sig.Version == 6 {
- // Only for v6 signatures, a variable-length field containing the salt
- _, err = readFull(r, buf[:1])
- if err != nil {
- return
- }
- saltLength := int(buf[0])
- var expectedSaltLength int
- expectedSaltLength, err = SaltLengthForHash(sig.Hash)
- if err != nil {
- return
- }
- if saltLength != expectedSaltLength {
- err = errors.StructuralError("unexpected salt size for the given hash algorithm")
- return
- }
- salt := make([]byte, expectedSaltLength)
- _, err = readFull(r, salt)
- if err != nil {
- return
- }
- sig.salt = salt
- }
-
- switch sig.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- sig.RSASignature = new(encoding.MPI)
- _, err = sig.RSASignature.ReadFrom(r)
- case PubKeyAlgoDSA:
- sig.DSASigR = new(encoding.MPI)
- if _, err = sig.DSASigR.ReadFrom(r); err != nil {
- return
- }
-
- sig.DSASigS = new(encoding.MPI)
- _, err = sig.DSASigS.ReadFrom(r)
- case PubKeyAlgoECDSA:
- sig.ECDSASigR = new(encoding.MPI)
- if _, err = sig.ECDSASigR.ReadFrom(r); err != nil {
- return
- }
-
- sig.ECDSASigS = new(encoding.MPI)
- _, err = sig.ECDSASigS.ReadFrom(r)
- case PubKeyAlgoEdDSA:
- sig.EdDSASigR = new(encoding.MPI)
- if _, err = sig.EdDSASigR.ReadFrom(r); err != nil {
- return
- }
-
- sig.EdDSASigS = new(encoding.MPI)
- if _, err = sig.EdDSASigS.ReadFrom(r); err != nil {
- return
- }
- case PubKeyAlgoEd25519:
- sig.EdSig, err = ed25519.ReadSignature(r)
- if err != nil {
- return
- }
- case PubKeyAlgoEd448:
- sig.EdSig, err = ed448.ReadSignature(r)
- if err != nil {
- return
- }
- case ExperimentalPubKeyAlgoHMAC:
- sig.HMAC = new(encoding.ShortByteString)
- if _, err = sig.HMAC.ReadFrom(r); err != nil {
- return
- }
- default:
- panic("unreachable")
- }
- return
-}
-
-// parseSignatureSubpackets parses subpackets of the main signature packet. See
-// RFC 4880, section 5.2.3.1.
-func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) {
- for len(subpackets) > 0 {
- subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed)
- if err != nil {
- return
- }
- }
-
- if sig.CreationTime.IsZero() {
- err = errors.StructuralError("no creation time in signature")
- }
-
- return
-}
-
-type signatureSubpacketType uint8
-
-const (
- creationTimeSubpacket signatureSubpacketType = 2
- signatureExpirationSubpacket signatureSubpacketType = 3
- trustSubpacket signatureSubpacketType = 5
- regularExpressionSubpacket signatureSubpacketType = 6
- keyExpirationSubpacket signatureSubpacketType = 9
- prefSymmetricAlgosSubpacket signatureSubpacketType = 11
- issuerSubpacket signatureSubpacketType = 16
- notationDataSubpacket signatureSubpacketType = 20
- prefHashAlgosSubpacket signatureSubpacketType = 21
- prefCompressionSubpacket signatureSubpacketType = 22
- primaryUserIdSubpacket signatureSubpacketType = 25
- policyUriSubpacket signatureSubpacketType = 26
- keyFlagsSubpacket signatureSubpacketType = 27
- signerUserIdSubpacket signatureSubpacketType = 28
- reasonForRevocationSubpacket signatureSubpacketType = 29
- featuresSubpacket signatureSubpacketType = 30
- embeddedSignatureSubpacket signatureSubpacketType = 32
- issuerFingerprintSubpacket signatureSubpacketType = 33
- intendedRecipientSubpacket signatureSubpacketType = 35
- prefCipherSuitesSubpacket signatureSubpacketType = 39
-)
-
-// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1.
-func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) {
- // RFC 4880, section 5.2.3.1
- var (
- length uint32
- packetType signatureSubpacketType
- isCritical bool
- )
- if len(subpacket) == 0 {
- err = errors.StructuralError("zero length signature subpacket")
- return
- }
- switch {
- case subpacket[0] < 192:
- length = uint32(subpacket[0])
- subpacket = subpacket[1:]
- case subpacket[0] < 255:
- if len(subpacket) < 2 {
- goto Truncated
- }
- length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192
- subpacket = subpacket[2:]
- default:
- if len(subpacket) < 5 {
- goto Truncated
- }
- length = uint32(subpacket[1])<<24 |
- uint32(subpacket[2])<<16 |
- uint32(subpacket[3])<<8 |
- uint32(subpacket[4])
- subpacket = subpacket[5:]
- }
- if length > uint32(len(subpacket)) {
- goto Truncated
- }
- rest = subpacket[length:]
- subpacket = subpacket[:length]
- if len(subpacket) == 0 {
- err = errors.StructuralError("zero length signature subpacket")
- return
- }
- packetType = signatureSubpacketType(subpacket[0] & 0x7f)
- isCritical = subpacket[0]&0x80 == 0x80
- subpacket = subpacket[1:]
- sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket})
- if !isHashed &&
- packetType != issuerSubpacket &&
- packetType != issuerFingerprintSubpacket &&
- packetType != embeddedSignatureSubpacket {
- return
- }
- switch packetType {
- case creationTimeSubpacket:
- if len(subpacket) != 4 {
- err = errors.StructuralError("signature creation time not four bytes")
- return
- }
- t := binary.BigEndian.Uint32(subpacket)
- sig.CreationTime = time.Unix(int64(t), 0)
- case signatureExpirationSubpacket:
- // Signature expiration time, section 5.2.3.10
- if len(subpacket) != 4 {
- err = errors.StructuralError("expiration subpacket with bad length")
- return
- }
- sig.SigLifetimeSecs = new(uint32)
- *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket)
- case trustSubpacket:
- if len(subpacket) != 2 {
- err = errors.StructuralError("trust subpacket with bad length")
- return
- }
- // Trust level and amount, section 5.2.3.13
- sig.TrustLevel = TrustLevel(subpacket[0])
- sig.TrustAmount = TrustAmount(subpacket[1])
- case regularExpressionSubpacket:
- if len(subpacket) == 0 {
- err = errors.StructuralError("regexp subpacket with bad length")
- return
- }
- // Trust regular expression, section 5.2.3.14
- // RFC specifies the string should be null-terminated; remove a null byte from the end
- if subpacket[len(subpacket)-1] != 0x00 {
- err = errors.StructuralError("expected regular expression to be null-terminated")
- return
- }
- trustRegularExpression := string(subpacket[:len(subpacket)-1])
- sig.TrustRegularExpression = &trustRegularExpression
- case keyExpirationSubpacket:
- // Key expiration time, section 5.2.3.6
- if len(subpacket) != 4 {
- err = errors.StructuralError("key expiration subpacket with bad length")
- return
- }
- sig.KeyLifetimeSecs = new(uint32)
- *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket)
- case prefSymmetricAlgosSubpacket:
- // Preferred symmetric algorithms, section 5.2.3.7
- sig.PreferredSymmetric = make([]byte, len(subpacket))
- copy(sig.PreferredSymmetric, subpacket)
- case issuerSubpacket:
- // Issuer, section 5.2.3.5
- if sig.Version > 4 && isHashed {
- err = errors.StructuralError("issuer subpacket found in v6 key")
- return
- }
- if len(subpacket) != 8 {
- err = errors.StructuralError("issuer subpacket with bad length")
- return
- }
- if sig.Version <= 4 {
- sig.IssuerKeyId = new(uint64)
- *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket)
- }
- case notationDataSubpacket:
- // Notation data, section 5.2.3.16
- if len(subpacket) < 8 {
- err = errors.StructuralError("notation data subpacket with bad length")
- return
- }
-
- nameLength := uint32(subpacket[4])<<8 | uint32(subpacket[5])
- valueLength := uint32(subpacket[6])<<8 | uint32(subpacket[7])
- if len(subpacket) != int(nameLength)+int(valueLength)+8 {
- err = errors.StructuralError("notation data subpacket with bad length")
- return
- }
-
- notation := Notation{
- IsHumanReadable: (subpacket[0] & 0x80) == 0x80,
- Name: string(subpacket[8:(nameLength + 8)]),
- Value: subpacket[(nameLength + 8):(valueLength + nameLength + 8)],
- IsCritical: isCritical,
- }
-
- sig.Notations = append(sig.Notations, ¬ation)
- case prefHashAlgosSubpacket:
- // Preferred hash algorithms, section 5.2.3.8
- sig.PreferredHash = make([]byte, len(subpacket))
- copy(sig.PreferredHash, subpacket)
- case prefCompressionSubpacket:
- // Preferred compression algorithms, section 5.2.3.9
- sig.PreferredCompression = make([]byte, len(subpacket))
- copy(sig.PreferredCompression, subpacket)
- case primaryUserIdSubpacket:
- // Primary User ID, section 5.2.3.19
- if len(subpacket) != 1 {
- err = errors.StructuralError("primary user id subpacket with bad length")
- return
- }
- sig.IsPrimaryId = new(bool)
- if subpacket[0] > 0 {
- *sig.IsPrimaryId = true
- }
- case keyFlagsSubpacket:
- // Key flags, section 5.2.3.21
- if len(subpacket) == 0 {
- err = errors.StructuralError("empty key flags subpacket")
- return
- }
- sig.FlagsValid = true
- if subpacket[0]&KeyFlagCertify != 0 {
- sig.FlagCertify = true
- }
- if subpacket[0]&KeyFlagSign != 0 {
- sig.FlagSign = true
- }
- if subpacket[0]&KeyFlagEncryptCommunications != 0 {
- sig.FlagEncryptCommunications = true
- }
- if subpacket[0]&KeyFlagEncryptStorage != 0 {
- sig.FlagEncryptStorage = true
- }
- if subpacket[0]&KeyFlagSplitKey != 0 {
- sig.FlagSplitKey = true
- }
- if subpacket[0]&KeyFlagAuthenticate != 0 {
- sig.FlagAuthenticate = true
- }
- if subpacket[0]&KeyFlagForward != 0 {
- sig.FlagForward = true
- }
- if subpacket[0]&KeyFlagGroupKey != 0 {
- sig.FlagGroupKey = true
- }
- case signerUserIdSubpacket:
- userId := string(subpacket)
- sig.SignerUserId = &userId
- case reasonForRevocationSubpacket:
- // Reason For Revocation, section 5.2.3.23
- if len(subpacket) == 0 {
- err = errors.StructuralError("empty revocation reason subpacket")
- return
- }
- sig.RevocationReason = new(ReasonForRevocation)
- *sig.RevocationReason = NewReasonForRevocation(subpacket[0])
- sig.RevocationReasonText = string(subpacket[1:])
- case featuresSubpacket:
- // Features subpacket, section 5.2.3.24 specifies a very general
- // mechanism for OpenPGP implementations to signal support for new
- // features.
- if len(subpacket) > 0 {
- if subpacket[0]&0x01 != 0 {
- sig.SEIPDv1 = true
- }
- // 0x02 and 0x04 are reserved
- if subpacket[0]&0x08 != 0 {
- sig.SEIPDv2 = true
- }
- }
- case embeddedSignatureSubpacket:
- // Only usage is in signatures that cross-certify
- // signing subkeys. section 5.2.3.26 describes the
- // format, with its usage described in section 11.1
- if sig.EmbeddedSignature != nil {
- err = errors.StructuralError("Cannot have multiple embedded signatures")
- return
- }
- sig.EmbeddedSignature = new(Signature)
- // Embedded signatures are required to be v4 signatures see
- // section 12.1. However, we only parse v4 signatures in this
- // file anyway.
- if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil {
- return nil, err
- }
- if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding {
- return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType)))
- }
- case policyUriSubpacket:
- // Policy URI, section 5.2.3.20
- sig.PolicyURI = string(subpacket)
- case issuerFingerprintSubpacket:
- if len(subpacket) == 0 {
- err = errors.StructuralError("empty issuer fingerprint subpacket")
- return
- }
- v, l := subpacket[0], len(subpacket[1:])
- if v >= 5 && l != 32 || v < 5 && l != 20 {
- return nil, errors.StructuralError("bad fingerprint length")
- }
- sig.IssuerFingerprint = make([]byte, l)
- copy(sig.IssuerFingerprint, subpacket[1:])
- sig.IssuerKeyId = new(uint64)
- if v >= 5 {
- *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[1:9])
- } else {
- *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[13:21])
- }
- case intendedRecipientSubpacket:
- // Intended Recipient Fingerprint
- // https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#name-intended-recipient-fingerpr
- if len(subpacket) < 1 {
- return nil, errors.StructuralError("invalid intended recipient fingerpring length")
- }
- version, length := subpacket[0], len(subpacket[1:])
- if version >= 5 && length != 32 || version < 5 && length != 20 {
- return nil, errors.StructuralError("invalid fingerprint length")
- }
- fingerprint := make([]byte, length)
- copy(fingerprint, subpacket[1:])
- sig.IntendedRecipients = append(sig.IntendedRecipients, &Recipient{int(version), fingerprint})
- case prefCipherSuitesSubpacket:
- // Preferred AEAD cipher suites
- // See https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#name-preferred-aead-ciphersuites
- if len(subpacket)%2 != 0 {
- err = errors.StructuralError("invalid aead cipher suite length")
- return
- }
-
- sig.PreferredCipherSuites = make([][2]byte, len(subpacket)/2)
-
- for i := 0; i < len(subpacket)/2; i++ {
- sig.PreferredCipherSuites[i] = [2]uint8{subpacket[2*i], subpacket[2*i+1]}
- }
- default:
- if isCritical {
- err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType)))
- return
- }
- }
- return
-
-Truncated:
- err = errors.StructuralError("signature subpacket truncated")
- return
-}
-
-// subpacketLengthLength returns the length, in bytes, of an encoded length value.
-func subpacketLengthLength(length int) int {
- if length < 192 {
- return 1
- }
- if length < 16320 {
- return 2
- }
- return 5
-}
-
-func (sig *Signature) CheckKeyIdOrFingerprint(pk *PublicKey) bool {
- if sig.IssuerFingerprint != nil && len(sig.IssuerFingerprint) >= 20 {
- return bytes.Equal(sig.IssuerFingerprint, pk.Fingerprint)
- }
- return sig.IssuerKeyId != nil && *sig.IssuerKeyId == pk.KeyId
-}
-
-func (sig *Signature) CheckKeyIdOrFingerprintExplicit(fingerprint []byte, keyId uint64) bool {
- if sig.IssuerFingerprint != nil && len(sig.IssuerFingerprint) >= 20 && fingerprint != nil {
- return bytes.Equal(sig.IssuerFingerprint, fingerprint)
- }
- return sig.IssuerKeyId != nil && *sig.IssuerKeyId == keyId
-}
-
-// serializeSubpacketLength marshals the given length into to.
-func serializeSubpacketLength(to []byte, length int) int {
- // RFC 4880, Section 4.2.2.
- if length < 192 {
- to[0] = byte(length)
- return 1
- }
- if length < 16320 {
- length -= 192
- to[0] = byte((length >> 8) + 192)
- to[1] = byte(length)
- return 2
- }
- to[0] = 255
- to[1] = byte(length >> 24)
- to[2] = byte(length >> 16)
- to[3] = byte(length >> 8)
- to[4] = byte(length)
- return 5
-}
-
-// subpacketsLength returns the serialized length, in bytes, of the given
-// subpackets.
-func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) {
- for _, subpacket := range subpackets {
- if subpacket.hashed == hashed {
- length += subpacketLengthLength(len(subpacket.contents) + 1)
- length += 1 // type byte
- length += len(subpacket.contents)
- }
- }
- return
-}
-
-// serializeSubpackets marshals the given subpackets into to.
-func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) {
- for _, subpacket := range subpackets {
- if subpacket.hashed == hashed {
- n := serializeSubpacketLength(to, len(subpacket.contents)+1)
- to[n] = byte(subpacket.subpacketType)
- if subpacket.isCritical {
- to[n] |= 0x80
- }
- to = to[1+n:]
- n = copy(to, subpacket.contents)
- to = to[n:]
- }
- }
-}
-
-// SigExpired returns whether sig is a signature that has expired or is created
-// in the future.
-func (sig *Signature) SigExpired(currentTime time.Time) bool {
- if sig.CreationTime.Unix() > currentTime.Unix() {
- return true
- }
- if sig.SigLifetimeSecs == nil || *sig.SigLifetimeSecs == 0 {
- return false
- }
- expiry := sig.CreationTime.Add(time.Duration(*sig.SigLifetimeSecs) * time.Second)
- return currentTime.Unix() > expiry.Unix()
-}
-
-// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing.
-func (sig *Signature) buildHashSuffix(hashedSubpackets []byte) (err error) {
- var hashId byte
- var ok bool
-
- if sig.Version < 5 {
- hashId, ok = algorithm.HashToHashIdWithSha1(sig.Hash)
- } else {
- hashId, ok = algorithm.HashToHashId(sig.Hash)
- }
-
- if !ok {
- sig.HashSuffix = nil
- return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash)))
- }
-
- hashedFields := bytes.NewBuffer([]byte{
- uint8(sig.Version),
- uint8(sig.SigType),
- uint8(sig.PubKeyAlgo),
- uint8(hashId),
- })
- hashedSubpacketsLength := len(hashedSubpackets)
- if sig.Version == 6 {
- // v6 signatures store the length in 4 octets
- hashedFields.Write([]byte{
- uint8(hashedSubpacketsLength >> 24),
- uint8(hashedSubpacketsLength >> 16),
- uint8(hashedSubpacketsLength >> 8),
- uint8(hashedSubpacketsLength),
- })
- } else {
- hashedFields.Write([]byte{
- uint8(hashedSubpacketsLength >> 8),
- uint8(hashedSubpacketsLength),
- })
- }
- lenPrefix := hashedFields.Len()
- hashedFields.Write(hashedSubpackets)
-
- var l uint64 = uint64(lenPrefix + len(hashedSubpackets))
- if sig.Version == 5 {
- // v5 case
- hashedFields.Write([]byte{0x05, 0xff})
- hashedFields.Write([]byte{
- uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32),
- uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l),
- })
- } else {
- // v4 and v6 case
- hashedFields.Write([]byte{byte(sig.Version), 0xff})
- hashedFields.Write([]byte{
- uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l),
- })
- }
- sig.HashSuffix = make([]byte, hashedFields.Len())
- copy(sig.HashSuffix, hashedFields.Bytes())
- return
-}
-
-func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) {
- hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true)
- hashedSubpackets := make([]byte, hashedSubpacketsLen)
- serializeSubpackets(hashedSubpackets, sig.outSubpackets, true)
- err = sig.buildHashSuffix(hashedSubpackets)
- if err != nil {
- return
- }
- if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) {
- sig.AddMetadataToHashSuffix()
- }
-
- h.Write(sig.HashSuffix)
- digest = h.Sum(nil)
- copy(sig.HashTag[:], digest)
- return
-}
-
-// PrepareSign must be called to create a hash object before Sign for v6 signatures.
-// The created hash object initially hashes a randomly generated salt
-// as required by v6 signatures. The generated salt is stored in sig. If the signature is not v6,
-// the method returns an empty hash object.
-// See RFC the crypto refresh Section 3.2.4.
-func (sig *Signature) PrepareSign(config *Config) (hash.Hash, error) {
- if !sig.Hash.Available() {
- return nil, errors.UnsupportedError("hash function")
- }
- hasher := sig.Hash.New()
- if sig.Version == 6 {
- if sig.salt == nil {
- var err error
- sig.salt, err = SignatureSaltForHash(sig.Hash, config.Random())
- if err != nil {
- return nil, err
- }
- }
- hasher.Write(sig.salt)
- }
- return hasher, nil
-}
-
-// SetSalt sets the signature salt for v6 signatures.
-// Assumes salt is generated correctly and checks if length matches.
-// If the signature is not v6, the method ignores the salt.
-// Use PrepareSign whenever possible instead of generating and
-// hashing the salt externally.
-// See RFC the crypto refresh Section 3.2.4.
-func (sig *Signature) SetSalt(salt []byte) error {
- if sig.Version == 6 {
- expectedSaltLength, err := SaltLengthForHash(sig.Hash)
- if err != nil {
- return err
- }
- if salt == nil || len(salt) != expectedSaltLength {
- return errors.InvalidArgumentError("unexpected salt size for the given hash algorithm")
- }
- sig.salt = salt
- }
- return nil
-}
-
-// PrepareVerify must be called to create a hash object before verifying v6 signatures.
-// The created hash object initially hashes the internally stored salt.
-// If the signature is not v6, the method returns an empty hash object.
-// See crypto refresh Section 3.2.4.
-func (sig *Signature) PrepareVerify() (hash.Hash, error) {
- if !sig.Hash.Available() {
- return nil, errors.UnsupportedError("hash function")
- }
- hasher := sig.Hash.New()
- if sig.Version == 6 {
- if sig.salt == nil {
- return nil, errors.StructuralError("v6 requires a salt for the hash to be signed")
- }
- hasher.Write(sig.salt)
- }
- return hasher, nil
-}
-
-// Sign signs a message with a private key. The hash, h, must contain
-// the hash of the message to be signed and will be mutated by this function.
-// On success, the signature is stored in sig. Call Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) {
- if priv.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
- sig.Version = priv.PublicKey.Version
- sig.IssuerFingerprint = priv.PublicKey.Fingerprint
- sig.outSubpackets, err = sig.buildSubpackets(priv.PublicKey)
- if err != nil {
- return err
- }
- digest, err := sig.signPrepareHash(h)
- if err != nil {
- return
- }
- switch priv.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- // supports both *rsa.PrivateKey and crypto.Signer
- sigdata, err := priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash)
- if err == nil {
- sig.RSASignature = encoding.NewMPI(sigdata)
- }
- case PubKeyAlgoDSA:
- dsaPriv := priv.PrivateKey.(*dsa.PrivateKey)
-
- // Need to truncate hashBytes to match FIPS 186-3 section 4.6.
- subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8
- if len(digest) > subgroupSize {
- digest = digest[:subgroupSize]
- }
- r, s, err := dsa.Sign(config.Random(), dsaPriv, digest)
- if err == nil {
- sig.DSASigR = new(encoding.MPI).SetBig(r)
- sig.DSASigS = new(encoding.MPI).SetBig(s)
- }
- case PubKeyAlgoECDSA:
- sk := priv.PrivateKey.(*ecdsa.PrivateKey)
- r, s, err := ecdsa.Sign(config.Random(), sk, digest)
-
- if err == nil {
- sig.ECDSASigR = new(encoding.MPI).SetBig(r)
- sig.ECDSASigS = new(encoding.MPI).SetBig(s)
- }
- case PubKeyAlgoEdDSA:
- sk := priv.PrivateKey.(*eddsa.PrivateKey)
- r, s, err := eddsa.Sign(sk, digest)
- if err == nil {
- sig.EdDSASigR = encoding.NewMPI(r)
- sig.EdDSASigS = encoding.NewMPI(s)
- }
- case PubKeyAlgoEd25519:
- sk := priv.PrivateKey.(*ed25519.PrivateKey)
- signature, err := ed25519.Sign(sk, digest)
- if err == nil {
- sig.EdSig = signature
- }
- case PubKeyAlgoEd448:
- sk := priv.PrivateKey.(*ed448.PrivateKey)
- signature, err := ed448.Sign(sk, digest)
- if err == nil {
- sig.EdSig = signature
- }
- case ExperimentalPubKeyAlgoHMAC:
- sigdata, err := priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, nil)
- if err == nil {
- sig.HMAC = encoding.NewShortByteString(sigdata)
- }
- default:
- err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo)))
- }
-
- return
-}
-
-// SignUserId computes a signature from priv, asserting that pub is a valid
-// key for the identity id. On success, the signature is stored in sig. Call
-// Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error {
- if priv.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
- prepareHash, err := sig.PrepareSign(config)
- if err != nil {
- return err
- }
- if err := userIdSignatureHash(id, pub, prepareHash); err != nil {
- return err
- }
- return sig.Sign(prepareHash, priv, config)
-}
-
-// SignDirectKeyBinding computes a signature from priv
-// On success, the signature is stored in sig.
-// Call Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) SignDirectKeyBinding(pub *PublicKey, priv *PrivateKey, config *Config) error {
- if priv.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
- prepareHash, err := sig.PrepareSign(config)
- if err != nil {
- return err
- }
- if err := directKeySignatureHash(pub, prepareHash); err != nil {
- return err
- }
- return sig.Sign(prepareHash, priv, config)
-}
-
-// CrossSignKey computes a signature from signingKey on pub hashed using hashKey. On success,
-// the signature is stored in sig. Call Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) CrossSignKey(pub *PublicKey, hashKey *PublicKey, signingKey *PrivateKey,
- config *Config) error {
- prepareHash, err := sig.PrepareSign(config)
- if err != nil {
- return err
- }
- h, err := keySignatureHash(hashKey, pub, prepareHash)
- if err != nil {
- return err
- }
- return sig.Sign(h, signingKey, config)
-}
-
-// SignKey computes a signature from priv, asserting that pub is a subkey. On
-// success, the signature is stored in sig. Call Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error {
- if priv.Dummy() {
- return errors.ErrDummyPrivateKey("dummy key found")
- }
- prepareHash, err := sig.PrepareSign(config)
- if err != nil {
- return err
- }
- h, err := keySignatureHash(&priv.PublicKey, pub, prepareHash)
- if err != nil {
- return err
- }
- return sig.Sign(h, priv, config)
-}
-
-// RevokeKey computes a revocation signature of pub using priv. On success, the signature is
-// stored in sig. Call Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) RevokeKey(pub *PublicKey, priv *PrivateKey, config *Config) error {
- prepareHash, err := sig.PrepareSign(config)
- if err != nil {
- return err
- }
- if err := keyRevocationHash(pub, prepareHash); err != nil {
- return err
- }
- return sig.Sign(prepareHash, priv, config)
-}
-
-// RevokeSubkey computes a subkey revocation signature of pub using priv.
-// On success, the signature is stored in sig. Call Serialize to write it out.
-// If config is nil, sensible defaults will be used.
-func (sig *Signature) RevokeSubkey(pub *PublicKey, priv *PrivateKey, config *Config) error {
- // Identical to a subkey binding signature
- return sig.SignKey(pub, priv, config)
-}
-
-// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been
-// called first.
-func (sig *Signature) Serialize(w io.Writer) (err error) {
- if len(sig.outSubpackets) == 0 {
- sig.outSubpackets = sig.rawSubpackets
- }
- if sig.RSASignature == nil && sig.DSASigR == nil && sig.ECDSASigR == nil && sig.EdDSASigR == nil && sig.EdSig == nil && sig.HMAC == nil {
- return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize")
- }
-
- sigLength := 0
- switch sig.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- sigLength = int(sig.RSASignature.EncodedLength())
- case PubKeyAlgoDSA:
- sigLength = int(sig.DSASigR.EncodedLength())
- sigLength += int(sig.DSASigS.EncodedLength())
- case PubKeyAlgoECDSA:
- sigLength = int(sig.ECDSASigR.EncodedLength())
- sigLength += int(sig.ECDSASigS.EncodedLength())
- case PubKeyAlgoEdDSA:
- sigLength = int(sig.EdDSASigR.EncodedLength())
- sigLength += int(sig.EdDSASigS.EncodedLength())
- case PubKeyAlgoEd25519:
- sigLength = ed25519.SignatureSize
- case PubKeyAlgoEd448:
- sigLength = ed448.SignatureSize
- case ExperimentalPubKeyAlgoHMAC:
- sigLength = int(sig.HMAC.EncodedLength())
- default:
- panic("impossible")
- }
-
- hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true)
- unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false)
- length := 4 + /* length of version|signature type|public-key algorithm|hash algorithm */
- 2 /* length of hashed subpackets */ + hashedSubpacketsLen +
- 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen +
- 2 /* hash tag */ + sigLength
- if sig.Version == 6 {
- length += 4 + /* the two length fields are four-octet instead of two */
- 1 + /* salt length */
- len(sig.salt) /* length salt */
- }
- err = serializeHeader(w, packetTypeSignature, length)
- if err != nil {
- return
- }
- err = sig.serializeBody(w)
- if err != nil {
- return err
- }
- return
-}
-
-func (sig *Signature) serializeBody(w io.Writer) (err error) {
- var fields []byte
- if sig.Version == 6 {
- // v6 signatures use 4 octets for length
- hashedSubpacketsLen :=
- uint32(uint32(sig.HashSuffix[4])<<24) |
- uint32(uint32(sig.HashSuffix[5])<<16) |
- uint32(uint32(sig.HashSuffix[6])<<8) |
- uint32(sig.HashSuffix[7])
- fields = sig.HashSuffix[:8+hashedSubpacketsLen]
- } else {
- hashedSubpacketsLen := uint16(uint16(sig.HashSuffix[4])<<8) |
- uint16(sig.HashSuffix[5])
- fields = sig.HashSuffix[:6+hashedSubpacketsLen]
-
- }
- _, err = w.Write(fields)
- if err != nil {
- return
- }
-
- unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false)
- var unhashedSubpackets []byte
- if sig.Version == 6 {
- unhashedSubpackets = make([]byte, 4+unhashedSubpacketsLen)
- unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 24)
- unhashedSubpackets[1] = byte(unhashedSubpacketsLen >> 16)
- unhashedSubpackets[2] = byte(unhashedSubpacketsLen >> 8)
- unhashedSubpackets[3] = byte(unhashedSubpacketsLen)
- serializeSubpackets(unhashedSubpackets[4:], sig.outSubpackets, false)
- } else {
- unhashedSubpackets = make([]byte, 2+unhashedSubpacketsLen)
- unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8)
- unhashedSubpackets[1] = byte(unhashedSubpacketsLen)
- serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false)
- }
-
- _, err = w.Write(unhashedSubpackets)
- if err != nil {
- return
- }
- _, err = w.Write(sig.HashTag[:])
- if err != nil {
- return
- }
-
- if sig.Version == 6 {
- // write salt for v6 signatures
- _, err = w.Write([]byte{uint8(len(sig.salt))})
- if err != nil {
- return
- }
- _, err = w.Write(sig.salt)
- if err != nil {
- return
- }
- }
-
- switch sig.PubKeyAlgo {
- case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
- _, err = w.Write(sig.RSASignature.EncodedBytes())
- case PubKeyAlgoDSA:
- if _, err = w.Write(sig.DSASigR.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(sig.DSASigS.EncodedBytes())
- case PubKeyAlgoECDSA:
- if _, err = w.Write(sig.ECDSASigR.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(sig.ECDSASigS.EncodedBytes())
- case PubKeyAlgoEdDSA:
- if _, err = w.Write(sig.EdDSASigR.EncodedBytes()); err != nil {
- return
- }
- _, err = w.Write(sig.EdDSASigS.EncodedBytes())
- case PubKeyAlgoEd25519:
- err = ed25519.WriteSignature(w, sig.EdSig)
- case PubKeyAlgoEd448:
- err = ed448.WriteSignature(w, sig.EdSig)
- case ExperimentalPubKeyAlgoHMAC:
- _, err = w.Write(sig.HMAC.EncodedBytes())
- default:
- panic("impossible")
- }
- return
-}
-
-// outputSubpacket represents a subpacket to be marshaled.
-type outputSubpacket struct {
- hashed bool // true if this subpacket is in the hashed area.
- subpacketType signatureSubpacketType
- isCritical bool
- contents []byte
-}
-
-func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubpacket, err error) {
- creationTime := make([]byte, 4)
- binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix()))
- subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime})
-
- if sig.IssuerKeyId != nil && sig.Version == 4 {
- keyId := make([]byte, 8)
- binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId)
- subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId})
- }
- if sig.IssuerFingerprint != nil {
- contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...)
- subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, sig.Version >= 5, contents})
- }
- if sig.SignerUserId != nil {
- subpackets = append(subpackets, outputSubpacket{true, signerUserIdSubpacket, false, []byte(*sig.SignerUserId)})
- }
- if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 {
- sigLifetime := make([]byte, 4)
- binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs)
- subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime})
- }
-
- // Key flags may only appear in self-signatures or certification signatures.
-
- if sig.FlagsValid {
- var flags byte
- if sig.FlagCertify {
- flags |= KeyFlagCertify
- }
- if sig.FlagSign {
- flags |= KeyFlagSign
- }
- if sig.FlagEncryptCommunications {
- flags |= KeyFlagEncryptCommunications
- }
- if sig.FlagEncryptStorage {
- flags |= KeyFlagEncryptStorage
- }
- if sig.FlagSplitKey {
- flags |= KeyFlagSplitKey
- }
- if sig.FlagAuthenticate {
- flags |= KeyFlagAuthenticate
- }
- if sig.FlagForward {
- flags |= KeyFlagForward
- }
- if sig.FlagGroupKey {
- flags |= KeyFlagGroupKey
- }
- subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}})
- }
-
- for _, notation := range sig.Notations {
- subpackets = append(
- subpackets,
- outputSubpacket{
- true,
- notationDataSubpacket,
- notation.IsCritical,
- notation.getData(),
- })
- }
-
- for _, recipient := range sig.IntendedRecipients {
- subpackets = append(
- subpackets,
- outputSubpacket{
- true,
- intendedRecipientSubpacket,
- false,
- recipient.Serialize(),
- })
- }
-
- // The following subpackets may only appear in self-signatures.
-
- var features = byte(0x00)
- if sig.SEIPDv1 {
- features |= 0x01
- }
- if sig.SEIPDv2 {
- features |= 0x08
- }
-
- if features != 0x00 {
- subpackets = append(subpackets, outputSubpacket{true, featuresSubpacket, false, []byte{features}})
- }
-
- if sig.TrustLevel != 0 {
- subpackets = append(subpackets, outputSubpacket{true, trustSubpacket, true, []byte{byte(sig.TrustLevel), byte(sig.TrustAmount)}})
- }
-
- if sig.TrustRegularExpression != nil {
- // RFC specifies the string should be null-terminated; add a null byte to the end
- subpackets = append(subpackets, outputSubpacket{true, regularExpressionSubpacket, true, []byte(*sig.TrustRegularExpression + "\000")})
- }
-
- if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 {
- keyLifetime := make([]byte, 4)
- binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs)
- subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime})
- }
-
- if sig.IsPrimaryId != nil && *sig.IsPrimaryId {
- subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}})
- }
-
- if len(sig.PreferredSymmetric) > 0 {
- subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric})
- }
-
- if len(sig.PreferredHash) > 0 {
- subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash})
- }
-
- if len(sig.PreferredCompression) > 0 {
- subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression})
- }
-
- if len(sig.PolicyURI) > 0 {
- subpackets = append(subpackets, outputSubpacket{true, policyUriSubpacket, false, []uint8(sig.PolicyURI)})
- }
-
- if len(sig.PreferredCipherSuites) > 0 {
- serialized := make([]byte, len(sig.PreferredCipherSuites)*2)
- for i, cipherSuite := range sig.PreferredCipherSuites {
- serialized[2*i] = cipherSuite[0]
- serialized[2*i+1] = cipherSuite[1]
- }
- subpackets = append(subpackets, outputSubpacket{true, prefCipherSuitesSubpacket, false, serialized})
- }
-
- // Revocation reason appears only in revocation signatures and is serialized as per section 5.2.3.23.
- if sig.RevocationReason != nil {
- subpackets = append(subpackets, outputSubpacket{true, reasonForRevocationSubpacket, true,
- append([]uint8{uint8(*sig.RevocationReason)}, []uint8(sig.RevocationReasonText)...)})
- }
-
- // EmbeddedSignature appears only in subkeys capable of signing and is serialized as per section 5.2.3.26.
- if sig.EmbeddedSignature != nil {
- var buf bytes.Buffer
- err = sig.EmbeddedSignature.serializeBody(&buf)
- if err != nil {
- return
- }
- subpackets = append(subpackets, outputSubpacket{true, embeddedSignatureSubpacket, true, buf.Bytes()})
- }
-
- return
-}
-
-// AddMetadataToHashSuffix modifies the current hash suffix to include metadata
-// (format, filename, and time). Version 5 keys protect this data including it
-// in the hash computation. See section 5.2.4.
-func (sig *Signature) AddMetadataToHashSuffix() {
- if sig == nil || sig.Version != 5 {
- return
- }
- if sig.SigType != 0x00 && sig.SigType != 0x01 {
- return
- }
- lit := sig.Metadata
- if lit == nil {
- // This will translate into six 0x00 bytes.
- lit = &LiteralData{}
- }
-
- // Extract the current byte count
- n := sig.HashSuffix[len(sig.HashSuffix)-8:]
- l := uint64(
- uint64(n[0])<<56 | uint64(n[1])<<48 | uint64(n[2])<<40 | uint64(n[3])<<32 |
- uint64(n[4])<<24 | uint64(n[5])<<16 | uint64(n[6])<<8 | uint64(n[7]))
-
- suffix := bytes.NewBuffer(nil)
- suffix.Write(sig.HashSuffix[:l])
-
- // Add the metadata
- var buf [4]byte
- buf[0] = lit.Format
- fileName := lit.FileName
- if len(lit.FileName) > 255 {
- fileName = fileName[:255]
- }
- buf[1] = byte(len(fileName))
- suffix.Write(buf[:2])
- suffix.Write([]byte(lit.FileName))
- binary.BigEndian.PutUint32(buf[:], lit.Time)
- suffix.Write(buf[:])
-
- suffix.Write([]byte{0x05, 0xff})
- suffix.Write([]byte{
- uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32),
- uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l),
- })
- sig.HashSuffix = suffix.Bytes()
-}
-
-// SaltLengthForHash selects the required salt length for the given hash algorithm,
-// as per Table 23 (Hash algorithm registry) of the crypto refresh.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#section-9.5|Crypto Refresh Section 9.5.
-func SaltLengthForHash(hash crypto.Hash) (int, error) {
- switch hash {
- case crypto.SHA256, crypto.SHA224, crypto.SHA3_256:
- return 16, nil
- case crypto.SHA384:
- return 24, nil
- case crypto.SHA512, crypto.SHA3_512:
- return 32, nil
- default:
- return 0, errors.UnsupportedError("hash function not supported for V6 signatures")
- }
-}
-
-// SignatureSaltForHash generates a random signature salt
-// with the length for the given hash algorithm.
-// See https://datatracker.ietf.org/doc/html/draft-ietf-openpgp-crypto-refresh#section-9.5|Crypto Refresh Section 9.5.
-func SignatureSaltForHash(hash crypto.Hash, randReader io.Reader) ([]byte, error) {
- saltLength, err := SaltLengthForHash(hash)
- if err != nil {
- return nil, err
- }
- salt := make([]byte, saltLength)
- _, err = io.ReadFull(randReader, salt)
- if err != nil {
- return nil, err
- }
- return salt, nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go
deleted file mode 100644
index c97b98b9..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go
+++ /dev/null
@@ -1,315 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "crypto/cipher"
- "crypto/sha256"
- "io"
- "strconv"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/s2k"
- "golang.org/x/crypto/hkdf"
-)
-
-// This is the largest session key that we'll support. Since at most 256-bit cipher
-// is supported in OpenPGP, this is large enough to contain also the auth tag.
-const maxSessionKeySizeInBytes = 64
-
-// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC
-// 4880, section 5.3.
-type SymmetricKeyEncrypted struct {
- Version int
- CipherFunc CipherFunction
- Mode AEADMode
- s2k func(out, in []byte)
- iv []byte
- encryptedKey []byte // Contains also the authentication tag for AEAD
-}
-
-// parse parses an SymmetricKeyEncrypted packet as specified in
-// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#name-symmetric-key-encrypted-ses
-func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error {
- var buf [1]byte
-
- // Version
- if _, err := readFull(r, buf[:]); err != nil {
- return err
- }
- ske.Version = int(buf[0])
- if ske.Version != 4 && ske.Version != 5 && ske.Version != 6 {
- return errors.UnsupportedError("unknown SymmetricKeyEncrypted version")
- }
-
- if ske.Version > 5 {
- // Scalar octet count
- if _, err := readFull(r, buf[:]); err != nil {
- return err
- }
- }
-
- // Cipher function
- if _, err := readFull(r, buf[:]); err != nil {
- return err
- }
- ske.CipherFunc = CipherFunction(buf[0])
- if !ske.CipherFunc.IsSupported() {
- return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[0])))
- }
-
- if ske.Version >= 5 {
- // AEAD mode
- if _, err := readFull(r, buf[:]); err != nil {
- return errors.StructuralError("cannot read AEAD octet from packet")
- }
- ske.Mode = AEADMode(buf[0])
- }
-
- if ske.Version > 5 {
- // Scalar octet count
- if _, err := readFull(r, buf[:]); err != nil {
- return err
- }
- }
-
- var err error
- if ske.s2k, err = s2k.Parse(r); err != nil {
- if _, ok := err.(errors.ErrDummyPrivateKey); ok {
- return errors.UnsupportedError("missing key GNU extension in session key")
- }
- return err
- }
-
- if ske.Version >= 5 {
- // AEAD IV
- iv := make([]byte, ske.Mode.IvLength())
- _, err := readFull(r, iv)
- if err != nil {
- return errors.StructuralError("cannot read AEAD IV")
- }
-
- ske.iv = iv
- }
-
- encryptedKey := make([]byte, maxSessionKeySizeInBytes)
- // The session key may follow. We just have to try and read to find
- // out. If it exists then we limit it to maxSessionKeySizeInBytes.
- n, err := readFull(r, encryptedKey)
- if err != nil && err != io.ErrUnexpectedEOF {
- return err
- }
-
- if n != 0 {
- if n == maxSessionKeySizeInBytes {
- return errors.UnsupportedError("oversized encrypted session key")
- }
- ske.encryptedKey = encryptedKey[:n]
- }
- return nil
-}
-
-// Decrypt attempts to decrypt an encrypted session key and returns the key and
-// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data
-// packet.
-func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) {
- key := make([]byte, ske.CipherFunc.KeySize())
- ske.s2k(key, passphrase)
- if len(ske.encryptedKey) == 0 {
- return key, ske.CipherFunc, nil
- }
- switch ske.Version {
- case 4:
- plaintextKey, cipherFunc, err := ske.decryptV4(key)
- return plaintextKey, cipherFunc, err
- case 5, 6:
- plaintextKey, err := ske.aeadDecrypt(ske.Version, key)
- return plaintextKey, CipherFunction(0), err
- }
- err := errors.UnsupportedError("unknown SymmetricKeyEncrypted version")
- return nil, CipherFunction(0), err
-}
-
-func (ske *SymmetricKeyEncrypted) decryptV4(key []byte) ([]byte, CipherFunction, error) {
- // the IV is all zeros
- iv := make([]byte, ske.CipherFunc.blockSize())
- c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv)
- plaintextKey := make([]byte, len(ske.encryptedKey))
- c.XORKeyStream(plaintextKey, ske.encryptedKey)
- cipherFunc := CipherFunction(plaintextKey[0])
- if cipherFunc.blockSize() == 0 {
- return nil, ske.CipherFunc, errors.UnsupportedError(
- "unknown cipher: " + strconv.Itoa(int(cipherFunc)))
- }
- plaintextKey = plaintextKey[1:]
- if len(plaintextKey) != cipherFunc.KeySize() {
- return nil, cipherFunc, errors.StructuralError(
- "length of decrypted key not equal to cipher keysize")
- }
- return plaintextKey, cipherFunc, nil
-}
-
-func (ske *SymmetricKeyEncrypted) aeadDecrypt(version int, key []byte) ([]byte, error) {
- adata := []byte{0xc3, byte(version), byte(ske.CipherFunc), byte(ske.Mode)}
- aead := getEncryptedKeyAeadInstance(ske.CipherFunc, ske.Mode, key, adata, version)
-
- plaintextKey, err := aead.Open(nil, ske.iv, ske.encryptedKey, adata)
- if err != nil {
- return nil, err
- }
- return plaintextKey, nil
-}
-
-// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w.
-// The packet contains a random session key, encrypted by a key derived from
-// the given passphrase. The session key is returned and must be passed to
-// SerializeSymmetricallyEncrypted.
-// If config is nil, sensible defaults will be used.
-func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) {
- cipherFunc := config.Cipher()
-
- sessionKey := make([]byte, cipherFunc.KeySize())
- _, err = io.ReadFull(config.Random(), sessionKey)
- if err != nil {
- return
- }
-
- err = SerializeSymmetricKeyEncryptedReuseKey(w, sessionKey, passphrase, config)
- if err != nil {
- return
- }
-
- key = sessionKey
- return
-}
-
-// SerializeSymmetricKeyEncryptedReuseKey serializes a symmetric key packet to w.
-// The packet contains the given session key, encrypted by a key derived from
-// the given passphrase. The returned session key must be passed to
-// SerializeSymmetricallyEncrypted.
-// If config is nil, sensible defaults will be used.
-func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, passphrase []byte, config *Config) (err error) {
- var version int
- if config.AEAD() != nil {
- version = 6
- } else {
- version = 4
- }
- cipherFunc := config.Cipher()
- // cipherFunc must be AES
- if !cipherFunc.IsSupported() || cipherFunc < CipherAES128 || cipherFunc > CipherAES256 {
- return errors.UnsupportedError("unsupported cipher: " + strconv.Itoa(int(cipherFunc)))
- }
-
- keySize := cipherFunc.KeySize()
- s2kBuf := new(bytes.Buffer)
- keyEncryptingKey := make([]byte, keySize)
- // s2k.Serialize salts and stretches the passphrase, and writes the
- // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf.
- err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, config.S2K())
- if err != nil {
- return
- }
- s2kBytes := s2kBuf.Bytes()
-
- var packetLength int
- switch version {
- case 4:
- packetLength = 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize
- case 5, 6:
- ivLen := config.AEAD().Mode().IvLength()
- tagLen := config.AEAD().Mode().TagLength()
- packetLength = 3 + len(s2kBytes) + ivLen + keySize + tagLen
- }
- if version > 5 {
- packetLength += 2 // additional octet count fields
- }
-
- err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength)
- if err != nil {
- return
- }
-
- // Symmetric Key Encrypted Version
- buf := []byte{byte(version)}
-
- if version > 5 {
- // Scalar octet count
- buf = append(buf, byte(3+len(s2kBytes)+config.AEAD().Mode().IvLength()))
- }
-
- // Cipher function
- buf = append(buf, byte(cipherFunc))
-
- if version >= 5 {
- // AEAD mode
- buf = append(buf, byte(config.AEAD().Mode()))
- }
- if version > 5 {
- // Scalar octet count
- buf = append(buf, byte(len(s2kBytes)))
- }
- _, err = w.Write(buf)
- if err != nil {
- return
- }
- _, err = w.Write(s2kBytes)
- if err != nil {
- return
- }
-
- switch version {
- case 4:
- iv := make([]byte, cipherFunc.blockSize())
- c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv)
- encryptedCipherAndKey := make([]byte, keySize+1)
- c.XORKeyStream(encryptedCipherAndKey, buf[1:])
- c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey)
- _, err = w.Write(encryptedCipherAndKey)
- if err != nil {
- return
- }
- case 5, 6:
- mode := config.AEAD().Mode()
- adata := []byte{0xc3, byte(version), byte(cipherFunc), byte(mode)}
- aead := getEncryptedKeyAeadInstance(cipherFunc, mode, keyEncryptingKey, adata, version)
-
- // Sample iv using random reader
- iv := make([]byte, config.AEAD().Mode().IvLength())
- _, err = io.ReadFull(config.Random(), iv)
- if err != nil {
- return
- }
- // Seal and write (encryptedData includes auth. tag)
-
- encryptedData := aead.Seal(nil, iv, sessionKey, adata)
- _, err = w.Write(iv)
- if err != nil {
- return
- }
- _, err = w.Write(encryptedData)
- if err != nil {
- return
- }
- }
-
- return
-}
-
-func getEncryptedKeyAeadInstance(c CipherFunction, mode AEADMode, inputKey, associatedData []byte, version int) (aead cipher.AEAD) {
- var blockCipher cipher.Block
- if version > 5 {
- hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, associatedData)
-
- encryptionKey := make([]byte, c.KeySize())
- _, _ = readFull(hkdfReader, encryptionKey)
-
- blockCipher = c.new(encryptionKey)
- } else {
- blockCipher = c.new(inputKey)
- }
- return mode.new(blockCipher)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go
deleted file mode 100644
index e9bbf032..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-const aeadSaltSize = 32
-
-// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The
-// encrypted Contents will consist of more OpenPGP packets. See RFC 4880,
-// sections 5.7 and 5.13.
-type SymmetricallyEncrypted struct {
- Version int
- Contents io.Reader // contains tag for version 2
- IntegrityProtected bool // If true it is type 18 (with MDC or AEAD). False is packet type 9
-
- // Specific to version 1
- prefix []byte
-
- // Specific to version 2
- Cipher CipherFunction
- Mode AEADMode
- ChunkSizeByte byte
- Salt [aeadSaltSize]byte
-}
-
-const (
- symmetricallyEncryptedVersionMdc = 1
- symmetricallyEncryptedVersionAead = 2
-)
-
-func (se *SymmetricallyEncrypted) parse(r io.Reader) error {
- if se.IntegrityProtected {
- // See RFC 4880, section 5.13.
- var buf [1]byte
- _, err := readFull(r, buf[:])
- if err != nil {
- return err
- }
-
- switch buf[0] {
- case symmetricallyEncryptedVersionMdc:
- se.Version = symmetricallyEncryptedVersionMdc
- case symmetricallyEncryptedVersionAead:
- se.Version = symmetricallyEncryptedVersionAead
- if err := se.parseAead(r); err != nil {
- return err
- }
- default:
- return errors.UnsupportedError("unknown SymmetricallyEncrypted version")
- }
- }
- se.Contents = r
- return nil
-}
-
-// Decrypt returns a ReadCloser, from which the decrypted Contents of the
-// packet can be read. An incorrect key will only be detected after trying
-// to decrypt the entire data.
-func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) {
- if se.Version == symmetricallyEncryptedVersionAead {
- return se.decryptAead(key)
- }
-
- return se.decryptMdc(c, key)
-}
-
-// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet
-// to w and returns a WriteCloser to which the to-be-encrypted packets can be
-// written.
-// If config is nil, sensible defaults will be used.
-func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, aeadSupported bool, cipherSuite CipherSuite, key []byte, config *Config) (Contents io.WriteCloser, err error) {
- writeCloser := noOpCloser{w}
- ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedIntegrityProtected)
- if err != nil {
- return
- }
-
- if aeadSupported {
- return serializeSymmetricallyEncryptedAead(ciphertext, cipherSuite, config.AEADConfig.ChunkSizeByte(), config.Random(), key)
- }
-
- return serializeSymmetricallyEncryptedMdc(ciphertext, c, key, config)
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go
deleted file mode 100644
index a8ef0bbb..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_aead.go
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2023 Proton AG. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "crypto/cipher"
- "crypto/sha256"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "golang.org/x/crypto/hkdf"
-)
-
-// parseAead parses a V2 SEIPD packet (AEAD) as specified in
-// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
-func (se *SymmetricallyEncrypted) parseAead(r io.Reader) error {
- headerData := make([]byte, 3)
- if n, err := io.ReadFull(r, headerData); n < 3 {
- return errors.StructuralError("could not read aead header: " + err.Error())
- }
-
- // Cipher
- se.Cipher = CipherFunction(headerData[0])
- // cipherFunc must have block size 16 to use AEAD
- if se.Cipher.blockSize() != 16 {
- return errors.UnsupportedError("invalid aead cipher: " + string(se.Cipher))
- }
-
- // Mode
- se.Mode = AEADMode(headerData[1])
- if se.Mode.TagLength() == 0 {
- return errors.UnsupportedError("unknown aead mode: " + string(se.Mode))
- }
-
- // Chunk size
- se.ChunkSizeByte = headerData[2]
- if se.ChunkSizeByte > 16 {
- return errors.UnsupportedError("invalid aead chunk size byte: " + string(se.ChunkSizeByte))
- }
-
- // Salt
- if n, err := io.ReadFull(r, se.Salt[:]); n < aeadSaltSize {
- return errors.StructuralError("could not read aead salt: " + err.Error())
- }
-
- return nil
-}
-
-// associatedData for chunks: tag, version, cipher, mode, chunk size byte
-func (se *SymmetricallyEncrypted) associatedData() []byte {
- return []byte{
- 0xD2,
- symmetricallyEncryptedVersionAead,
- byte(se.Cipher),
- byte(se.Mode),
- se.ChunkSizeByte,
- }
-}
-
-// decryptAead decrypts a V2 SEIPD packet (AEAD) as specified in
-// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
-func (se *SymmetricallyEncrypted) decryptAead(inputKey []byte) (io.ReadCloser, error) {
- aead, nonce := getSymmetricallyEncryptedAeadInstance(se.Cipher, se.Mode, inputKey, se.Salt[:], se.associatedData())
-
- // Carry the first tagLen bytes
- tagLen := se.Mode.TagLength()
- peekedBytes := make([]byte, tagLen)
- n, err := io.ReadFull(se.Contents, peekedBytes)
- if n < tagLen || (err != nil && err != io.EOF) {
- return nil, errors.StructuralError("not enough data to decrypt:" + err.Error())
- }
-
- return &aeadDecrypter{
- aeadCrypter: aeadCrypter{
- aead: aead,
- chunkSize: decodeAEADChunkSize(se.ChunkSizeByte),
- initialNonce: nonce,
- associatedData: se.associatedData(),
- chunkIndex: make([]byte, 8),
- packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected,
- },
- reader: se.Contents,
- peekedBytes: peekedBytes,
- }, nil
-}
-
-// serializeSymmetricallyEncryptedAead encrypts to a writer a V2 SEIPD packet (AEAD) as specified in
-// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
-func serializeSymmetricallyEncryptedAead(ciphertext io.WriteCloser, cipherSuite CipherSuite, chunkSizeByte byte, rand io.Reader, inputKey []byte) (Contents io.WriteCloser, err error) {
- // cipherFunc must have block size 16 to use AEAD
- if cipherSuite.Cipher.blockSize() != 16 {
- return nil, errors.InvalidArgumentError("invalid aead cipher function")
- }
-
- if cipherSuite.Cipher.KeySize() != len(inputKey) {
- return nil, errors.InvalidArgumentError("error in aead serialization: bad key length")
- }
-
- // Data for en/decryption: tag, version, cipher, aead mode, chunk size
- prefix := []byte{
- 0xD2,
- symmetricallyEncryptedVersionAead,
- byte(cipherSuite.Cipher),
- byte(cipherSuite.Mode),
- chunkSizeByte,
- }
-
- // Write header (that correspond to prefix except first byte)
- n, err := ciphertext.Write(prefix[1:])
- if err != nil || n < 4 {
- return nil, err
- }
-
- // Random salt
- salt := make([]byte, aeadSaltSize)
- if _, err := io.ReadFull(rand, salt); err != nil {
- return nil, err
- }
-
- if _, err := ciphertext.Write(salt); err != nil {
- return nil, err
- }
-
- aead, nonce := getSymmetricallyEncryptedAeadInstance(cipherSuite.Cipher, cipherSuite.Mode, inputKey, salt, prefix)
-
- return &aeadEncrypter{
- aeadCrypter: aeadCrypter{
- aead: aead,
- chunkSize: decodeAEADChunkSize(chunkSizeByte),
- associatedData: prefix,
- chunkIndex: make([]byte, 8),
- initialNonce: nonce,
- packetTag: packetTypeSymmetricallyEncryptedIntegrityProtected,
- },
- writer: ciphertext,
- }, nil
-}
-
-func getSymmetricallyEncryptedAeadInstance(c CipherFunction, mode AEADMode, inputKey, salt, associatedData []byte) (aead cipher.AEAD, nonce []byte) {
- hkdfReader := hkdf.New(sha256.New, inputKey, salt, associatedData)
-
- encryptionKey := make([]byte, c.KeySize())
- _, _ = readFull(hkdfReader, encryptionKey)
-
- // Last 64 bits of nonce are the counter
- nonce = make([]byte, mode.IvLength()-8)
-
- _, _ = readFull(hkdfReader, nonce)
-
- blockCipher := c.new(encryptionKey)
- aead = mode.new(blockCipher)
-
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go
deleted file mode 100644
index 645963fa..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted_mdc.go
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "crypto/cipher"
- "crypto/sha1"
- "crypto/subtle"
- "hash"
- "io"
- "strconv"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
-)
-
-// seMdcReader wraps an io.Reader with a no-op Close method.
-type seMdcReader struct {
- in io.Reader
-}
-
-func (ser seMdcReader) Read(buf []byte) (int, error) {
- return ser.in.Read(buf)
-}
-
-func (ser seMdcReader) Close() error {
- return nil
-}
-
-func (se *SymmetricallyEncrypted) decryptMdc(c CipherFunction, key []byte) (io.ReadCloser, error) {
- if !c.IsSupported() {
- return nil, errors.UnsupportedError("unsupported cipher: " + strconv.Itoa(int(c)))
- }
-
- if len(key) != c.KeySize() {
- return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length")
- }
-
- if se.prefix == nil {
- se.prefix = make([]byte, c.blockSize()+2)
- _, err := readFull(se.Contents, se.prefix)
- if err != nil {
- return nil, err
- }
- } else if len(se.prefix) != c.blockSize()+2 {
- return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths")
- }
-
- ocfbResync := OCFBResync
- if se.IntegrityProtected {
- // MDC packets use a different form of OCFB mode.
- ocfbResync = OCFBNoResync
- }
-
- s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync)
-
- plaintext := cipher.StreamReader{S: s, R: se.Contents}
-
- if se.IntegrityProtected {
- // IntegrityProtected packets have an embedded hash that we need to check.
- h := sha1.New()
- h.Write(se.prefix)
- return &seMDCReader{in: plaintext, h: h}, nil
- }
-
- // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser.
- return seMdcReader{plaintext}, nil
-}
-
-const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size
-
-// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold
-// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an
-// MDC packet containing a hash of the previous Contents which is checked
-// against the running hash. See RFC 4880, section 5.13.
-type seMDCReader struct {
- in io.Reader
- h hash.Hash
- trailer [mdcTrailerSize]byte
- scratch [mdcTrailerSize]byte
- trailerUsed int
- error bool
- eof bool
-}
-
-func (ser *seMDCReader) Read(buf []byte) (n int, err error) {
- if ser.error {
- err = io.ErrUnexpectedEOF
- return
- }
- if ser.eof {
- err = io.EOF
- return
- }
-
- // If we haven't yet filled the trailer buffer then we must do that
- // first.
- for ser.trailerUsed < mdcTrailerSize {
- n, err = ser.in.Read(ser.trailer[ser.trailerUsed:])
- ser.trailerUsed += n
- if err == io.EOF {
- if ser.trailerUsed != mdcTrailerSize {
- n = 0
- err = io.ErrUnexpectedEOF
- ser.error = true
- return
- }
- ser.eof = true
- n = 0
- return
- }
-
- if err != nil {
- n = 0
- return
- }
- }
-
- // If it's a short read then we read into a temporary buffer and shift
- // the data into the caller's buffer.
- if len(buf) <= mdcTrailerSize {
- n, err = readFull(ser.in, ser.scratch[:len(buf)])
- copy(buf, ser.trailer[:n])
- ser.h.Write(buf[:n])
- copy(ser.trailer[:], ser.trailer[n:])
- copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:])
- if n < len(buf) {
- ser.eof = true
- err = io.EOF
- }
- return
- }
-
- n, err = ser.in.Read(buf[mdcTrailerSize:])
- copy(buf, ser.trailer[:])
- ser.h.Write(buf[:n])
- copy(ser.trailer[:], buf[n:])
-
- if err == io.EOF {
- ser.eof = true
- }
- return
-}
-
-// This is a new-format packet tag byte for a type 19 (Integrity Protected) packet.
-const mdcPacketTagByte = byte(0x80) | 0x40 | 19
-
-func (ser *seMDCReader) Close() error {
- if ser.error {
- return errors.ErrMDCMissing
- }
-
- for !ser.eof {
- // We haven't seen EOF so we need to read to the end
- var buf [1024]byte
- _, err := ser.Read(buf[:])
- if err == io.EOF {
- break
- }
- if err != nil {
- return errors.ErrMDCMissing
- }
- }
-
- ser.h.Write(ser.trailer[:2])
-
- final := ser.h.Sum(nil)
- if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 {
- return errors.ErrMDCHashMismatch
- }
- // The hash already includes the MDC header, but we still check its value
- // to confirm encryption correctness
- if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size {
- return errors.ErrMDCMissing
- }
- return nil
-}
-
-// An seMDCWriter writes through to an io.WriteCloser while maintains a running
-// hash of the data written. On close, it emits an MDC packet containing the
-// running hash.
-type seMDCWriter struct {
- w io.WriteCloser
- h hash.Hash
-}
-
-func (w *seMDCWriter) Write(buf []byte) (n int, err error) {
- w.h.Write(buf)
- return w.w.Write(buf)
-}
-
-func (w *seMDCWriter) Close() (err error) {
- var buf [mdcTrailerSize]byte
-
- buf[0] = mdcPacketTagByte
- buf[1] = sha1.Size
- w.h.Write(buf[:2])
- digest := w.h.Sum(nil)
- copy(buf[2:], digest)
-
- _, err = w.w.Write(buf[:])
- if err != nil {
- return
- }
- return w.w.Close()
-}
-
-// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
-type noOpCloser struct {
- w io.Writer
-}
-
-func (c noOpCloser) Write(data []byte) (n int, err error) {
- return c.w.Write(data)
-}
-
-func (c noOpCloser) Close() error {
- return nil
-}
-
-func serializeSymmetricallyEncryptedMdc(ciphertext io.WriteCloser, c CipherFunction, key []byte, config *Config) (Contents io.WriteCloser, err error) {
- // Disallow old cipher suites
- if !c.IsSupported() || c < CipherAES128 {
- return nil, errors.InvalidArgumentError("invalid mdc cipher function")
- }
-
- if c.KeySize() != len(key) {
- return nil, errors.InvalidArgumentError("error in mdc serialization: bad key length")
- }
-
- _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersionMdc})
- if err != nil {
- return
- }
-
- block := c.new(key)
- blockSize := block.BlockSize()
- iv := make([]byte, blockSize)
- _, err = io.ReadFull(config.Random(), iv)
- if err != nil {
- return nil, err
- }
- if err != nil {
- return
- }
- s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync)
- _, err = ciphertext.Write(prefix)
- if err != nil {
- return
- }
- plaintext := cipher.StreamWriter{S: s, W: ciphertext}
-
- h := sha1.New()
- h.Write(iv)
- h.Write(iv[blockSize-2:])
- Contents = &seMDCWriter{w: plaintext, h: h}
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go
deleted file mode 100644
index 63814ed1..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "bytes"
- "image"
- "image/jpeg"
- "io"
-)
-
-const UserAttrImageSubpacket = 1
-
-// UserAttribute is capable of storing other types of data about a user
-// beyond name, email and a text comment. In practice, user attributes are typically used
-// to store a signed thumbnail photo JPEG image of the user.
-// See RFC 4880, section 5.12.
-type UserAttribute struct {
- Contents []*OpaqueSubpacket
-}
-
-// NewUserAttributePhoto creates a user attribute packet
-// containing the given images.
-func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) {
- uat = new(UserAttribute)
- for _, photo := range photos {
- var buf bytes.Buffer
- // RFC 4880, Section 5.12.1.
- data := []byte{
- 0x10, 0x00, // Little-endian image header length (16 bytes)
- 0x01, // Image header version 1
- 0x01, // JPEG
- 0, 0, 0, 0, // 12 reserved octets, must be all zero.
- 0, 0, 0, 0,
- 0, 0, 0, 0}
- if _, err = buf.Write(data); err != nil {
- return
- }
- if err = jpeg.Encode(&buf, photo, nil); err != nil {
- return
- }
-
- lengthBuf := make([]byte, 5)
- n := serializeSubpacketLength(lengthBuf, len(buf.Bytes())+1)
- lengthBuf = lengthBuf[:n]
-
- uat.Contents = append(uat.Contents, &OpaqueSubpacket{
- SubType: UserAttrImageSubpacket,
- EncodedLength: lengthBuf,
- Contents: buf.Bytes(),
- })
- }
- return
-}
-
-// NewUserAttribute creates a new user attribute packet containing the given subpackets.
-func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute {
- return &UserAttribute{Contents: contents}
-}
-
-func (uat *UserAttribute) parse(r io.Reader) (err error) {
- // RFC 4880, section 5.13
- b, err := io.ReadAll(r)
- if err != nil {
- return
- }
- uat.Contents, err = OpaqueSubpackets(b)
- return
-}
-
-// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including
-// header.
-func (uat *UserAttribute) Serialize(w io.Writer) (err error) {
- var buf bytes.Buffer
- for _, sp := range uat.Contents {
- err = sp.Serialize(&buf)
- if err != nil {
- return err
- }
- }
- if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil {
- return err
- }
- _, err = w.Write(buf.Bytes())
- return
-}
-
-// ImageData returns zero or more byte slices, each containing
-// JPEG File Interchange Format (JFIF), for each photo in the
-// user attribute packet.
-func (uat *UserAttribute) ImageData() (imageData [][]byte) {
- for _, sp := range uat.Contents {
- if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 {
- imageData = append(imageData, sp.Contents[16:])
- }
- }
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go
deleted file mode 100644
index 3c7451a3..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
- "io"
- "strings"
-)
-
-// UserId contains text that is intended to represent the name and email
-// address of the key holder. See RFC 4880, section 5.11. By convention, this
-// takes the form "Full Name (Comment) "
-type UserId struct {
- Id string // By convention, this takes the form "Full Name (Comment) " which is split out in the fields below.
-
- Name, Comment, Email string
-}
-
-func hasInvalidCharacters(s string) bool {
- for _, c := range s {
- switch c {
- case '(', ')', '<', '>', 0:
- return true
- }
- }
- return false
-}
-
-// NewUserId returns a UserId or nil if any of the arguments contain invalid
-// characters. The invalid characters are '\x00', '(', ')', '<' and '>'
-func NewUserId(name, comment, email string) *UserId {
- // RFC 4880 doesn't deal with the structure of userid strings; the
- // name, comment and email form is just a convention. However, there's
- // no convention about escaping the metacharacters and GPG just refuses
- // to create user ids where, say, the name contains a '('. We mirror
- // this behaviour.
-
- if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) {
- return nil
- }
-
- uid := new(UserId)
- uid.Name, uid.Comment, uid.Email = name, comment, email
- uid.Id = name
- if len(comment) > 0 {
- if len(uid.Id) > 0 {
- uid.Id += " "
- }
- uid.Id += "("
- uid.Id += comment
- uid.Id += ")"
- }
- if len(email) > 0 {
- if len(uid.Id) > 0 {
- uid.Id += " "
- }
- uid.Id += "<"
- uid.Id += email
- uid.Id += ">"
- }
- return uid
-}
-
-func (uid *UserId) parse(r io.Reader) (err error) {
- // RFC 4880, section 5.11
- b, err := io.ReadAll(r)
- if err != nil {
- return
- }
- uid.Id = string(b)
- uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id)
- return
-}
-
-// Serialize marshals uid to w in the form of an OpenPGP packet, including
-// header.
-func (uid *UserId) Serialize(w io.Writer) error {
- err := serializeHeader(w, packetTypeUserId, len(uid.Id))
- if err != nil {
- return err
- }
- _, err = w.Write([]byte(uid.Id))
- return err
-}
-
-// parseUserId extracts the name, comment and email from a user id string that
-// is formatted as "Full Name (Comment) ".
-func parseUserId(id string) (name, comment, email string) {
- var n, c, e struct {
- start, end int
- }
- var state int
-
- for offset, rune := range id {
- switch state {
- case 0:
- // Entering name
- n.start = offset
- state = 1
- fallthrough
- case 1:
- // In name
- if rune == '(' {
- state = 2
- n.end = offset
- } else if rune == '<' {
- state = 5
- n.end = offset
- }
- case 2:
- // Entering comment
- c.start = offset
- state = 3
- fallthrough
- case 3:
- // In comment
- if rune == ')' {
- state = 4
- c.end = offset
- }
- case 4:
- // Between comment and email
- if rune == '<' {
- state = 5
- }
- case 5:
- // Entering email
- e.start = offset
- state = 6
- fallthrough
- case 6:
- // In email
- if rune == '>' {
- state = 7
- e.end = offset
- }
- default:
- // After email
- }
- }
- switch state {
- case 1:
- // ended in the name
- n.end = len(id)
- case 3:
- // ended in comment
- c.end = len(id)
- case 6:
- // ended in email
- e.end = len(id)
- }
-
- name = strings.TrimSpace(id[n.start:n.end])
- comment = strings.TrimSpace(id[c.start:c.end])
- email = strings.TrimSpace(id[e.start:e.end])
-
- // RFC 2822 3.4: alternate simple form of a mailbox
- if email == "" && strings.ContainsRune(name, '@') {
- email = name
- name = ""
- }
-
- return
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go
deleted file mode 100644
index 33bceb3f..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go
+++ /dev/null
@@ -1,619 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package openpgp implements high level operations on OpenPGP messages.
-package openpgp // import "github.com/ProtonMail/go-crypto/openpgp"
-
-import (
- "crypto"
- _ "crypto/sha256"
- _ "crypto/sha512"
- "hash"
- "io"
- "strconv"
-
- "github.com/ProtonMail/go-crypto/openpgp/armor"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "github.com/ProtonMail/go-crypto/openpgp/packet"
- _ "golang.org/x/crypto/sha3"
-)
-
-// SignatureType is the armor type for a PGP signature.
-var SignatureType = "PGP SIGNATURE"
-
-// readArmored reads an armored block with the given type.
-func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) {
- block, err := armor.Decode(r)
- if err != nil {
- return
- }
-
- if block.Type != expectedType {
- return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type)
- }
-
- return block.Body, nil
-}
-
-// MessageDetails contains the result of parsing an OpenPGP encrypted and/or
-// signed message.
-type MessageDetails struct {
- IsEncrypted bool // true if the message was encrypted.
- EncryptedToKeyIds []uint64 // the list of recipient key ids.
- IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message.
- DecryptedWith Key // the private key used to decrypt the message, if any.
- IsSigned bool // true if the message is signed.
- SignedByKeyId uint64 // the key id of the signer, if any.
- SignedByFingerprint []byte // the key fingerprint of the signer, if any.
- SignedBy *Key // the key of the signer, if available.
- LiteralData *packet.LiteralData // the metadata of the contents
- UnverifiedBody io.Reader // the contents of the message.
-
- // If IsSigned is true and SignedBy is non-zero then the signature will
- // be verified as UnverifiedBody is read. The signature cannot be
- // checked until the whole of UnverifiedBody is read so UnverifiedBody
- // must be consumed until EOF before the data can be trusted. Even if a
- // message isn't signed (or the signer is unknown) the data may contain
- // an authentication code that is only checked once UnverifiedBody has
- // been consumed. Once EOF has been seen, the following fields are
- // valid. (An authentication code failure is reported as a
- // SignatureError error when reading from UnverifiedBody.)
- Signature *packet.Signature // the signature packet itself.
- SignatureError error // nil if the signature is good.
- UnverifiedSignatures []*packet.Signature // all other unverified signature packets.
-
- decrypted io.ReadCloser
-}
-
-// A PromptFunction is used as a callback by functions that may need to decrypt
-// a private key, or prompt for a passphrase. It is called with a list of
-// acceptable, encrypted private keys and a boolean that indicates whether a
-// passphrase is usable. It should either decrypt a private key or return a
-// passphrase to try. If the decrypted private key or given passphrase isn't
-// correct, the function will be called again, forever. Any error returned will
-// be passed up.
-type PromptFunction func(keys []Key, symmetric bool) ([]byte, error)
-
-// A keyEnvelopePair is used to store a private key with the envelope that
-// contains a symmetric key, encrypted with that key.
-type keyEnvelopePair struct {
- key Key
- encryptedKey *packet.EncryptedKey
-}
-
-// ReadMessage parses an OpenPGP message that may be signed and/or encrypted.
-// The given KeyRing should contain both public keys (for signature
-// verification) and, possibly encrypted, private keys for decrypting.
-// If config is nil, sensible defaults will be used.
-func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) {
- var p packet.Packet
-
- var symKeys []*packet.SymmetricKeyEncrypted
- var pubKeys []keyEnvelopePair
- // Integrity protected encrypted packet: SymmetricallyEncrypted or AEADEncrypted
- var edp packet.EncryptedDataPacket
-
- packets := packet.NewReader(r)
- md = new(MessageDetails)
- md.IsEncrypted = true
-
- // The message, if encrypted, starts with a number of packets
- // containing an encrypted decryption key. The decryption key is either
- // encrypted to a public key, or with a passphrase. This loop
- // collects these packets.
-ParsePackets:
- for {
- p, err = packets.Next()
- if err != nil {
- return nil, err
- }
- switch p := p.(type) {
- case *packet.SymmetricKeyEncrypted:
- // This packet contains the decryption key encrypted with a passphrase.
- md.IsSymmetricallyEncrypted = true
- symKeys = append(symKeys, p)
- case *packet.EncryptedKey:
- // This packet contains the decryption key encrypted to a public key.
- md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId)
- switch p.Algo {
- case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH, packet.PubKeyAlgoX25519, packet.PubKeyAlgoX448, packet.ExperimentalPubKeyAlgoAEAD:
- break
- default:
- continue
- }
- if keyring != nil {
- var keys []Key
- if p.KeyId == 0 {
- keys = keyring.DecryptionKeys()
- } else {
- keys = keyring.KeysById(p.KeyId)
- }
- for _, k := range keys {
- pubKeys = append(pubKeys, keyEnvelopePair{k, p})
- }
- }
- case *packet.SymmetricallyEncrypted:
- if !p.IntegrityProtected && !config.AllowUnauthenticatedMessages() {
- return nil, errors.UnsupportedError("message is not integrity protected")
- }
- edp = p
- break ParsePackets
- case *packet.AEADEncrypted:
- edp = p
- break ParsePackets
- case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature:
- // This message isn't encrypted.
- if len(symKeys) != 0 || len(pubKeys) != 0 {
- return nil, errors.StructuralError("key material not followed by encrypted message")
- }
- packets.Unread(p)
- return readSignedMessage(packets, nil, keyring, config)
- }
- }
-
- var candidates []Key
- var decrypted io.ReadCloser
-
- // Now that we have the list of encrypted keys we need to decrypt at
- // least one of them or, if we cannot, we need to call the prompt
- // function so that it can decrypt a key or give us a passphrase.
-FindKey:
- for {
- // See if any of the keys already have a private key available
- candidates = candidates[:0]
- candidateFingerprints := make(map[string]bool)
-
- for _, pk := range pubKeys {
- if pk.key.PrivateKey == nil {
- continue
- }
- if !pk.key.PrivateKey.Encrypted {
- if len(pk.encryptedKey.Key) == 0 {
- errDec := pk.encryptedKey.Decrypt(pk.key.PrivateKey, config)
- if errDec != nil {
- continue
- }
- }
- // Try to decrypt symmetrically encrypted
- decrypted, err = edp.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key)
- if err != nil && err != errors.ErrKeyIncorrect {
- return nil, err
- }
- if decrypted != nil {
- md.DecryptedWith = pk.key
- break FindKey
- }
- } else {
- fpr := string(pk.key.PublicKey.Fingerprint[:])
- if v := candidateFingerprints[fpr]; v {
- continue
- }
- candidates = append(candidates, pk.key)
- candidateFingerprints[fpr] = true
- }
- }
-
- if len(candidates) == 0 && len(symKeys) == 0 {
- return nil, errors.ErrKeyIncorrect
- }
-
- if prompt == nil {
- return nil, errors.ErrKeyIncorrect
- }
-
- passphrase, err := prompt(candidates, len(symKeys) != 0)
- if err != nil {
- return nil, err
- }
-
- // Try the symmetric passphrase first
- if len(symKeys) != 0 && passphrase != nil {
- for _, s := range symKeys {
- key, cipherFunc, err := s.Decrypt(passphrase)
- // In v4, on wrong passphrase, session key decryption is very likely to result in an invalid cipherFunc:
- // only for < 5% of cases we will proceed to decrypt the data
- if err == nil {
- decrypted, err = edp.Decrypt(cipherFunc, key)
- if err != nil {
- return nil, err
- }
- if decrypted != nil {
- break FindKey
- }
- }
- }
- }
- }
-
- md.decrypted = decrypted
- if err := packets.Push(decrypted); err != nil {
- return nil, err
- }
- mdFinal, sensitiveParsingErr := readSignedMessage(packets, md, keyring, config)
- if sensitiveParsingErr != nil {
- return nil, errors.StructuralError("parsing error")
- }
- return mdFinal, nil
-}
-
-// readSignedMessage reads a possibly signed message if mdin is non-zero then
-// that structure is updated and returned. Otherwise a fresh MessageDetails is
-// used.
-func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing, config *packet.Config) (md *MessageDetails, err error) {
- if mdin == nil {
- mdin = new(MessageDetails)
- }
- md = mdin
-
- var p packet.Packet
- var h hash.Hash
- var wrappedHash hash.Hash
- var prevLast bool
-FindLiteralData:
- for {
- p, err = packets.Next()
- if err != nil {
- return nil, err
- }
- switch p := p.(type) {
- case *packet.Compressed:
- if err := packets.Push(p.Body); err != nil {
- return nil, err
- }
- case *packet.OnePassSignature:
- if prevLast {
- return nil, errors.UnsupportedError("nested signature packets")
- }
-
- if p.IsLast {
- prevLast = true
- }
-
- h, wrappedHash, err = hashForSignature(p.Hash, p.SigType, p.Salt)
- if err != nil {
- md.SignatureError = err
- }
-
- md.IsSigned = true
- if p.Version == 6 {
- md.SignedByFingerprint = p.KeyFingerprint
- }
- md.SignedByKeyId = p.KeyId
-
- if keyring != nil {
- keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign)
- if len(keys) > 0 {
- md.SignedBy = &keys[0]
- }
- }
- case *packet.LiteralData:
- md.LiteralData = p
- break FindLiteralData
- }
- }
-
- if md.IsSigned && md.SignatureError == nil {
- md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md, config}
- } else if md.decrypted != nil {
- md.UnverifiedBody = &checkReader{md, false}
- } else {
- md.UnverifiedBody = md.LiteralData.Body
- }
-
- return md, nil
-}
-
-func wrapHashForSignature(hashFunc hash.Hash, sigType packet.SignatureType) (hash.Hash, error) {
- switch sigType {
- case packet.SigTypeBinary:
- return hashFunc, nil
- case packet.SigTypeText:
- return NewCanonicalTextHash(hashFunc), nil
- }
- return nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType)))
-}
-
-// hashForSignature returns a pair of hashes that can be used to verify a
-// signature. The signature may specify that the contents of the signed message
-// should be preprocessed (i.e. to normalize line endings). Thus this function
-// returns two hashes. The second should be used to hash the message itself and
-// performs any needed preprocessing.
-func hashForSignature(hashFunc crypto.Hash, sigType packet.SignatureType, sigSalt []byte) (hash.Hash, hash.Hash, error) {
- if _, ok := algorithm.HashToHashIdWithSha1(hashFunc); !ok {
- return nil, nil, errors.UnsupportedError("unsupported hash function")
- }
- if !hashFunc.Available() {
- return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashFunc)))
- }
- h := hashFunc.New()
- if sigSalt != nil {
- h.Write(sigSalt)
- }
- wrappedHash, err := wrapHashForSignature(h, sigType)
- if err != nil {
- return nil, nil, err
- }
- switch sigType {
- case packet.SigTypeBinary:
- return h, wrappedHash, nil
- case packet.SigTypeText:
- return h, wrappedHash, nil
- }
- return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType)))
-}
-
-// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF
-// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger
-// MDC checks.
-type checkReader struct {
- md *MessageDetails
- checked bool
-}
-
-func (cr *checkReader) Read(buf []byte) (int, error) {
- n, sensitiveParsingError := cr.md.LiteralData.Body.Read(buf)
- if sensitiveParsingError == io.EOF {
- if cr.checked {
- // Only check once
- return n, io.EOF
- }
- mdcErr := cr.md.decrypted.Close()
- if mdcErr != nil {
- return n, mdcErr
- }
- cr.checked = true
- return n, io.EOF
- }
-
- if sensitiveParsingError != nil {
- return n, errors.StructuralError("parsing error")
- }
-
- return n, nil
-}
-
-// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes
-// the data as it is read. When it sees an EOF from the underlying io.Reader
-// it parses and checks a trailing Signature packet and triggers any MDC checks.
-type signatureCheckReader struct {
- packets *packet.Reader
- h, wrappedHash hash.Hash
- md *MessageDetails
- config *packet.Config
-}
-
-func (scr *signatureCheckReader) Read(buf []byte) (int, error) {
- n, sensitiveParsingError := scr.md.LiteralData.Body.Read(buf)
-
- // Hash only if required
- if scr.md.SignedBy != nil {
- scr.wrappedHash.Write(buf[:n])
- }
-
- if sensitiveParsingError == io.EOF {
- var p packet.Packet
- var readError error
- var sig *packet.Signature
-
- p, readError = scr.packets.Next()
- for readError == nil {
- var ok bool
- if sig, ok = p.(*packet.Signature); ok {
- if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) {
- sig.Metadata = scr.md.LiteralData
- }
-
- // If signature KeyID matches
- if scr.md.SignedBy != nil && *sig.IssuerKeyId == scr.md.SignedByKeyId {
- key := scr.md.SignedBy
- signatureError := key.PublicKey.VerifySignature(scr.h, sig)
- if signatureError == nil {
- signatureError = checkSignatureDetails(key, sig, scr.config)
- }
- scr.md.Signature = sig
- scr.md.SignatureError = signatureError
- } else {
- scr.md.UnverifiedSignatures = append(scr.md.UnverifiedSignatures, sig)
- }
- }
-
- p, readError = scr.packets.Next()
- }
-
- if scr.md.SignedBy != nil && scr.md.Signature == nil {
- if scr.md.UnverifiedSignatures == nil {
- scr.md.SignatureError = errors.StructuralError("LiteralData not followed by signature")
- } else {
- scr.md.SignatureError = errors.StructuralError("No matching signature found")
- }
- }
-
- // The SymmetricallyEncrypted packet, if any, might have an
- // unsigned hash of its own. In order to check this we need to
- // close that Reader.
- if scr.md.decrypted != nil {
- mdcErr := scr.md.decrypted.Close()
- if mdcErr != nil {
- return n, mdcErr
- }
- }
- return n, io.EOF
- }
-
- if sensitiveParsingError != nil {
- return n, errors.StructuralError("parsing error")
- }
-
- return n, nil
-}
-
-// VerifyDetachedSignature takes a signed file and a detached signature and
-// returns the signature packet and the entity the signature was signed by,
-// if any, and a possible signature verification error.
-// If the signer isn't known, ErrUnknownIssuer is returned.
-func VerifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) {
- return verifyDetachedSignature(keyring, signed, signature, nil, false, config)
-}
-
-// VerifyDetachedSignatureAndHash performs the same actions as
-// VerifyDetachedSignature and checks that the expected hash functions were used.
-func VerifyDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) {
- return verifyDetachedSignature(keyring, signed, signature, expectedHashes, true, config)
-}
-
-// CheckDetachedSignature takes a signed file and a detached signature and
-// returns the entity the signature was signed by, if any, and a possible
-// signature verification error. If the signer isn't known,
-// ErrUnknownIssuer is returned.
-func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) {
- _, signer, err = verifyDetachedSignature(keyring, signed, signature, nil, false, config)
- return
-}
-
-// CheckDetachedSignatureAndHash performs the same actions as
-// CheckDetachedSignature and checks that the expected hash functions were used.
-func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (signer *Entity, err error) {
- _, signer, err = verifyDetachedSignature(keyring, signed, signature, expectedHashes, true, config)
- return
-}
-
-func verifyDetachedSignature(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, checkHashes bool, config *packet.Config) (sig *packet.Signature, signer *Entity, err error) {
- var issuerKeyId uint64
- var hashFunc crypto.Hash
- var sigType packet.SignatureType
- var keys []Key
- var p packet.Packet
-
- packets := packet.NewReader(signature)
- for {
- p, err = packets.Next()
- if err == io.EOF {
- return nil, nil, errors.ErrUnknownIssuer
- }
- if err != nil {
- return nil, nil, err
- }
-
- var ok bool
- sig, ok = p.(*packet.Signature)
- if !ok {
- return nil, nil, errors.StructuralError("non signature packet found")
- }
- if sig.IssuerKeyId == nil {
- return nil, nil, errors.StructuralError("signature doesn't have an issuer")
- }
- issuerKeyId = *sig.IssuerKeyId
- hashFunc = sig.Hash
- sigType = sig.SigType
- if checkHashes {
- matchFound := false
- // check for hashes
- for _, expectedHash := range expectedHashes {
- if hashFunc == expectedHash {
- matchFound = true
- break
- }
- }
- if !matchFound {
- return nil, nil, errors.StructuralError("hash algorithm or salt mismatch with cleartext message headers")
- }
- }
- keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign)
- if len(keys) > 0 {
- break
- }
- }
-
- if len(keys) == 0 {
- panic("unreachable")
- }
-
- h, err := sig.PrepareVerify()
- if err != nil {
- return nil, nil, err
- }
- wrappedHash, err := wrapHashForSignature(h, sigType)
- if err != nil {
- return nil, nil, err
- }
-
- if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF {
- return nil, nil, err
- }
-
- for _, key := range keys {
- err = key.PublicKey.VerifySignature(h, sig)
- if err == nil {
- return sig, key.Entity, checkSignatureDetails(&key, sig, config)
- }
- }
-
- return nil, nil, err
-}
-
-// CheckArmoredDetachedSignature performs the same actions as
-// CheckDetachedSignature but expects the signature to be armored.
-func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) {
- body, err := readArmored(signature, SignatureType)
- if err != nil {
- return
- }
-
- return CheckDetachedSignature(keyring, signed, body, config)
-}
-
-// checkSignatureDetails returns an error if:
-// - The signature (or one of the binding signatures mentioned below)
-// has a unknown critical notation data subpacket
-// - The primary key of the signing entity is revoked
-// - The primary identity is revoked
-// - The signature is expired
-// - The primary key of the signing entity is expired according to the
-// primary identity binding signature
-//
-// ... or, if the signature was signed by a subkey and:
-// - The signing subkey is revoked
-// - The signing subkey is expired according to the subkey binding signature
-// - The signing subkey binding signature is expired
-// - The signing subkey cross-signature is expired
-//
-// NOTE: The order of these checks is important, as the caller may choose to
-// ignore ErrSignatureExpired or ErrKeyExpired errors, but should never
-// ignore any other errors.
-func checkSignatureDetails(key *Key, signature *packet.Signature, config *packet.Config) error {
- now := config.Now()
- primarySelfSignature, primaryIdentity := key.Entity.PrimarySelfSignature()
- signedBySubKey := key.PublicKey != key.Entity.PrimaryKey
- sigsToCheck := []*packet.Signature{signature, primarySelfSignature}
- if signedBySubKey {
- sigsToCheck = append(sigsToCheck, key.SelfSignature, key.SelfSignature.EmbeddedSignature)
- }
- for _, sig := range sigsToCheck {
- for _, notation := range sig.Notations {
- if notation.IsCritical && !config.KnownNotation(notation.Name) {
- return errors.SignatureError("unknown critical notation: " + notation.Name)
- }
- }
- }
- if key.Entity.Revoked(now) || // primary key is revoked
- (signedBySubKey && key.Revoked(now)) || // subkey is revoked
- (primaryIdentity != nil && primaryIdentity.Revoked(now)) { // primary identity is revoked for v4
- return errors.ErrKeyRevoked
- }
- if key.Entity.PrimaryKey.KeyExpired(primarySelfSignature, now) { // primary key is expired
- return errors.ErrKeyExpired
- }
- if signedBySubKey {
- if key.PublicKey.KeyExpired(key.SelfSignature, now) { // subkey is expired
- return errors.ErrKeyExpired
- }
- }
- for _, sig := range sigsToCheck {
- if sig.SigExpired(now) { // any of the relevant signatures are expired
- return errors.ErrSignatureExpired
- }
- }
- return nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go
deleted file mode 100644
index 77282c0e..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go
+++ /dev/null
@@ -1,475 +0,0 @@
-package openpgp
-
-const testKey1KeyId uint64 = 0xA34D7E18C20C31BB
-const testKey3KeyId uint64 = 0x338934250CCC0360
-const testKeyP256KeyId uint64 = 0xd44a2c495918513e
-
-const signedInput = "Signed message\nline 2\nline 3\n"
-const signedTextInput = "Signed message\r\nline 2\r\nline 3\r\n"
-
-const recipientUnspecifiedHex = "848c0300000000000000000103ff62d4d578d03cf40c3da998dfe216c074fa6ddec5e31c197c9666ba292830d91d18716a80f699f9d897389a90e6d62d0238f5f07a5248073c0f24920e4bc4a30c2d17ee4e0cae7c3d4aaa4e8dced50e3010a80ee692175fa0385f62ecca4b56ee6e9980aa3ec51b61b077096ac9e800edaf161268593eedb6cc7027ff5cb32745d250010d407a6221ae22ef18469b444f2822478c4d190b24d36371a95cb40087cdd42d9399c3d06a53c0673349bfb607927f20d1e122bde1e2bf3aa6cae6edf489629bcaa0689539ae3b718914d88ededc3b"
-
-const detachedSignatureHex = "889c04000102000605024d449cd1000a0910a34d7e18c20c31bb167603ff57718d09f28a519fdc7b5a68b6a3336da04df85e38c5cd5d5bd2092fa4629848a33d85b1729402a2aab39c3ac19f9d573f773cc62c264dc924c067a79dfd8a863ae06c7c8686120760749f5fd9b1e03a64d20a7df3446ddc8f0aeadeaeba7cbaee5c1e366d65b6a0c6cc749bcb912d2f15013f812795c2e29eb7f7b77f39ce77"
-
-const detachedSignatureTextHex = "889c04010102000605024d449d21000a0910a34d7e18c20c31bbc8c60400a24fbef7342603a41cb1165767bd18985d015fb72fe05db42db36cfb2f1d455967f1e491194fbf6cf88146222b23bf6ffbd50d17598d976a0417d3192ff9cc0034fd00f287b02e90418bbefe609484b09231e4e7a5f3562e199bf39909ab5276c4d37382fe088f6b5c3426fc1052865da8b3ab158672d58b6264b10823dc4b39"
-
-const detachedSignatureDSAHex = "884604001102000605024d6c4eac000a0910338934250ccc0360f18d00a087d743d6405ed7b87755476629600b8b694a39e900a0abff8126f46faf1547c1743c37b21b4ea15b8f83"
-
-const detachedSignatureP256Hex = "885e0400130a0006050256e5bb00000a0910d44a2c495918513edef001009841a4f792beb0befccb35c8838a6a87d9b936beaa86db6745ddc7b045eee0cf00fd1ac1f78306b17e965935dd3f8bae4587a76587e4af231efe19cc4011a8434817"
-
-// The plaintext is https://www.gutenberg.org/cache/epub/1080/pg1080.txt
-const modestProposalSha512 = "lbbrB1+WP3T9AaC9OQqBdOcCjgeEQadlulXsNPgVx0tyqPzDHwUugZ2gE7V0ESKAw6kAVfgkcuvfgxAAGaeHtw=="
-
-const testKeys1And2Hex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b0020003b88d044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f0011010001889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab0020003988d044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37
aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b0020003b88d044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020003"
-
-const testKeys1And2PrivateHex = "9501d8044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd00110100010003ff4d91393b9a8e3430b14d6209df42f98dc927425b881f1209f319220841273a802a97c7bdb8b3a7740b3ab5866c4d1d308ad0d3a79bd1e883aacf1ac92dfe720285d10d08752a7efe3c609b1d00f17f2805b217be53999a7da7e493bfc3e9618fd17018991b8128aea70a05dbce30e4fbe626aa45775fa255dd9177aabf4df7cf0200c1ded12566e4bc2bb590455e5becfb2e2c9796482270a943343a7835de41080582c2be3caf5981aa838140e97afa40ad652a0b544f83eb1833b0957dce26e47b0200eacd6046741e9ce2ec5beb6fb5e6335457844fb09477f83b050a96be7da043e17f3a9523567ed40e7a521f818813a8b8a72209f1442844843ccc7eb9805442570200bdafe0438d97ac36e773c7162028d65844c4d463e2420aa2228c6e50dc2743c3d6c72d0d782a5173fe7be2169c8a9f4ef8a7cf3e37165e8c61b89c346cdc6c1799d2b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b00200009d01d8044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f00110100010003fd17a7490c22a79c59281fb7b20f5e6553ec0c1637ae382e8adaea295f50241037f8997cf42c1ce26417e015091451b15424b2c59eb8d4161b0975630408e394d3b00f88d4b4e18e2cc85e8251d4753a27c639c83f5ad4a571c4f19d7cd460b9b73c25ade730c99df09637bd173d8e3e981ac64432078263bb6dc30d3e974150dd0200d0ee05be3d4604d2146fb0457f31ba17c057560785aa80
4e8ca5530a7cd81d3440d0f4ba6851efcfd3954b7e68908fc0ba47f7ac37bf559c6c168b70d3a7c8cd0200da1c677c4bce06a068070f2b3733b0a714e88d62aa3f9a26c6f5216d48d5c2b5624144f3807c0df30be66b3268eeeca4df1fbded58faf49fc95dc3c35f134f8b01fd1396b6c0fc1b6c4f0eb8f5e44b8eace1e6073e20d0b8bc5385f86f1cf3f050f66af789f3ef1fc107b7f4421e19e0349c730c68f0a226981f4e889054fdb4dc149e8e889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab00200009501fe044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001fe030302e9030f3c783e14856063f16938530e148bc57a7aa3f3e4f90df9dceccdc779bc0835e1ad3d006e4a8d7b36d08b8e0de5a0d947254ecfbd22037e6572b426bcfdc517796b224b0036ff90bc574b5509bede85512f2eefb520fb4b02aa523ba739bff424a6fe81c5041f253f8d757e69a503d3563a104d0d49e9e890b9d0c26f96b55b743883b472caa7050c4acfd4a21f875bdf1258d88bd61224d303dc9df77f743137d51e6d5246b88c406780528fd9a3e15bab5452e5b93970d9dcc79f48b38651b9f15bfbcf6da452837e9cc70683d1bdca94507870f743e4ad902005812488dd342f836e72869afd00ce1850eea4cfa53ce10e3608e13d3c149394ee3cbd0e23d018fcbcb6e2ec5a1a22972d1d462ca05355d0d290dd2751e550d5efb38c6c89686344df64852bf4ff86638708f644e8ec6bd4af9b50d8541cb91891a431326ab2e332faa7ae86cfb6e0540aa63160c1e5cdd5a4add518b303fff0a20117c6bc77f7cfbaf36b04c865c6c2b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38
804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b00200009d01fe044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001fe030302e9030f3c783e148560f936097339ae381d63116efcf802ff8b1c9360767db5219cc987375702a4123fd8657d3e22700f23f95020d1b261eda5257e9a72f9a918e8ef22dd5b3323ae03bbc1923dd224db988cadc16acc04b120a9f8b7e84da9716c53e0334d7b66586ddb9014df604b41be1e960dcfcbc96f4ed150a1a0dd070b9eb14276b9b6be413a769a75b519a53d3ecc0c220e85cd91ca354d57e7344517e64b43b6e29823cbd87eae26e2b2e78e6dedfbb76e3e9f77bcb844f9a8932eb3db2c3f9e44316e6f5d60e9e2a56e46b72abe6b06dc9a31cc63f10023d1f5e12d2a3ee93b675c96f504af0001220991c88db759e231b3320dcedf814dcf723fd9857e3d72d66a0f2af26950b915abdf56c1596f46a325bf17ad4810d3535fb02a259b247ac3dbd4cc3ecf9c51b6c07cebb009c1506fba0a89321ec8683e3fd009a6e551d50243e2d5092fefb3321083a4bad91320dc624bd6b5dddf93553e3d53924c05bfebec1fb4bd47e89a1a889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020000"
-
-const dsaElGamalTestKeysHex = "9501e1044dfcb16a110400aa3e5c1a1f43dd28c2ffae8abf5cfce555ee874134d8ba0a0f7b868ce2214beddc74e5e1e21ded354a95d18acdaf69e5e342371a71fbb9093162e0c5f3427de413a7f2c157d83f5cd2f9d791256dc4f6f0e13f13c3302af27f2384075ab3021dff7a050e14854bbde0a1094174855fc02f0bae8e00a340d94a1f22b32e48485700a0cec672ac21258fb95f61de2ce1af74b2c4fa3e6703ff698edc9be22c02ae4d916e4fa223f819d46582c0516235848a77b577ea49018dcd5e9e15cff9dbb4663a1ae6dd7580fa40946d40c05f72814b0f88481207e6c0832c3bded4853ebba0a7e3bd8e8c66df33d5a537cd4acf946d1080e7a3dcea679cb2b11a72a33a2b6a9dc85f466ad2ddf4c3db6283fa645343286971e3dd700703fc0c4e290d45767f370831a90187e74e9972aae5bff488eeff7d620af0362bfb95c1a6c3413ab5d15a2e4139e5d07a54d72583914661ed6a87cce810be28a0aa8879a2dd39e52fb6fe800f4f181ac7e328f740cde3d09a05cecf9483e4cca4253e60d4429ffd679d9996a520012aad119878c941e3cf151459873bdfc2a9563472fe0303027a728f9feb3b864260a1babe83925ce794710cfd642ee4ae0e5b9d74cee49e9c67b6cd0ea5dfbb582132195a121356a1513e1bca73e5b80c58c7ccb4164453412f456c47616d616c2054657374204b65792031886204131102002205024dfcb16a021b03060b090807030206150802090a0b0416020301021e01021780000a091033af447ccd759b09fadd00a0b8fd6f5a790bad7e9f2dbb7632046dc4493588db009c087c6a9ba9f7f49fab221587a74788c00db4889ab00200009d0157044dfcb16a1004008dec3f9291205255ccff8c532318133a6840739dd68b03ba942676f9038612071447bf07d00d559c5c0875724ea16a4c774f80d8338b55fca691a0522e530e604215b467bbc9ccfd483a1da99d7bc2648b4318fdbd27766fc8bfad3fddb37c62b8ae7ccfe9577e9b8d1e77c1d417ed2c2ef02d52f4da11600d85d3229607943700030503ff506c94c87c8cab778e963b76cf63770f0a79bf48fb49d3b4e52234620fc9f7657f9f8d56c96a2b7c7826ae6b57ebb2221a3fe154b03b6637cea7e6d98e3e45d87cf8dc432f723d3d71f89c5192ac8d7290684d2c25ce55846a80c9a7823f6acd9bb29fa6cd71f20bc90eccfca20451d0c976e460e672b000df49466408d527affe0303027a728f9feb3b864260abd761730327bca2aaa4ea0525c175e92bf240682a0e83b226f97ecb2e935b62c9a133858ce31b271fa8eb41f6a1b3cd72a63025ce1a75ee4180dcc284884904181102000905024dfcb16a021b0c000a091033af447ccd
759b09dd0b009e3c3e7296092c81bee5a19929462caaf2fff3ae26009e218c437a2340e7ea628149af1ec98ec091a43992b00200009501e1044dfcb1be1104009f61faa61aa43df75d128cbe53de528c4aec49ce9360c992e70c77072ad5623de0a3a6212771b66b39a30dad6781799e92608316900518ec01184a85d872365b7d2ba4bacfb5882ea3c2473d3750dc6178cc1cf82147fb58caa28b28e9f12f6d1efcb0534abed644156c91cca4ab78834268495160b2400bc422beb37d237c2300a0cac94911b6d493bda1e1fbc6feeca7cb7421d34b03fe22cec6ccb39675bb7b94a335c2b7be888fd3906a1125f33301d8aa6ec6ee6878f46f73961c8d57a3e9544d8ef2a2cbfd4d52da665b1266928cfe4cb347a58c412815f3b2d2369dec04b41ac9a71cc9547426d5ab941cccf3b18575637ccfb42df1a802df3cfe0a999f9e7109331170e3a221991bf868543960f8c816c28097e503fe319db10fb98049f3a57d7c80c420da66d56f3644371631fad3f0ff4040a19a4fedc2d07727a1b27576f75a4d28c47d8246f27071e12d7a8de62aad216ddbae6aa02efd6b8a3e2818cda48526549791ab277e447b3a36c57cefe9b592f5eab73959743fcc8e83cbefec03a329b55018b53eec196765ae40ef9e20521a603c551efe0303020950d53a146bf9c66034d00c23130cce95576a2ff78016ca471276e8227fb30b1ffbd92e61804fb0c3eff9e30b1a826ee8f3e4730b4d86273ca977b4164453412f456c47616d616c2054657374204b65792032886204131102002205024dfcb1be021b03060b090807030206150802090a0b0416020301021e01021780000a0910a86bf526325b21b22bd9009e34511620415c974750a20df5cb56b182f3b48e6600a0a9466cb1a1305a84953445f77d461593f1d42bc1b00200009d0157044dfcb1be1004009565a951da1ee87119d600c077198f1c1bceb0f7aa54552489298e41ff788fa8f0d43a69871f0f6f77ebdfb14a4260cf9fbeb65d5844b4272a1904dd95136d06c3da745dc46327dd44a0f16f60135914368c8039a34033862261806bb2c5ce1152e2840254697872c85441ccb7321431d75a747a4bfb1d2c66362b51ce76311700030503fc0ea76601c196768070b7365a200e6ddb09307f262d5f39eec467b5f5784e22abdf1aa49226f59ab37cb49969d8f5230ea65caf56015abda62604544ed526c5c522bf92bed178a078789f6c807b6d34885688024a5bed9e9f8c58d11d4b82487b44c5f470c5606806a0443b79cadb45e0f897a561a53f724e5349b9267c75ca17fe0303020950d53a146bf9c660bc5f4ce8f072465e2d2466434320c1e712272fafc20e342fe7608101580fa1a1a367e60486a7cd1246b7ef5586cf5e10b327
62b710a30144f12dd17dd4884904181102000905024dfcb1be021b0c000a0910a86bf526325b21b2904c00a0b2b66b4b39ccffda1d10f3ea8d58f827e30a8b8e009f4255b2d8112a184e40cde43a34e8655ca7809370b0020000"
-
-const ed25519wX25519Key = "c54b0663877fe31b00000020f94da7bb48d60a61e567706a6587d0331999bb9d891a08242ead84543df895a3001972817b12be707e8d5f586ce61361201d344eb266a2c82fde6835762b65b0b7c2b1061f1b0a00000042058263877fe3030b090705150a0e080c021600029b03021e09222106cb186c4f0609a697e4d52dfa6c722b0c1f1e27c18a56708f6525ec27bad9acc905270902070200000000ad2820103e2d7d227ec0e6d7ce4471db36bfc97083253690271498a7ef0576c07faae14585b3b903b0127ec4fda2f023045a2ec76bcb4f9571a9651e14aee1137a1d668442c88f951e33c4ffd33fb9a17d511eed758fc6d9cc50cb5fd793b2039d5804c74b0663877fe319000000208693248367f9e5015db922f8f48095dda784987f2d5985b12fbad16caf5e4435004d600a4f794d44775c57a26e0feefed558e9afffd6ad0d582d57fb2ba2dcedb8c29b06181b0a0000002c050263877fe322a106cb186c4f0609a697e4d52dfa6c722b0c1f1e27c18a56708f6525ec27bad9acc9021b0c00000000defa20a6e9186d9d5935fc8fe56314cdb527486a5a5120f9b762a235a729f039010a56b89c658568341fbef3b894e9834ad9bc72afae2f4c9c47a43855e65f1cb0a3f77bbc5f61085c1f8249fe4e7ca59af5f0bcee9398e0fa8d76e522e1d8ab42bb0d"
-
-const signedMessageHex = "a3019bc0cbccc0c4b8d8b74ee2108fe16ec6d3ca490cbe362d3f8333d3f352531472538b8b13d353b97232f352158c20943157c71c16064626063656269052062e4e01987e9b6fccff4b7df3a34c534b23e679cbec3bc0f8f6e64dfb4b55fe3f8efa9ce110ddb5cd79faf1d753c51aecfa669f7e7aa043436596cccc3359cb7dd6bbe9ecaa69e5989d9e57209571edc0b2fa7f57b9b79a64ee6e99ce1371395fee92fec2796f7b15a77c386ff668ee27f6d38f0baa6c438b561657377bf6acff3c5947befd7bf4c196252f1d6e5c524d0300"
-
-const signedTextMessageHex = "a3019bc0cbccc8c4b8d8b74ee2108fe16ec6d36a250cbece0c178233d3f352531472538b8b13d35379b97232f352158ca0b4312f57c71c1646462606365626906a062e4e019811591798ff99bf8afee860b0d8a8c2a85c3387e3bcf0bb3b17987f2bbcfab2aa526d930cbfd3d98757184df3995c9f3e7790e36e3e9779f06089d4c64e9e47dd6202cb6e9bc73c5d11bb59fbaf89d22d8dc7cf199ddf17af96e77c5f65f9bbed56f427bd8db7af37f6c9984bf9385efaf5f184f986fb3e6adb0ecfe35bbf92d16a7aa2a344fb0bc52fb7624f0200"
-
-const signedEncryptedMessageHex = "c18c032a67d68660df41c70103ff5a84c9a72f80e74ef0384c2d6a9ebfe2b09e06a8f298394f6d2abf174e40934ab0ec01fb2d0ddf21211c6fe13eb238563663b017a6b44edca552eb4736c4b7dc6ed907dd9e12a21b51b64b46f902f76fb7aaf805c1db8070574d8d0431a23e324a750f77fb72340a17a42300ee4ca8207301e95a731da229a63ab9c6b44541fbd2c11d016d810b3b3b2b38f15b5b40f0a4910332829c2062f1f7cc61f5b03677d73c54cafa1004ced41f315d46444946faae571d6f426e6dbd45d9780eb466df042005298adabf7ce0ef766dfeb94cd449c7ed0046c880339599c4711af073ce649b1e237c40b50a5536283e03bdbb7afad78bd08707715c67fb43295f905b4c479178809d429a8e167a9a8c6dfd8ab20b4edebdc38d6dec879a3202e1b752690d9bb5b0c07c5a227c79cc200e713a99251a4219d62ad5556900cf69bd384b6c8e726c7be267471d0d23af956da165af4af757246c2ebcc302b39e8ef2fccb4971b234fcda22d759ddb20e27269ee7f7fe67898a9de721bfa02ab0becaa046d00ea16cb1afc4e2eab40d0ac17121c565686e5cbd0cbdfbd9d6db5c70278b9c9db5a83176d04f61fbfbc4471d721340ede2746e5c312ded4f26787985af92b64fae3f253dbdde97f6a5e1996fd4d865599e32ff76325d3e9abe93184c02988ee89a4504356a4ef3b9b7a57cbb9637ca90af34a7676b9ef559325c3cca4e29d69fec1887f5440bb101361d744ad292a8547f22b4f22b419a42aa836169b89190f46d9560824cb2ac6e8771de8223216a5e647e132ab9eebcba89569ab339cb1c3d70fe806b31f4f4c600b4103b8d7583ebff16e43dcda551e6530f975122eb8b29"
-
-const verifiedSignatureEncryptedMessageHex = "c2b304000108000605026048f6d600210910a34d7e18c20c31bb1621045fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb9a3b0400a32ddac1af259c1b0abab0041327ea04970944401978fb647dd1cf9aba4f164e43f0d8a9389501886474bdd4a6e77f6aea945c07dfbf87743835b44cc2c39a1f9aeecfa83135abc92e18e50396f2e6a06c44e0188b0081effbfb4160d28f118d4ff73dd199a102e47cffd8c7ff2bacd83ae72b5820c021a486766dd587b5da61"
-
-const unverifiedSignatureEncryptedMessageHex = "c2b304000108000605026048f6d600210910a34d7e18c20c31bb1621045fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb9a3b0400a32ddac1af259c1b0abab0041327ea04970944401978fb647dd1cf9aba4f164e43f0d8a9389501886474bdd4a6e77f6aea945c07dfbf87743835b44cc2c39a1f9aeecfa83135abc92e18e50396f2e6a06c44e0188b0081effbfb4160d28f118d4ff73dd199a102e47cffd8c7ff2bacd83ae72b5820c021a486766dd587b5da61"
-
-const signedEncryptedMessage2Hex = "85010e03cf6a7abcd43e36731003fb057f5495b79db367e277cdbe4ab90d924ddee0c0381494112ff8c1238fb0184af35d1731573b01bc4c55ecacd2aafbe2003d36310487d1ecc9ac994f3fada7f9f7f5c3a64248ab7782906c82c6ff1303b69a84d9a9529c31ecafbcdb9ba87e05439897d87e8a2a3dec55e14df19bba7f7bd316291c002ae2efd24f83f9e3441203fc081c0c23dc3092a454ca8a082b27f631abf73aca341686982e8fbda7e0e7d863941d68f3de4a755c2964407f4b5e0477b3196b8c93d551dd23c8beef7d0f03fbb1b6066f78907faf4bf1677d8fcec72651124080e0b7feae6b476e72ab207d38d90b958759fdedfc3c6c35717c9dbfc979b3cfbbff0a76d24a5e57056bb88acbd2a901ef64bc6e4db02adc05b6250ff378de81dca18c1910ab257dff1b9771b85bb9bbe0a69f5989e6d1710a35e6dfcceb7d8fb5ccea8db3932b3d9ff3fe0d327597c68b3622aec8e3716c83a6c93f497543b459b58ba504ed6bcaa747d37d2ca746fe49ae0a6ce4a8b694234e941b5159ff8bd34b9023da2814076163b86f40eed7c9472f81b551452d5ab87004a373c0172ec87ea6ce42ccfa7dbdad66b745496c4873d8019e8c28d6b3"
-
-const signatureEncryptedMessage2Hex = "c24604001102000605024dfd0166000a091033af447ccd759b09bae600a096ec5e63ecf0a403085e10f75cc3bab327663282009f51fad9df457ed8d2b70d8a73c76e0443eac0f377"
-
-const symmetricallyEncryptedCompressedHex = "c32e040903085a357c1a7b5614ed00cc0d1d92f428162058b3f558a0fb0980d221ebac6c97d5eda4e0fe32f6e706e94dd263012d6ca1ef8c4bbd324098225e603a10c85ebf09cbf7b5aeeb5ce46381a52edc51038b76a8454483be74e6dcd1e50d5689a8ae7eceaeefed98a0023d49b22eb1f65c2aa1ef1783bb5e1995713b0457102ec3c3075fe871267ffa4b686ad5d52000d857"
-
-const dsaTestKeyHex = "9901a2044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794"
-
-const dsaTestKeyPrivateHex = "9501bb044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4d00009f592e0619d823953577d4503061706843317e4fee083db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794"
-
-const p256TestKeyHex = "98520456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b7754b8560456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b6030108078861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e"
-
-const p256TestKeyPrivateHex = "94a50456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253fe070302f0c2bfb0b6c30f87ee1599472b8636477eab23ced13b271886a4b50ed34c9d8436af5af5b8f88921f0efba6ef8c37c459bbb88bc1c6a13bbd25c4ce9b1e97679569ee77645d469bf4b43de637f5561b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b77549ca90456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b603010807fe0703027510012471a603cfee2968dce19f732721ddf03e966fd133b4e3c7a685b788705cbc46fb026dc94724b830c9edbaecd2fb2c662f23169516cacd1fe423f0475c364ecc10abcabcfd4bbbda1a36a1bd8861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e"
-
-const armoredPrivateKeyBlock = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-Version: GnuPG v1.4.10 (GNU/Linux)
-
-lQHYBE2rFNoBBADFwqWQIW/DSqcB4yCQqnAFTJ27qS5AnB46ccAdw3u4Greeu3Bp
-idpoHdjULy7zSKlwR1EA873dO/k/e11Ml3dlAFUinWeejWaK2ugFP6JjiieSsrKn
-vWNicdCS4HTWn0X4sjl0ZiAygw6GNhqEQ3cpLeL0g8E9hnYzJKQ0LWJa0QARAQAB
-AAP/TB81EIo2VYNmTq0pK1ZXwUpxCrvAAIG3hwKjEzHcbQznsjNvPUihZ+NZQ6+X
-0HCfPAdPkGDCLCb6NavcSW+iNnLTrdDnSI6+3BbIONqWWdRDYJhqZCkqmG6zqSfL
-IdkJgCw94taUg5BWP/AAeQrhzjChvpMQTVKQL5mnuZbUCeMCAN5qrYMP2S9iKdnk
-VANIFj7656ARKt/nf4CBzxcpHTyB8+d2CtPDKCmlJP6vL8t58Jmih+kHJMvC0dzn
-gr5f5+sCAOOe5gt9e0am7AvQWhdbHVfJU0TQJx+m2OiCJAqGTB1nvtBLHdJnfdC9
-TnXXQ6ZXibqLyBies/xeY2sCKL5qtTMCAKnX9+9d/5yQxRyrQUHt1NYhaXZnJbHx
-q4ytu0eWz+5i68IYUSK69jJ1NWPM0T6SkqpB3KCAIv68VFm9PxqG1KmhSrQIVGVz
-dCBLZXmIuAQTAQIAIgUCTasU2gIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AA
-CgkQO9o98PRieSoLhgQAkLEZex02Qt7vGhZzMwuN0R22w3VwyYyjBx+fM3JFETy1
-ut4xcLJoJfIaF5ZS38UplgakHG0FQ+b49i8dMij0aZmDqGxrew1m4kBfjXw9B/v+
-eIqpODryb6cOSwyQFH0lQkXC040pjq9YqDsO5w0WYNXYKDnzRV0p4H1pweo2VDid
-AdgETasU2gEEAN46UPeWRqKHvA99arOxee38fBt2CI08iiWyI8T3J6ivtFGixSqV
-bRcPxYO/qLpVe5l84Nb3X71GfVXlc9hyv7CD6tcowL59hg1E/DC5ydI8K8iEpUmK
-/UnHdIY5h8/kqgGxkY/T/hgp5fRQgW1ZoZxLajVlMRZ8W4tFtT0DeA+JABEBAAEA
-A/0bE1jaaZKj6ndqcw86jd+QtD1SF+Cf21CWRNeLKnUds4FRRvclzTyUMuWPkUeX
-TaNNsUOFqBsf6QQ2oHUBBK4VCHffHCW4ZEX2cd6umz7mpHW6XzN4DECEzOVksXtc
-lUC1j4UB91DC/RNQqwX1IV2QLSwssVotPMPqhOi0ZLNY7wIA3n7DWKInxYZZ4K+6
-rQ+POsz6brEoRHwr8x6XlHenq1Oki855pSa1yXIARoTrSJkBtn5oI+f8AzrnN0BN
-oyeQAwIA/7E++3HDi5aweWrViiul9cd3rcsS0dEnksPhvS0ozCJiHsq/6GFmy7J8
-QSHZPteedBnZyNp5jR+H7cIfVN3KgwH/Skq4PsuPhDq5TKK6i8Pc1WW8MA6DXTdU
-nLkX7RGmMwjC0DBf7KWAlPjFaONAX3a8ndnz//fy1q7u2l9AZwrj1qa1iJ8EGAEC
-AAkFAk2rFNoCGwwACgkQO9o98PRieSo2/QP/WTzr4ioINVsvN1akKuekmEMI3LAp
-BfHwatufxxP1U+3Si/6YIk7kuPB9Hs+pRqCXzbvPRrI8NHZBmc8qIGthishdCYad
-AHcVnXjtxrULkQFGbGvhKURLvS9WnzD/m1K2zzwxzkPTzT9/Yf06O6Mal5AdugPL
-VrM0m72/jnpKo04=
-=zNCn
------END PGP PRIVATE KEY BLOCK-----`
-
-const e2ePublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-Charset: UTF-8
-
-xv8AAABSBAAAAAATCCqGSM49AwEHAgME1LRoXSpOxtHXDUdmuvzchyg6005qIBJ4
-sfaSxX7QgH9RV2ONUhC+WiayCNADq+UMzuR/vunSr4aQffXvuGnR383/AAAAFDxk
-Z2lsQHlhaG9vLWluYy5jb20+wv8AAACGBBATCAA4/wAAAAWCVGvAG/8AAAACiwn/
-AAAACZC2VkQCOjdvYf8AAAAFlQgJCgv/AAAAA5YBAv8AAAACngEAAE1BAP0X8veD
-24IjmI5/C6ZAfVNXxgZZFhTAACFX75jUA3oD6AEAzoSwKf1aqH6oq62qhCN/pekX
-+WAsVMBhNwzLpqtCRjLO/wAAAFYEAAAAABIIKoZIzj0DAQcCAwT50ain7vXiIRv8
-B1DO3x3cE/aattZ5sHNixJzRCXi2vQIA5QmOxZ6b5jjUekNbdHG3SZi1a2Ak5mfX
-fRxC/5VGAwEIB8L/AAAAZQQYEwgAGP8AAAAFglRrwBz/AAAACZC2VkQCOjdvYQAA
-FJAA9isX3xtGyMLYwp2F3nXm7QEdY5bq5VUcD/RJlj792VwA/1wH0pCzVLl4Q9F9
-ex7En5r7rHR5xwX82Msc+Rq9dSyO
-=7MrZ
------END PGP PUBLIC KEY BLOCK-----`
-
-const dsaKeyWithSHA512 = `9901a2044f04b07f110400db244efecc7316553ee08d179972aab87bb1214de7692593fcf5b6feb1c80fba268722dd464748539b85b81d574cd2d7ad0ca2444de4d849b8756bad7768c486c83a824f9bba4af773d11742bdfb4ac3b89ef8cc9452d4aad31a37e4b630d33927bff68e879284a1672659b8b298222fc68f370f3e24dccacc4a862442b9438b00a0ea444a24088dc23e26df7daf8f43cba3bffc4fe703fe3d6cd7fdca199d54ed8ae501c30e3ec7871ea9cdd4cf63cfe6fc82281d70a5b8bb493f922cd99fba5f088935596af087c8d818d5ec4d0b9afa7f070b3d7c1dd32a84fca08d8280b4890c8da1dde334de8e3cad8450eed2a4a4fcc2db7b8e5528b869a74a7f0189e11ef097ef1253582348de072bb07a9fa8ab838e993cef0ee203ff49298723e2d1f549b00559f886cd417a41692ce58d0ac1307dc71d85a8af21b0cf6eaa14baf2922d3a70389bedf17cc514ba0febbd107675a372fe84b90162a9e88b14d4b1c6be855b96b33fb198c46f058568817780435b6936167ebb3724b680f32bf27382ada2e37a879b3d9de2abe0c3f399350afd1ad438883f4791e2e3b4184453412068617368207472756e636174696f6e207465737488620413110a002205024f04b07f021b03060b090807030206150802090a0b0416020301021e01021780000a0910ef20e0cefca131581318009e2bf3bf047a44d75a9bacd00161ee04d435522397009a03a60d51bd8a568c6c021c8d7cf1be8d990d6417b0020003`
-
-const unknownHashFunctionHex = `8a00000040040001990006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101`
-
-const rsaSignatureBadMPIlength = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101`
-
-const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101`
-
-const campbellQuine = `a0b001000300fcffa0b001000d00f2ff000300fcffa0b001000d00f2ff8270a01c00000500faff8270a01c00000500faff000500faff001400ebff8270a01c00000500faff000500faff001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400000000ffff000000ffff000b00f4ff428821c400000000ffff000000ffff000b00f4ff0233214c40000100feff000233214c40000100feff0000`
-
-const keyV4forVerifyingSignedMessageV3 = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-Comment: GPGTools - https://gpgtools.org
-
-mI0EVfxoFQEEAMBIqmbDfYygcvP6Phr1wr1XI41IF7Qixqybs/foBF8qqblD9gIY
-BKpXjnBOtbkcVOJ0nljd3/sQIfH4E0vQwK5/4YRQSI59eKOqd6Fx+fWQOLG+uu6z
-tewpeCj9LLHvibx/Sc7VWRnrznia6ftrXxJ/wHMezSab3tnGC0YPVdGNABEBAAG0
-JEdvY3J5cHRvIFRlc3QgS2V5IDx0aGVtYXhAZ21haWwuY29tPoi5BBMBCgAjBQJV
-/GgVAhsDBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQeXnQmhdGW9PFVAP+
-K7TU0qX5ArvIONIxh/WAweyOk884c5cE8f+3NOPOOCRGyVy0FId5A7MmD5GOQh4H
-JseOZVEVCqlmngEvtHZb3U1VYtVGE5WZ+6rQhGsMcWP5qaT4soYwMBlSYxgYwQcx
-YhN9qOr292f9j2Y//TTIJmZT4Oa+lMxhWdqTfX+qMgG4jQRV/GgVAQQArhFSiij1
-b+hT3dnapbEU+23Z1yTu1DfF6zsxQ4XQWEV3eR8v+8mEDDNcz8oyyF56k6UQ3rXi
-UMTIwRDg4V6SbZmaFbZYCOwp/EmXJ3rfhm7z7yzXj2OFN22luuqbyVhuL7LRdB0M
-pxgmjXb4tTvfgKd26x34S+QqUJ7W6uprY4sAEQEAAYifBBgBCgAJBQJV/GgVAhsM
-AAoJEHl50JoXRlvT7y8D/02ckx4OMkKBZo7viyrBw0MLG92i+DC2bs35PooHR6zz
-786mitjOp5z2QWNLBvxC70S0qVfCIz8jKupO1J6rq6Z8CcbLF3qjm6h1omUBf8Nd
-EfXKD2/2HV6zMKVknnKzIEzauh+eCKS2CeJUSSSryap/QLVAjRnckaES/OsEWhNB
-=RZia
------END PGP PUBLIC KEY BLOCK-----
-`
-
-const signedMessageV3 = `-----BEGIN PGP MESSAGE-----
-Comment: GPGTools - https://gpgtools.org
-
-owGbwMvMwMVYWXlhlrhb9GXG03JJDKF/MtxDMjKLFYAoUaEktbhEITe1uDgxPVWP
-q5NhKjMrWAVcC9evD8z/bF/uWNjqtk/X3y5/38XGRQHm/57rrDRYuGnTw597Xqka
-uM3137/hH3Os+Jf2dc0fXOITKwJvXJvecPVs0ta+Vg7ZO1MLn8w58Xx+6L58mbka
-DGHyU9yTueZE8D+QF/Tz28Y78dqtF56R1VPn9Xw4uJqrWYdd7b3vIZ1V6R4Nh05d
-iT57d/OhWwA=
-=hG7R
------END PGP MESSAGE-----
-`
-
-// https://mailarchive.ietf.org/arch/msg/openpgp/9SheW_LENE0Kxf7haNllovPyAdY/
-const v5PrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-
-lGEFXJH05BYAAAAtCSsGAQQB2kcPAQEHQFhZlVcVVtwf+21xNQPX+ecMJJBL0MPd
-fj75iux+my8QAAAAAAAiAQCHZ1SnSUmWqxEsoI6facIVZQu6mph3cBFzzTvcm5lA
-Ng5ctBhlbW1hLmdvbGRtYW5AZXhhbXBsZS5uZXSIlgUTFggASCIhBRk0e8mHJGQC
-X5nfPsLgAA7ZiEiS4fez6kyUAJFZVptUBQJckfTkAhsDBQsJCAcCAyICAQYVCgkI
-CwIEFgIDAQIeBwIXgAAA9cAA/jiR3yMsZMeEQ40u6uzEoXa6UXeV/S3wwJAXRJy9
-M8s0AP9vuL/7AyTfFXwwzSjDnYmzS0qAhbLDQ643N+MXGBJ2BZxmBVyR9OQSAAAA
-MgorBgEEAZdVAQUBAQdA+nysrzml2UCweAqtpDuncSPlvrcBWKU0yfU0YvYWWAoD
-AQgHAAAAAAAiAP9OdAPppjU1WwpqjIItkxr+VPQRT8Zm/Riw7U3F6v3OiBFHiHoF
-GBYIACwiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVAUCXJH05AIb
-DAAAOSQBAP4BOOIR/sGLNMOfeb5fPs/02QMieoiSjIBnijhob2U5AQC+RtOHCHx7
-TcIYl5/Uyoi+FOvPLcNw4hOv2nwUzSSVAw==
-=IiS2
------END PGP PRIVATE KEY BLOCK-----`
-
-// See OpenPGP crypto refresh Section A.3.
-const v6PrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-
-xUsGY4d/4xsAAAAg+U2nu0jWCmHlZ3BqZYfQMxmZu52JGggkLq2EVD34laMAGXKB
-exK+cH6NX1hs5hNhIB00TrJmosgv3mg1ditlsLfCsQYfGwoAAABCBYJjh3/jAwsJ
-BwUVCg4IDAIWAAKbAwIeCSIhBssYbE8GCaaX5NUt+mxyKwwfHifBilZwj2Ul7Ce6
-2azJBScJAgcCAAAAAK0oIBA+LX0ifsDm185Ecds2v8lwgyU2kCcUmKfvBXbAf6rh
-RYWzuQOwEn7E/aLwIwRaLsdry0+VcallHhSu4RN6HWaEQsiPlR4zxP/TP7mhfVEe
-7XWPxtnMUMtf15OyA51YBMdLBmOHf+MZAAAAIIaTJINn+eUBXbki+PSAld2nhJh/
-LVmFsS+60WyvXkQ1AE1gCk95TUR3XFeibg/u/tVY6a//1q0NWC1X+yui3O24wpsG
-GBsKAAAALAWCY4d/4wKbDCIhBssYbE8GCaaX5NUt+mxyKwwfHifBilZwj2Ul7Ce6
-2azJAAAAAAQBIKbpGG2dWTX8j+VjFM21J0hqWlEg+bdiojWnKfA5AQpWUWtnNwDE
-M0g12vYxoWM8Y81W+bHBw805I8kWVkXU6vFOi+HWvv/ira7ofJu16NnoUkhclkUr
-k0mXubZvyl4GBg==
------END PGP PRIVATE KEY BLOCK-----`
-
-// See OpenPGP crypto refresh merge request:
-// https://gitlab.com/openpgp-wg/rfc4880bis/-/merge_requests/304
-const v6PrivKeyMsg = `-----BEGIN PGP MESSAGE-----
-
-wV0GIQYSyD8ecG9jCP4VGkF3Q6HwM3kOk+mXhIjR2zeNqZMIhRmHzxjV8bU/gXzO
-WgBM85PMiVi93AZfJfhK9QmxfdNnZBjeo1VDeVZheQHgaVf7yopqR6W1FT6NOrfS
-aQIHAgZhZBZTW+CwcW1g4FKlbExAf56zaw76/prQoN+bAzxpohup69LA7JW/Vp0l
-yZnuSj3hcFj0DfqLTGgr4/u717J+sPWbtQBfgMfG9AOIwwrUBqsFE9zW+f1zdlYo
-bhF30A+IitsxxA==
------END PGP MESSAGE-----`
-
-// See OpenPGP crypto refresh merge request:
-// https://gitlab.com/openpgp-wg/rfc4880bis/-/merge_requests/305
-const v6PrivKeyInlineSignMsg = `-----BEGIN PGP MESSAGE-----
-
-wV0GIQYSyD8ecG9jCP4VGkF3Q6HwM3kOk+mXhIjR2zeNqZMIhRmHzxjV8bU/gXzO
-WgBM85PMiVi93AZfJfhK9QmxfdNnZBjeo1VDeVZheQHgaVf7yopqR6W1FT6NOrfS
-aQIHAgZhZBZTW+CwcW1g4FKlbExAf56zaw76/prQoN+bAzxpohup69LA7JW/Vp0l
-yZnuSj3hcFj0DfqLTGgr4/u717J+sPWbtQBfgMfG9AOIwwrUBqsFE9zW+f1zdlYo
-bhF30A+IitsxxA==
------END PGP MESSAGE-----`
-
-// See https://gitlab.com/openpgp-wg/rfc4880bis/-/merge_requests/274
-// decryption password: "correct horse battery staple"
-const v6ArgonSealedPrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-
-xYIGY4d/4xsAAAAg+U2nu0jWCmHlZ3BqZYfQMxmZu52JGggkLq2EVD34laP9JgkC
-FARdb9ccngltHraRe25uHuyuAQQVtKipJ0+r5jL4dacGWSAheCWPpITYiyfyIOPS
-3gIDyg8f7strd1OB4+LZsUhcIjOMpVHgmiY/IutJkulneoBYwrEGHxsKAAAAQgWC
-Y4d/4wMLCQcFFQoOCAwCFgACmwMCHgkiIQbLGGxPBgmml+TVLfpscisMHx4nwYpW
-cI9lJewnutmsyQUnCQIHAgAAAACtKCAQPi19In7A5tfORHHbNr/JcIMlNpAnFJin
-7wV2wH+q4UWFs7kDsBJ+xP2i8CMEWi7Ha8tPlXGpZR4UruETeh1mhELIj5UeM8T/
-0z+5oX1RHu11j8bZzFDLX9eTsgOdWATHggZjh3/jGQAAACCGkySDZ/nlAV25Ivj0
-gJXdp4SYfy1ZhbEvutFsr15ENf0mCQIUBA5hhGgp2oaavg6mFUXcFMwBBBUuE8qf
-9Ock+xwusd+GAglBr5LVyr/lup3xxQvHXFSjjA2haXfoN6xUGRdDEHI6+uevKjVR
-v5oAxgu7eJpaXNjCmwYYGwoAAAAsBYJjh3/jApsMIiEGyxhsTwYJppfk1S36bHIr
-DB8eJ8GKVnCPZSXsJ7rZrMkAAAAABAEgpukYbZ1ZNfyP5WMUzbUnSGpaUSD5t2Ki
-Nacp8DkBClZRa2c3AMQzSDXa9jGhYzxjzVb5scHDzTkjyRZWRdTq8U6L4da+/+Kt
-ruh8m7Xo2ehSSFyWRSuTSZe5tm/KXgYG
------END PGP PRIVATE KEY BLOCK-----`
-
-const v4Key25519 = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-
-xUkEZB3qzRto01j2k2pwN5ux9w70stPinAdXULLr20CRW7U7h2GSeACch0M+
-qzQg8yjFQ8VBvu3uwgKH9senoHmj72lLSCLTmhFKzQR0ZXN0wogEEBsIAD4F
-gmQd6s0ECwkHCAmQIf45+TuC+xMDFQgKBBYAAgECGQECmwMCHgEWIQSWEzMi
-jJUHvyIbVKIh/jn5O4L7EwAAUhaHNlgudvxARdPPETUzVgjuWi+YIz8w1xIb
-lHQMvIrbe2sGCQIethpWofd0x7DHuv/ciHg+EoxJ/Td6h4pWtIoKx0kEZB3q
-zRm4CyA7quliq7yx08AoOqHTuuCgvpkSdEhpp3pEyejQOgBo0p6ywIiLPllY
-0t+jpNspHpAGfXID6oqjpYuJw3AfVRBlwnQEGBsIACoFgmQd6s0JkCH+Ofk7
-gvsTApsMFiEElhMzIoyVB78iG1SiIf45+TuC+xMAAGgQuN9G73446ykvJ/mL
-sCZ7zGFId2gBd1EnG0FTC4npfOKpck0X8dngByrCxU8LDSfvjsEp/xDAiKsQ
-aU71tdtNBQ==
-=e7jT
------END PGP PRIVATE KEY BLOCK-----`
-
-const keyWithExpiredCrossSig = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv
-/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz
-/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/
-5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3
-X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv
-9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0
-qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb
-SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb
-vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w
-bGU+wsEABBMBCgATBYJeO2eVAgsJAxUICgKbAQIeAQAhCRD7/MgqAV5zMBYhBNGm
-bhojsYLJmA94jPv8yCoBXnMwKWUMAJ3FKZfJ2mXvh+GFqgymvK4NoKkDRPB0CbUN
-aDdG7ZOizQrWXo7Da2MYIZ6eZUDqBKLdhZ5gZfVnisDfu/yeCgpENaKib1MPHpA8
-nZQjnPejbBDomNqY8HRzr5jvXNlwywBpjWGtegCKUY9xbSynjbfzIlMrWL4S+Rfl
-+bOOQKRyYJWXmECmVyqY8cz2VUYmETjNcwC8VCDUxQnhtcCJ7Aej22hfYwVEPb/J
-BsJBPq8WECCiGfJ9Y2y6TF+62KzG9Kfs5hqUeHhQy8V4TSi479ewwL7DH86XmIIK
-chSANBS+7iyMtctjNZfmF9zYdGJFvjI/mbBR/lK66E515Inuf75XnL8hqlXuwqvG
-ni+i03Aet1DzULZEIio4uIU6ioc1lGO9h7K2Xn4S7QQH1QoISNMWqXibUR0RCGjw
-FsEDTt2QwJl8XXxoJCooM7BCcCQo+rMNVUHDjIwrdoQjPld3YZsUQQRcqH6bLuln
-cfn5ufl8zTGWKydoj/iTz8KcjZ7w187AzQRdpZzyAQwA1jC/XGxjK6ddgrRfW9j+
-s/U00++EvIsgTs2kr3Rg0GP7FLWV0YNtR1mpl55/bEl7yAxCDTkOgPUMXcaKlnQh
-6zrlt6H53mF6Bvs3inOHQvOsGtU0dqvb1vkTF0juLiJgPlM7pWv+pNQ6IA39vKoQ
-sTMBv4v5vYNXP9GgKbg8inUNT17BxzZYHfw5+q63ectgDm2on1e8CIRCZ76oBVwz
-dkVxoy3gjh1eENlk2D4P0uJNZzF1Q8GV67yLANGMCDICE/OkWn6daipYDzW4iJQt
-YPUWP4hWhjdm+CK+hg6IQUEn2Vtvi16D2blRP8BpUNNa4fNuylWVuJV76rIHvsLZ
-1pbM3LHpRgE8s6jivS3Rz3WRs0TmWCNnvHPqWizQ3VTy+r3UQVJ5AmhJDrZdZq9i
-aUIuZ01PoE1+CHiJwuxPtWvVAxf2POcm1M/F1fK1J0e+lKlQuyonTXqXR22Y41wr
-fP2aPk3nPSTW2DUAf3vRMZg57ZpRxLEhEMxcM4/LMR+PABEBAAHCwrIEGAEKAAkF
-gl8sAVYCmwIB3QkQ+/zIKgFeczDA+qAEGQEKAAwFgl47Z5UFgwB4TOAAIQkQfC+q
-Tfk8N7IWIQQd3OFfCSF87i87N2B8L6pN+Tw3st58C/0exp0X2U4LqicSHEOSqHZj
-jiysdqIELHGyo5DSPv92UFPp36aqjF9OFgtNNwSa56fmAVCD4+hor/fKARRIeIjF
-qdIC5Y/9a4B10NQFJa5lsvB38x/d39LI2kEoglZnqWgdJskROo3vNQF4KlIcm6FH
-dn4WI8UkC5oUUcrpZVMSKoacIaxLwqnXT42nIVgYYuqrd/ZagZZjG5WlrTOd5+NI
-zi/l0fWProcPHGLjmAh4Thu8i7omtVw1nQaMnq9I77ffg3cPDgXknYrLL+q8xXh/
-0mEJyIhnmPwllWCSZuLv9DrD5pOexFfdlwXhf6cLzNpW6QhXD/Tf5KrqIPr9aOv8
-9xaEEXWh0vEby2kIsI2++ft+vfdIyxYw/wKqx0awTSnuBV1rG3z1dswX4BfoY66x
-Bz3KOVqlz9+mG/FTRQwrgPvR+qgLCHbuotxoGN7fzW+PI75hQG5JQAqhsC9sHjQH
-UrI21/VUNwzfw3v5pYsWuFb5bdQ3ASJetICQiMy7IW8WIQTRpm4aI7GCyZgPeIz7
-/MgqAV5zMG6/C/wLpPl/9e6Hf5wmXIUwpZNQbNZvpiCcyx9sXsHXaycOQVxn3McZ
-nYOUP9/mobl1tIeDQyTNbkxWjU0zzJl8XQsDZerb5098pg+x7oGIL7M1vn5s5JMl
-owROourqF88JEtOBxLMxlAM7X4hB48xKQ3Hu9hS1GdnqLKki4MqRGl4l5FUwyGOM
-GjyS3TzkfiDJNwQxybQiC9n57ij20ieNyLfuWCMLcNNnZUgZtnF6wCctoq/0ZIWu
-a7nvuA/XC2WW9YjEJJiWdy5109pqac+qWiY11HWy/nms4gpMdxVpT0RhrKGWq4o0
-M5q3ZElOoeN70UO3OSbU5EVrG7gB1GuwF9mTHUVlV0veSTw0axkta3FGT//XfSpD
-lRrCkyLzwq0M+UUHQAuYpAfobDlDdnxxOD2jm5GyTzak3GSVFfjW09QFVO6HlGp5
-01/jtzkUiS6nwoHHkfnyn0beZuR8X6KlcrzLB0VFgQFLmkSM9cSOgYhD0PTu9aHb
-hW1Hj9AO8lzggBQ=
-=Nt+N
------END PGP PUBLIC KEY BLOCK-----
-`
-
-const sigFromKeyWithExpiredCrossSig = `-----BEGIN PGP SIGNATURE-----
-
-wsDzBAABCgAGBYJfLAFsACEJEHwvqk35PDeyFiEEHdzhXwkhfO4vOzdgfC+qTfk8
-N7KiqwwAts4QGB7v9bABCC2qkTxJhmStC0wQMcHRcjL/qAiVnmasQWmvE9KVsdm3
-AaXd8mIx4a37/RRvr9dYrY2eE4uw72cMqPxNja2tvVXkHQvk1oEUqfkvbXs4ypKI
-NyeTWjXNOTZEbg0hbm3nMy+Wv7zgB1CEvAsEboLDJlhGqPcD+X8a6CJGrBGUBUrv
-KVmZr3U6vEzClz3DBLpoddCQseJRhT4YM1nKmBlZ5quh2LFgTSpajv5OsZheqt9y
-EZAPbqmLhDmWRQwGzkWHKceKS7nZ/ox2WK6OS7Ob8ZGZkM64iPo6/EGj5Yc19vQN
-AGiIaPEGszBBWlOpHTPhNm0LB0nMWqqaT87oNYwP8CQuuxDb6rKJ2lffCmZH27Lb
-UbQZcH8J+0UhpeaiadPZxH5ATJAcenmVtVVMLVOFnm+eIlxzov9ntpgGYt8hLdXB
-ITEG9mMgp3TGS9ZzSifMZ8UGtHdp9QdBg8NEVPFzDOMGxpc/Bftav7RRRuPiAER+
-7A5CBid5
-=aQkm
------END PGP SIGNATURE-----
-`
-
-const signedMessageWithCriticalNotation = `-----BEGIN PGP MESSAGE-----
-
-owGbwMvMwMH4oOW7S46CznTG09xJDDE3Wl1KUotLuDousDAwcjBYiSmyXL+48d6x
-U1PSGUxcj8IUszKBVMpMaWAAAgEGZpAeh9SKxNyCnFS95PzcytRiBi5OAZjyXXzM
-f8WYLqv7TXP61Sa4rqT12CI3xaN73YS2pt089f96odCKaEPnWJ3iSGmzJaW/ug10
-2Zo8Wj2k4s7t8wt4H3HtTu+y5UZfV3VOO+l//sdE/o+Lsub8FZH7/eOq7OnbNp4n
-vwjE8mqJXetNMfj8r2SCyvkEnlVRYR+/mnge+ib56FdJ8uKtqSxyvgA=
-=fRXs
------END PGP MESSAGE-----`
-
-const criticalNotationSigner = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mI0EUmEvTgEEANyWtQQMOybQ9JltDqmaX0WnNPJeLILIM36sw6zL0nfTQ5zXSS3+
-fIF6P29lJFxpblWk02PSID5zX/DYU9/zjM2xPO8Oa4xo0cVTOTLj++Ri5mtr//f5
-GLsIXxFrBJhD/ghFsL3Op0GXOeLJ9A5bsOn8th7x6JucNKuaRB6bQbSPABEBAAG0
-JFRlc3QgTWNUZXN0aW5ndG9uIDx0ZXN0QGV4YW1wbGUuY29tPoi5BBMBAgAjBQJS
-YS9OAhsvBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQSmNhOk1uQJQwDAP6
-AgrTyqkRlJVqz2pb46TfbDM2TDF7o9CBnBzIGoxBhlRwpqALz7z2kxBDmwpQa+ki
-Bq3jZN/UosY9y8bhwMAlnrDY9jP1gdCo+H0sD48CdXybblNwaYpwqC8VSpDdTndf
-9j2wE/weihGp/DAdy/2kyBCaiOY1sjhUfJ1GogF49rC4jQRSYS9OAQQA6R/PtBFa
-JaT4jq10yqASk4sqwVMsc6HcifM5lSdxzExFP74naUMMyEsKHP53QxTF0Grqusag
-Qg/ZtgT0CN1HUM152y7ACOdp1giKjpMzOTQClqCoclyvWOFB+L/SwGEIJf7LSCEr
-woBuJifJc8xAVr0XX0JthoW+uP91eTQ3XpsAEQEAAYkBPQQYAQIACQUCUmEvTgIb
-LgCoCRBKY2E6TW5AlJ0gBBkBAgAGBQJSYS9OAAoJEOCE90RsICyXuqIEANmmiRCA
-SF7YK7PvFkieJNwzeK0V3F2lGX+uu6Y3Q/Zxdtwc4xR+me/CSBmsURyXTO29OWhP
-GLszPH9zSJU9BdDi6v0yNprmFPX/1Ng0Abn/sCkwetvjxC1YIvTLFwtUL/7v6NS2
-bZpsUxRTg9+cSrMWWSNjiY9qUKajm1tuzPDZXAUEAMNmAN3xXN/Kjyvj2OK2ck0X
-W748sl/tc3qiKPMJ+0AkMF7Pjhmh9nxqE9+QCEl7qinFqqBLjuzgUhBU4QlwX1GD
-AtNTq6ihLMD5v1d82ZC7tNatdlDMGWnIdvEMCv2GZcuIqDQ9rXWs49e7tq1NncLY
-hz3tYjKhoFTKEIq3y3Pp
-=h/aX
------END PGP PUBLIC KEY BLOCK-----`
-
-const keyv5Test = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-Comment: Bob's OpenPGP Transferable Secret Key
-
-lQVYBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv
-/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz
-/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/
-5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3
-X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv
-9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0
-qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb
-SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb
-vLIwa3T4CyshfT0AEQEAAQAL/RZqbJW2IqQDCnJi4Ozm++gPqBPiX1RhTWSjwxfM
-cJKUZfzLj414rMKm6Jh1cwwGY9jekROhB9WmwaaKT8HtcIgrZNAlYzANGRCM4TLK
-3VskxfSwKKna8l+s+mZglqbAjUg3wmFuf9Tj2xcUZYmyRm1DEmcN2ZzpvRtHgX7z
-Wn1mAKUlSDJZSQks0zjuMNbupcpyJokdlkUg2+wBznBOTKzgMxVNC9b2g5/tMPUs
-hGGWmF1UH+7AHMTaS6dlmr2ZBIyogdnfUqdNg5sZwsxSNrbglKP4sqe7X61uEAIQ
-bD7rT3LonLbhkrj3I8wilUD8usIwt5IecoHhd9HziqZjRCc1BUBkboUEoyedbDV4
-i4qfsFZ6CEWoLuD5pW7dEp0M+WeuHXO164Rc+LnH6i1VQrpb1Okl4qO6ejIpIjBI
-1t3GshtUu/mwGBBxs60KBX5g77mFQ9lLCRj8lSYqOsHRKBhUp4qM869VA+fD0BRP
-fqPT0I9IH4Oa/A3jYJcg622GwQYA1LhnP208Waf6PkQSJ6kyr8ymY1yVh9VBE/g6
-fRDYA+pkqKnw9wfH2Qho3ysAA+OmVOX8Hldg+Pc0Zs0e5pCavb0En8iFLvTA0Q2E
-LR5rLue9uD7aFuKFU/VdcddY9Ww/vo4k5p/tVGp7F8RYCFn9rSjIWbfvvZi1q5Tx
-+akoZbga+4qQ4WYzB/obdX6SCmi6BndcQ1QdjCCQU6gpYx0MddVERbIp9+2SXDyL
-hpxjSyz+RGsZi/9UAshT4txP4+MZBgDfK3ZqtW+h2/eMRxkANqOJpxSjMyLO/FXN
-WxzTDYeWtHNYiAlOwlQZEPOydZFty9IVzzNFQCIUCGjQ/nNyhw7adSgUk3+BXEx/
-MyJPYY0BYuhLxLYcrfQ9nrhaVKxRJj25SVHj2ASsiwGJRZW4CC3uw40OYxfKEvNC
-mer/VxM3kg8qqGf9KUzJ1dVdAvjyx2Hz6jY2qWCyRQ6IMjWHyd43C4r3jxooYKUC
-YnstRQyb/gCSKahveSEjo07CiXMr88UGALwzEr3npFAsPW3osGaFLj49y1oRe11E
-he9gCHFm+fuzbXrWmdPjYU5/ZdqdojzDqfu4ThfnipknpVUM1o6MQqkjM896FHm8
-zbKVFSMhEP6DPHSCexMFrrSgN03PdwHTO6iBaIBBFqmGY01tmJ03SxvSpiBPON9P
-NVvy/6UZFedTq8A07OUAxO62YUSNtT5pmK2vzs3SAZJmbFbMh+NN204TRI72GlqT
-t5hcfkuv8hrmwPS/ZR6q312mKQ6w/1pqO9qitCFCb2IgQmFiYmFnZSA8Ym9iQG9w
-ZW5wZ3AuZXhhbXBsZT6JAc4EEwEKADgCGwMFCwkIBwIGFQoJCAsCBBYCAwECHgEC
-F4AWIQTRpm4aI7GCyZgPeIz7/MgqAV5zMAUCXaWe+gAKCRD7/MgqAV5zMG9sC/9U
-2T3RrqEbw533FPNfEflhEVRIZ8gDXKM8hU6cqqEzCmzZT6xYTe6sv4y+PJBGXJFX
-yhj0g6FDkSyboM5litOcTupURObVqMgA/Y4UKERznm4fzzH9qek85c4ljtLyNufe
-doL2pp3vkGtn7eD0QFRaLLmnxPKQ/TlZKdLE1G3u8Uot8QHicaR6GnAdc5UXQJE3
-BiV7jZuDyWmZ1cUNwJkKL6oRtp+ZNDOQCrLNLecKHcgCqrpjSQG5oouba1I1Q6Vl
-sP44dhA1nkmLHtxlTOzpeHj4jnk1FaXmyasurrrI5CgU/L2Oi39DGKTH/A/cywDN
-4ZplIQ9zR8enkbXquUZvFDe+Xz+6xRXtb5MwQyWODB3nHw85HocLwRoIN9WdQEI+
-L8a/56AuOwhs8llkSuiITjR7r9SgKJC2WlAHl7E8lhJ3VDW3ELC56KH308d6mwOG
-ZRAqIAKzM1T5FGjMBhq7ZV0eqdEntBh3EcOIfj2M8rg1MzJv+0mHZOIjByawikad
-BVgEXaWc8gEMANYwv1xsYyunXYK0X1vY/rP1NNPvhLyLIE7NpK90YNBj+xS1ldGD
-bUdZqZeef2xJe8gMQg05DoD1DF3GipZ0Ies65beh+d5hegb7N4pzh0LzrBrVNHar
-29b5ExdI7i4iYD5TO6Vr/qTUOiAN/byqELEzAb+L+b2DVz/RoCm4PIp1DU9ewcc2
-WB38Ofqut3nLYA5tqJ9XvAiEQme+qAVcM3ZFcaMt4I4dXhDZZNg+D9LiTWcxdUPB
-leu8iwDRjAgyAhPzpFp+nWoqWA81uIiULWD1Fj+IVoY3ZvgivoYOiEFBJ9lbb4te
-g9m5UT/AaVDTWuHzbspVlbiVe+qyB77C2daWzNyx6UYBPLOo4r0t0c91kbNE5lgj
-Z7xz6los0N1U8vq91EFSeQJoSQ62XWavYmlCLmdNT6BNfgh4icLsT7Vr1QMX9jzn
-JtTPxdXytSdHvpSpULsqJ016l0dtmONcK3z9mj5N5z0k1tg1AH970TGYOe2aUcSx
-IRDMXDOPyzEfjwARAQABAAv9F2CwsjS+Sjh1M1vegJbZjei4gF1HHpEM0K0PSXsp
-SfVvpR4AoSJ4He6CXSMWg0ot8XKtDuZoV9jnJaES5UL9pMAD7JwIOqZm/DYVJM5h
-OASCh1c356/wSbFbzRHPtUdZO9Q30WFNJM5pHbCJPjtNoRmRGkf71RxtvHBzy7np
-Ga+W6U/NVKHw0i0CYwMI0YlKDakYW3Pm+QL+gHZFvngGweTod0f9l2VLLAmeQR/c
-+EZs7lNumhuZ8mXcwhUc9JQIhOkpO+wreDysEFkAcsKbkQP3UDUsA1gFx9pbMzT0
-tr1oZq2a4QBtxShHzP/ph7KLpN+6qtjks3xB/yjTgaGmtrwM8tSe0wD1RwXS+/1o
-BHpXTnQ7TfeOGUAu4KCoOQLv6ELpKWbRBLWuiPwMdbGpvVFALO8+kvKAg9/r+/ny
-zM2GQHY+J3Jh5JxPiJnHfXNZjIKLbFbIPdSKNyJBuazXW8xIa//mEHMI5OcvsZBK
-clAIp7LXzjEjKXIwHwDcTn9pBgDpdOKTHOtJ3JUKx0rWVsDH6wq6iKV/FTVSY5jl
-zN+puOEsskF1Lfxn9JsJihAVO3yNsp6RvkKtyNlFazaCVKtDAmkjoh60XNxcNRqr
-gCnwdpbgdHP6v/hvZY54ZaJjz6L2e8unNEkYLxDt8cmAyGPgH2XgL7giHIp9jrsQ
-aS381gnYwNX6wE1aEikgtY91nqJjwPlibF9avSyYQoMtEqM/1UjTjB2KdD/MitK5
-fP0VpvuXpNYZedmyq4UOMwdkiNMGAOrfmOeT0olgLrTMT5H97Cn3Yxbk13uXHNu/
-ZUZZNe8s+QtuLfUlKAJtLEUutN33TlWQY522FV0m17S+b80xJib3yZVJteVurrh5
-HSWHAM+zghQAvCesg5CLXa2dNMkTCmZKgCBvfDLZuZbjFwnwCI6u/NhOY9egKuUf
-SA/je/RXaT8m5VxLYMxwqQXKApzD87fv0tLPlVIEvjEsaf992tFEFSNPcG1l/jpd
-5AVXw6kKuf85UkJtYR1x2MkQDrqY1QX/XMw00kt8y9kMZUre19aCArcmor+hDhRJ
-E3Gt4QJrD9z/bICESw4b4z2DbgD/Xz9IXsA/r9cKiM1h5QMtXvuhyfVeM01enhxM
-GbOH3gjqqGNKysx0UODGEwr6AV9hAd8RWXMchJLaExK9J5SRawSg671ObAU24SdY
-vMQ9Z4kAQ2+1ReUZzf3ogSMRZtMT+d18gT6L90/y+APZIaoArLPhebIAGq39HLmJ
-26x3z0WAgrpA1kNsjXEXkoiZGPLKIGoe3hqJAbYEGAEKACAWIQTRpm4aI7GCyZgP
-eIz7/MgqAV5zMAUCXaWc8gIbDAAKCRD7/MgqAV5zMOn/C/9ugt+HZIwX308zI+QX
-c5vDLReuzmJ3ieE0DMO/uNSC+K1XEioSIZP91HeZJ2kbT9nn9fuReuoff0T0Dief
-rbwcIQQHFFkrqSp1K3VWmUGp2JrUsXFVdjy/fkBIjTd7c5boWljv/6wAsSfiv2V0
-JSM8EFU6TYXxswGjFVfc6X97tJNeIrXL+mpSmPPqy2bztcCCHkWS5lNLWQw+R7Vg
-71Fe6yBSNVrqC2/imYG2J9zlowjx1XU63Wdgqp2Wxt0l8OmsB/W80S1fRF5G4SDH
-s9HXglXXqPsBRZJYfP+VStm9L5P/sKjCcX6WtZR7yS6G8zj/X767MLK/djANvpPd
-NVniEke6hM3CNBXYPAMhQBMWhCulcoz+0lxi8L34rMN+Dsbma96psdUrn7uLaB91
-6we0CTfF8qqm7BsVAgalon/UUiuMY80U3ueoj3okiSTiHIjD/YtpXSPioC8nMng7
-xqAY9Bwizt4FWgXuLm1a4+So4V9j1TRCXd12Uc2l2RNmgDE=
-=miES
------END PGP PRIVATE KEY BLOCK-----
-`
-
-const certv5Test = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-
-lGEFXJH05BYAAAAtCSsGAQQB2kcPAQEHQFhZlVcVVtwf+21xNQPX+ecMJJBL0MPd
-fj75iux+my8QAAAAAAAiAQCHZ1SnSUmWqxEsoI6facIVZQu6mph3cBFzzTvcm5lA
-Ng5ctBhlbW1hLmdvbGRtYW5AZXhhbXBsZS5uZXSIlgUTFggASCIhBRk0e8mHJGQC
-X5nfPsLgAA7ZiEiS4fez6kyUAJFZVptUBQJckfTkAhsDBQsJCAcCAyICAQYVCgkI
-CwIEFgIDAQIeBwIXgAAA9cAA/jiR3yMsZMeEQ40u6uzEoXa6UXeV/S3wwJAXRJy9
-M8s0AP9vuL/7AyTfFXwwzSjDnYmzS0qAhbLDQ643N+MXGBJ2BZxmBVyR9OQSAAAA
-MgorBgEEAZdVAQUBAQdA+nysrzml2UCweAqtpDuncSPlvrcBWKU0yfU0YvYWWAoD
-AQgHAAAAAAAiAP9OdAPppjU1WwpqjIItkxr+VPQRT8Zm/Riw7U3F6v3OiBFHiHoF
-GBYIACwiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVAUCXJH05AIb
-DAAAOSQBAP4BOOIR/sGLNMOfeb5fPs/02QMieoiSjIBnijhob2U5AQC+RtOHCHx7
-TcIYl5/Uyoi+FOvPLcNw4hOv2nwUzSSVAw==
-=IiS2
------END PGP PRIVATE KEY BLOCK-----
-`
-
-const msgv5Test = `-----BEGIN PGP MESSAGE-----
-
-wcDMA3wvqk35PDeyAQv+PcQiLsoYTH30nJYQh3j3cJaO2+jErtVCrIQRIU0+
-rmgMddERYST4A9mA0DQIiTI4FQ0Lp440D3BWCgpq3LlNWewGzduaWwym5rN6
-cwHz5ccDqOcqbd9X0GXXGy/ZH/ljSgzuVMIytMAXKdF/vrRrVgH/+I7cxvm9
-HwnhjMN5dF0j4aEt996H2T7cbtzSr2GN9SWGW8Gyu7I8Zx73hgrGUI7gDiJB
-Afaff+P6hfkkHSGOItr94dde8J/7AUF4VEwwxdVVPvsNEFyvv6gRIbYtOCa2
-6RE6h1V/QTxW2O7zZgzWALrE2ui0oaYr9QuqQSssd9CdgExLfdPbI+3/ZAnE
-v31Idzpk3/6ILiakYHtXkElPXvf46mCNpobty8ysT34irF+fy3C1p3oGwAsx
-5VDV9OSFU6z5U+UPbSPYAy9rkc5ZssuIKxCER2oTvZ2L8Q5cfUvEUiJtRGGn
-CJlHrVDdp3FssKv2tlKgLkvxJLyoOjuEkj44H1qRk+D02FzmmUT/0sAHAYYx
-lTir6mjHeLpcGjn4waUuWIAJyph8SxUexP60bic0L0NBa6Qp5SxxijKsPIDb
-FPHxWwfJSDZRrgUyYT7089YFB/ZM4FHyH9TZcnxn0f0xIB7NS6YNDsxzN2zT
-EVEYf+De4qT/dQTsdww78Chtcv9JY9r2kDm77dk2MUGHL2j7n8jasbLtgA7h
-pn2DMIWLrGamMLWRmlwslolKr1sMV5x8w+5Ias6C33iBMl9phkg42an0gYmc
-byVJHvLO/XErtC+GNIJeMg==
-=liRq
------END PGP MESSAGE-----
-`
-
-// A key that contains a persistent AEAD subkey
-const keyWithAEADSubkey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
-
-xVgEYs/4KxYJKwYBBAHaRw8BAQdA7tIsntXluwloh/H62PJMqasjP00M86fv
-/Pof9A968q8AAQDYcgkPKUdWAxsDjDHJfouPS4q5Me3ks+umlo5RJdwLZw4k
-zQ1TeW1tZXRyaWMgS2V5wowEEBYKAB0FAmLP+CsECwkHCAMVCAoEFgACAQIZ
-AQIbAwIeAQAhCRDkNhFDvaU8vxYhBDJNoyEFquVOCf99d+Q2EUO9pTy/5XQA
-/1F2YPouv0ydBDJU3EOS/4bmPt7yqvzciWzeKVEOkzYuAP9OsP7q/5ccqOPX
-mmRUKwd82/cNjdzdnWZ8Tq89XMwMAMdqBGLP+CtkCfFyZxOMF0BWLwAE8pLy
-RVj2n2K7k6VvrhyuTqDkFDUFALiSLrEfnmTKlsPYS3/YzsODF354ccR63q73
-3lmCrvFRyaf6AHvVrBYPbJR+VhuTjZTwZKvPPKv0zVdSqi5JDEQiocJ4BBgW
-CAAJBQJiz/grAhsMACEJEOQ2EUO9pTy/FiEEMk2jIQWq5U4J/3135DYRQ72l
-PL+fEQEA7RaRbfa+AtiRN7a4GuqVEDZi3qtQZ2/Qcb27/LkAD0sA/3r9drYv
-jyu46h1fdHHyo0HS2MiShZDZ8u60JnDltloD
-=8TxH
------END PGP PRIVATE KEY BLOCK-----
-`
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go
deleted file mode 100644
index f4f5c783..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go
+++ /dev/null
@@ -1,410 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package s2k implements the various OpenPGP string-to-key transforms as
-// specified in RFC 4800 section 3.7.1, and Argon2 specified in
-// draft-ietf-openpgp-crypto-refresh-08 section 3.7.1.4.
-package s2k // import "github.com/ProtonMail/go-crypto/openpgp/s2k"
-
-import (
- "crypto"
- "hash"
- "io"
- "strconv"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "golang.org/x/crypto/argon2"
-)
-
-type Mode uint8
-
-// Defines the default S2KMode constants
-//
-// 0 (simple), 1(salted), 3(iterated), 4(argon2)
-const (
- SimpleS2K Mode = 0
- SaltedS2K Mode = 1
- IteratedSaltedS2K Mode = 3
- Argon2S2K Mode = 4
- GnuS2K Mode = 101
-)
-
-const Argon2SaltSize int = 16
-
-// Params contains all the parameters of the s2k packet
-type Params struct {
- // mode is the mode of s2k function.
- // It can be 0 (simple), 1(salted), 3(iterated)
- // 2(reserved) 100-110(private/experimental).
- mode Mode
- // hashId is the ID of the hash function used in any of the modes
- hashId byte
- // salt is a byte array to use as a salt in hashing process or argon2
- saltBytes [Argon2SaltSize]byte
- // countByte is used to determine how many rounds of hashing are to
- // be performed in s2k mode 3. See RFC 4880 Section 3.7.1.3.
- countByte byte
- // passes is a parameter in Argon2 to determine the number of iterations
- // See RFC the crypto refresh Section 3.7.1.4.
- passes byte
- // parallelism is a parameter in Argon2 to determine the degree of paralellism
- // See RFC the crypto refresh Section 3.7.1.4.
- parallelism byte
- // memoryExp is a parameter in Argon2 to determine the memory usage
- // i.e., 2 ** memoryExp kibibytes
- // See RFC the crypto refresh Section 3.7.1.4.
- memoryExp byte
-}
-
-// encodeCount converts an iterative "count" in the range 1024 to
-// 65011712, inclusive, to an encoded count. The return value is the
-// octet that is actually stored in the GPG file. encodeCount panics
-// if i is not in the above range (encodedCount above takes care to
-// pass i in the correct range). See RFC 4880 Section 3.7.7.1.
-func encodeCount(i int) uint8 {
- if i < 65536 || i > 65011712 {
- panic("count arg i outside the required range")
- }
-
- for encoded := 96; encoded < 256; encoded++ {
- count := decodeCount(uint8(encoded))
- if count >= i {
- return uint8(encoded)
- }
- }
-
- return 255
-}
-
-// decodeCount returns the s2k mode 3 iterative "count" corresponding to
-// the encoded octet c.
-func decodeCount(c uint8) int {
- return (16 + int(c&15)) << (uint32(c>>4) + 6)
-}
-
-// encodeMemory converts the Argon2 "memory" in the range parallelism*8 to
-// 2**31, inclusive, to an encoded memory. The return value is the
-// octet that is actually stored in the GPG file. encodeMemory panics
-// if is not in the above range
-// See OpenPGP crypto refresh Section 3.7.1.4.
-func encodeMemory(memory uint32, parallelism uint8) uint8 {
- if memory < (8*uint32(parallelism)) || memory > uint32(2147483648) {
- panic("Memory argument memory is outside the required range")
- }
-
- for exp := 3; exp < 31; exp++ {
- compare := decodeMemory(uint8(exp))
- if compare >= memory {
- return uint8(exp)
- }
- }
-
- return 31
-}
-
-// decodeMemory computes the decoded memory in kibibytes as 2**memoryExponent
-func decodeMemory(memoryExponent uint8) uint32 {
- return uint32(1) << memoryExponent
-}
-
-// Simple writes to out the result of computing the Simple S2K function (RFC
-// 4880, section 3.7.1.1) using the given hash and input passphrase.
-func Simple(out []byte, h hash.Hash, in []byte) {
- Salted(out, h, in, nil)
-}
-
-var zero [1]byte
-
-// Salted writes to out the result of computing the Salted S2K function (RFC
-// 4880, section 3.7.1.2) using the given hash, input passphrase and salt.
-func Salted(out []byte, h hash.Hash, in []byte, salt []byte) {
- done := 0
- var digest []byte
-
- for i := 0; done < len(out); i++ {
- h.Reset()
- for j := 0; j < i; j++ {
- h.Write(zero[:])
- }
- h.Write(salt)
- h.Write(in)
- digest = h.Sum(digest[:0])
- n := copy(out[done:], digest)
- done += n
- }
-}
-
-// Iterated writes to out the result of computing the Iterated and Salted S2K
-// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase,
-// salt and iteration count.
-func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) {
- combined := make([]byte, len(in)+len(salt))
- copy(combined, salt)
- copy(combined[len(salt):], in)
-
- if count < len(combined) {
- count = len(combined)
- }
-
- done := 0
- var digest []byte
- for i := 0; done < len(out); i++ {
- h.Reset()
- for j := 0; j < i; j++ {
- h.Write(zero[:])
- }
- written := 0
- for written < count {
- if written+len(combined) > count {
- todo := count - written
- h.Write(combined[:todo])
- written = count
- } else {
- h.Write(combined)
- written += len(combined)
- }
- }
- digest = h.Sum(digest[:0])
- n := copy(out[done:], digest)
- done += n
- }
-}
-
-// Argon2 writes to out the key derived from the password (in) with the Argon2
-// function (the crypto refresh, section 3.7.1.4)
-func Argon2(out []byte, in []byte, salt []byte, passes uint8, paralellism uint8, memoryExp uint8) {
- key := argon2.IDKey(in, salt, uint32(passes), decodeMemory(memoryExp), paralellism, uint32(len(out)))
- copy(out[:], key)
-}
-
-// Generate generates valid parameters from given configuration.
-// It will enforce the Iterated and Salted or Argon2 S2K method.
-func Generate(rand io.Reader, c *Config) (*Params, error) {
- var params *Params
- if c != nil && c.Mode() == Argon2S2K {
- // handle Argon2 case
- argonConfig := c.Argon2()
- params = &Params{
- mode: Argon2S2K,
- passes: argonConfig.Passes(),
- parallelism: argonConfig.Parallelism(),
- memoryExp: argonConfig.EncodedMemory(),
- }
- } else if c != nil && c.PassphraseIsHighEntropy && c.Mode() == SaltedS2K { // Allow SaltedS2K if PassphraseIsHighEntropy
- hashId, ok := algorithm.HashToHashId(c.hash())
- if !ok {
- return nil, errors.UnsupportedError("no such hash")
- }
-
- params = &Params{
- mode: SaltedS2K,
- hashId: hashId,
- }
- } else { // Enforce IteratedSaltedS2K method otherwise
- hashId, ok := algorithm.HashToHashId(c.hash())
- if !ok {
- return nil, errors.UnsupportedError("no such hash")
- }
- if c != nil {
- c.S2KMode = IteratedSaltedS2K
- }
- params = &Params{
- mode: IteratedSaltedS2K,
- hashId: hashId,
- countByte: c.EncodedCount(),
- }
- }
- if _, err := io.ReadFull(rand, params.salt()); err != nil {
- return nil, err
- }
- return params, nil
-}
-
-// Parse reads a binary specification for a string-to-key transformation from r
-// and returns a function which performs that transform. If the S2K is a special
-// GNU extension that indicates that the private key is missing, then the error
-// returned is errors.ErrDummyPrivateKey.
-func Parse(r io.Reader) (f func(out, in []byte), err error) {
- params, err := ParseIntoParams(r)
- if err != nil {
- return nil, err
- }
-
- return params.Function()
-}
-
-// ParseIntoParams reads a binary specification for a string-to-key
-// transformation from r and returns a struct describing the s2k parameters.
-func ParseIntoParams(r io.Reader) (params *Params, err error) {
- var buf [Argon2SaltSize + 3]byte
-
- _, err = io.ReadFull(r, buf[:1])
- if err != nil {
- return
- }
-
- params = &Params{
- mode: Mode(buf[0]),
- }
-
- switch params.mode {
- case SimpleS2K:
- _, err = io.ReadFull(r, buf[:1])
- if err != nil {
- return nil, err
- }
- params.hashId = buf[0]
- return params, nil
- case SaltedS2K:
- _, err = io.ReadFull(r, buf[:9])
- if err != nil {
- return nil, err
- }
- params.hashId = buf[0]
- copy(params.salt(), buf[1:9])
- return params, nil
- case IteratedSaltedS2K:
- _, err = io.ReadFull(r, buf[:10])
- if err != nil {
- return nil, err
- }
- params.hashId = buf[0]
- copy(params.salt(), buf[1:9])
- params.countByte = buf[9]
- return params, nil
- case Argon2S2K:
- _, err = io.ReadFull(r, buf[:Argon2SaltSize+3])
- if err != nil {
- return nil, err
- }
- copy(params.salt(), buf[:Argon2SaltSize])
- params.passes = buf[Argon2SaltSize]
- params.parallelism = buf[Argon2SaltSize+1]
- params.memoryExp = buf[Argon2SaltSize+2]
- return params, nil
- case GnuS2K:
- // This is a GNU extension. See
- // https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109
- if _, err = io.ReadFull(r, buf[:5]); err != nil {
- return nil, err
- }
- params.hashId = buf[0]
- if buf[1] == 'G' && buf[2] == 'N' && buf[3] == 'U' && buf[4] == 1 {
- return params, nil
- }
- return nil, errors.UnsupportedError("GNU S2K extension")
- }
-
- return nil, errors.UnsupportedError("S2K function")
-}
-
-func (params *Params) Dummy() bool {
- return params != nil && params.mode == GnuS2K
-}
-
-func (params *Params) salt() []byte {
- switch params.mode {
- case SaltedS2K, IteratedSaltedS2K:
- return params.saltBytes[:8]
- case Argon2S2K:
- return params.saltBytes[:Argon2SaltSize]
- default:
- return nil
- }
-}
-
-func (params *Params) Function() (f func(out, in []byte), err error) {
- if params.Dummy() {
- return nil, errors.ErrDummyPrivateKey("dummy key found")
- }
- var hashObj crypto.Hash
- if params.mode != Argon2S2K {
- var ok bool
- hashObj, ok = algorithm.HashIdToHashWithSha1(params.hashId)
- if !ok {
- return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(params.hashId)))
- }
- if !hashObj.Available() {
- return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashObj)))
- }
- }
-
- switch params.mode {
- case SimpleS2K:
- f := func(out, in []byte) {
- Simple(out, hashObj.New(), in)
- }
-
- return f, nil
- case SaltedS2K:
- f := func(out, in []byte) {
- Salted(out, hashObj.New(), in, params.salt())
- }
-
- return f, nil
- case IteratedSaltedS2K:
- f := func(out, in []byte) {
- Iterated(out, hashObj.New(), in, params.salt(), decodeCount(params.countByte))
- }
-
- return f, nil
- case Argon2S2K:
- f := func(out, in []byte) {
- Argon2(out, in, params.salt(), params.passes, params.parallelism, params.memoryExp)
- }
- return f, nil
- }
-
- return nil, errors.UnsupportedError("S2K function")
-}
-
-func (params *Params) Serialize(w io.Writer) (err error) {
- if _, err = w.Write([]byte{uint8(params.mode)}); err != nil {
- return
- }
- if params.mode != Argon2S2K {
- if _, err = w.Write([]byte{params.hashId}); err != nil {
- return
- }
- }
- if params.Dummy() {
- _, err = w.Write(append([]byte("GNU"), 1))
- return
- }
- if params.mode > 0 {
- if _, err = w.Write(params.salt()); err != nil {
- return
- }
- if params.mode == IteratedSaltedS2K {
- _, err = w.Write([]byte{params.countByte})
- }
- if params.mode == Argon2S2K {
- _, err = w.Write([]byte{params.passes, params.parallelism, params.memoryExp})
- }
- }
- return
-}
-
-// Serialize salts and stretches the given passphrase and writes the
-// resulting key into key. It also serializes an S2K descriptor to
-// w. The key stretching can be configured with c, which may be
-// nil. In that case, sensible defaults will be used.
-func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error {
- params, err := Generate(rand, c)
- if err != nil {
- return err
- }
- err = params.Serialize(w)
- if err != nil {
- return err
- }
-
- f, err := params.Function()
- if err != nil {
- return err
- }
- f(key, passphrase)
- return nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go
deleted file mode 100644
index 616e0d12..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_cache.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package s2k
-
-// Cache stores keys derived with s2k functions from one passphrase
-// to avoid recomputation if multiple items are encrypted with
-// the same parameters.
-type Cache map[Params][]byte
-
-// GetOrComputeDerivedKey tries to retrieve the key
-// for the given s2k parameters from the cache.
-// If there is no hit, it derives the key with the s2k function from the passphrase,
-// updates the cache, and returns the key.
-func (c *Cache) GetOrComputeDerivedKey(passphrase []byte, params *Params, expectedKeySize int) ([]byte, error) {
- key, found := (*c)[*params]
- if !found || len(key) != expectedKeySize {
- var err error
- derivedKey := make([]byte, expectedKeySize)
- s2k, err := params.Function()
- if err != nil {
- return nil, err
- }
- s2k(derivedKey, passphrase)
- (*c)[*params] = key
- return derivedKey, nil
- }
- return key, nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go
deleted file mode 100644
index b93db1ab..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k_config.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package s2k
-
-import "crypto"
-
-// Config collects configuration parameters for s2k key-stretching
-// transformations. A nil *Config is valid and results in all default
-// values.
-type Config struct {
- // S2K (String to Key) mode, used for key derivation in the context of secret key encryption
- // and passphrase-encrypted data. Either s2k.Argon2S2K or s2k.IteratedSaltedS2K may be used.
- // If the passphrase is a high-entropy key, indicated by setting PassphraseIsHighEntropy to true,
- // s2k.SaltedS2K can also be used.
- // Note: Argon2 is the strongest option but not all OpenPGP implementations are compatible with it
- //(pending standardisation).
- // 0 (simple), 1(salted), 3(iterated), 4(argon2)
- // 2(reserved) 100-110(private/experimental).
- S2KMode Mode
- // Only relevant if S2KMode is not set to s2k.Argon2S2K.
- // Hash is the default hash function to be used. If
- // nil, SHA256 is used.
- Hash crypto.Hash
- // Argon2 parameters for S2K (String to Key).
- // Only relevant if S2KMode is set to s2k.Argon2S2K.
- // If nil, default parameters are used.
- // For more details on the choice of parameters, see https://tools.ietf.org/html/rfc9106#section-4.
- Argon2Config *Argon2Config
- // Only relevant if S2KMode is set to s2k.IteratedSaltedS2K.
- // Iteration count for Iterated S2K (String to Key). It
- // determines the strength of the passphrase stretching when
- // the said passphrase is hashed to produce a key. S2KCount
- // should be between 65536 and 65011712, inclusive. If Config
- // is nil or S2KCount is 0, the value 16777216 used. Not all
- // values in the above range can be represented. S2KCount will
- // be rounded up to the next representable value if it cannot
- // be encoded exactly. When set, it is strongly encrouraged to
- // use a value that is at least 65536. See RFC 4880 Section
- // 3.7.1.3.
- S2KCount int
- // Indicates whether the passphrase passed by the application is a
- // high-entropy key (e.g. it's randomly generated or derived from
- // another passphrase using a strong key derivation function).
- // When true, allows the S2KMode to be s2k.SaltedS2K.
- // When the passphrase is not a high-entropy key, using SaltedS2K is
- // insecure, and not allowed by draft-ietf-openpgp-crypto-refresh-08.
- PassphraseIsHighEntropy bool
-}
-
-// Argon2Config stores the Argon2 parameters
-// A nil *Argon2Config is valid and results in all default
-type Argon2Config struct {
- NumberOfPasses uint8
- DegreeOfParallelism uint8
- // Memory specifies the desired Argon2 memory usage in kibibytes.
- // For example memory=64*1024 sets the memory cost to ~64 MB.
- Memory uint32
-}
-
-func (c *Config) Mode() Mode {
- if c == nil {
- return IteratedSaltedS2K
- }
- return c.S2KMode
-}
-
-func (c *Config) hash() crypto.Hash {
- if c == nil || uint(c.Hash) == 0 {
- return crypto.SHA256
- }
-
- return c.Hash
-}
-
-func (c *Config) Argon2() *Argon2Config {
- if c == nil || c.Argon2Config == nil {
- return nil
- }
- return c.Argon2Config
-}
-
-// EncodedCount get encoded count
-func (c *Config) EncodedCount() uint8 {
- if c == nil || c.S2KCount == 0 {
- return 224 // The common case. Corresponding to 16777216
- }
-
- i := c.S2KCount
-
- switch {
- case i < 65536:
- i = 65536
- case i > 65011712:
- i = 65011712
- }
-
- return encodeCount(i)
-}
-
-func (c *Argon2Config) Passes() uint8 {
- if c == nil || c.NumberOfPasses == 0 {
- return 3
- }
- return c.NumberOfPasses
-}
-
-func (c *Argon2Config) Parallelism() uint8 {
- if c == nil || c.DegreeOfParallelism == 0 {
- return 4
- }
- return c.DegreeOfParallelism
-}
-
-func (c *Argon2Config) EncodedMemory() uint8 {
- if c == nil || c.Memory == 0 {
- return 16 // 64 MiB of RAM
- }
-
- memory := c.Memory
- lowerBound := uint32(c.Parallelism()) * 8
- upperBound := uint32(2147483648)
-
- switch {
- case memory < lowerBound:
- memory = lowerBound
- case memory > upperBound:
- memory = upperBound
- }
-
- return encodeMemory(memory, c.Parallelism())
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/symmetric/aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/symmetric/aead.go
deleted file mode 100644
index 044b1394..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/symmetric/aead.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package symmetric
-
-import (
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "io"
-)
-
-type AEADPublicKey struct {
- Cipher algorithm.CipherFunction
- BindingHash [32]byte
- Key []byte
-}
-
-type AEADPrivateKey struct {
- PublicKey AEADPublicKey
- HashSeed [32]byte
- Key []byte
-}
-
-func AEADGenerateKey(rand io.Reader, cipher algorithm.CipherFunction) (priv *AEADPrivateKey, err error) {
- priv, err = generatePrivatePartAEAD(rand, cipher)
- if err != nil {
- return
- }
-
- priv.generatePublicPartAEAD(cipher)
- return
-}
-
-func generatePrivatePartAEAD(rand io.Reader, cipher algorithm.CipherFunction) (priv *AEADPrivateKey, err error) {
- priv = new(AEADPrivateKey)
- var seed [32] byte
- _, err = rand.Read(seed[:])
- if err != nil {
- return
- }
-
- key := make([]byte, cipher.KeySize())
- _, err = rand.Read(key)
- if err != nil {
- return
- }
-
- priv.HashSeed = seed
- priv.Key = key
- return
-}
-
-func (priv *AEADPrivateKey) generatePublicPartAEAD(cipher algorithm.CipherFunction) (err error) {
- priv.PublicKey.Cipher = cipher
-
- bindingHash := ComputeBindingHash(priv.HashSeed)
-
- priv.PublicKey.Key = make([]byte, len(priv.Key))
- copy(priv.PublicKey.Key, priv.Key)
- copy(priv.PublicKey.BindingHash[:], bindingHash)
- return
-}
-
-func (pub *AEADPublicKey) Encrypt(rand io.Reader, data []byte, mode algorithm.AEADMode) (nonce []byte, ciphertext []byte, err error) {
- block := pub.Cipher.New(pub.Key)
- aead := mode.New(block)
- nonce = make([]byte, aead.NonceSize())
- rand.Read(nonce)
- ciphertext = aead.Seal(nil, nonce, data, nil)
- return
-}
-
-func (priv *AEADPrivateKey) Decrypt(nonce []byte, ciphertext []byte, mode algorithm.AEADMode) (message []byte, err error) {
-
- block := priv.PublicKey.Cipher.New(priv.Key)
- aead := mode.New(block)
- message, err = aead.Open(nil, nonce, ciphertext, nil)
- return
-}
-
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/symmetric/hmac.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/symmetric/hmac.go
deleted file mode 100644
index fd4a7cbb..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/symmetric/hmac.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package symmetric
-
-import (
- "crypto"
- "crypto/hmac"
- "crypto/sha256"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
-)
-
-type HMACPublicKey struct {
- Hash algorithm.Hash
- BindingHash [32]byte
- // While this is a "public" key, the symmetric key needs to be present here.
- // Symmetric cryptographic operations use the same key material for
- // signing and verifying, and go-crypto assumes that a public key type will
- // be used for verification. Thus, this `Key` field must never be exported
- // publicly.
- Key []byte
-}
-
-type HMACPrivateKey struct {
- PublicKey HMACPublicKey
- HashSeed [32]byte
- Key []byte
-}
-
-func HMACGenerateKey(rand io.Reader, hash algorithm.Hash) (priv *HMACPrivateKey, err error) {
- priv, err = generatePrivatePartHMAC(rand, hash)
- if err != nil {
- return
- }
-
- priv.generatePublicPartHMAC(hash)
- return
-}
-
-func generatePrivatePartHMAC(rand io.Reader, hash algorithm.Hash) (priv *HMACPrivateKey, err error) {
- priv = new(HMACPrivateKey)
- var seed [32] byte
- _, err = rand.Read(seed[:])
- if err != nil {
- return
- }
-
- key := make([]byte, hash.Size())
- _, err = rand.Read(key)
- if err != nil {
- return
- }
-
- priv.HashSeed = seed
- priv.Key = key
- return
-}
-
-func (priv *HMACPrivateKey) generatePublicPartHMAC(hash algorithm.Hash) (err error) {
- priv.PublicKey.Hash = hash
-
- bindingHash := ComputeBindingHash(priv.HashSeed)
- copy(priv.PublicKey.BindingHash[:], bindingHash)
-
- priv.PublicKey.Key = make([]byte, len(priv.Key))
- copy(priv.PublicKey.Key, priv.Key)
- return
-}
-
-func ComputeBindingHash(seed [32]byte) []byte {
- bindingHash := sha256.New()
- bindingHash.Write(seed[:])
-
- return bindingHash.Sum(nil)
-}
-
-func (priv *HMACPrivateKey) Public() crypto.PublicKey {
- return &priv.PublicKey
-}
-
-func (priv *HMACPrivateKey) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) {
- expectedMAC, err := calculateMAC(priv.PublicKey.Hash, priv.Key, digest)
- if err != nil {
- return
- }
- signature = make([]byte, len(expectedMAC))
- copy(signature, expectedMAC)
- return
-}
-
-func (pub *HMACPublicKey) Verify(digest []byte, signature []byte) (bool, error) {
- expectedMAC, err := calculateMAC(pub.Hash, pub.Key, digest)
- if err != nil {
- return false, err
- }
- return hmac.Equal(expectedMAC, signature), nil
-}
-
-func calculateMAC(hash algorithm.Hash, key []byte, data []byte) ([]byte, error) {
- hashFunc := hash.HashFunc()
- if !hashFunc.Available() {
- return nil, errors.UnsupportedError("hash function")
- }
-
- mac := hmac.New(hashFunc.New, key)
- mac.Write(data)
-
- return mac.Sum(nil), nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go
deleted file mode 100644
index 0db5526c..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go
+++ /dev/null
@@ -1,614 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package openpgp
-
-import (
- "crypto"
- "hash"
- "io"
- "strconv"
- "time"
-
- "github.com/ProtonMail/go-crypto/openpgp/armor"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
- "github.com/ProtonMail/go-crypto/openpgp/packet"
-)
-
-// DetachSign signs message with the private key from signer (which must
-// already have been decrypted) and writes the signature to w.
-// If config is nil, sensible defaults will be used.
-func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
- return detachSign(w, signer, message, packet.SigTypeBinary, config)
-}
-
-// ArmoredDetachSign signs message with the private key from signer (which
-// must already have been decrypted) and writes an armored signature to w.
-// If config is nil, sensible defaults will be used.
-func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) {
- return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config)
-}
-
-// DetachSignText signs message (after canonicalising the line endings) with
-// the private key from signer (which must already have been decrypted) and
-// writes the signature to w.
-// If config is nil, sensible defaults will be used.
-func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
- return detachSign(w, signer, message, packet.SigTypeText, config)
-}
-
-// ArmoredDetachSignText signs message (after canonicalising the line endings)
-// with the private key from signer (which must already have been decrypted)
-// and writes an armored signature to w.
-// If config is nil, sensible defaults will be used.
-func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
- return armoredDetachSign(w, signer, message, packet.SigTypeText, config)
-}
-
-func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) {
- out, err := armor.Encode(w, SignatureType, nil)
- if err != nil {
- return
- }
- err = detachSign(out, signer, message, sigType, config)
- if err != nil {
- return
- }
- return out.Close()
-}
-
-func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) {
- signingKey, ok := signer.SigningKeyById(config.Now(), config.SigningKey())
- if !ok {
- return errors.InvalidArgumentError("no valid signing keys")
- }
- if signingKey.PrivateKey == nil {
- return errors.InvalidArgumentError("signing key doesn't have a private key")
- }
- if signingKey.PrivateKey.Encrypted {
- return errors.InvalidArgumentError("signing key is encrypted")
- }
- if _, ok := algorithm.HashToHashId(config.Hash()); !ok {
- return errors.InvalidArgumentError("invalid hash function")
- }
-
- sig := createSignaturePacket(signingKey.PublicKey, sigType, config)
-
- h, err := sig.PrepareSign(config)
- if err != nil {
- return
- }
- wrappedHash, err := wrapHashForSignature(h, sig.SigType)
- if err != nil {
- return
- }
- if _, err = io.Copy(wrappedHash, message); err != nil {
- return err
- }
-
- err = sig.Sign(h, signingKey.PrivateKey, config)
- if err != nil {
- return
- }
-
- return sig.Serialize(w)
-}
-
-// FileHints contains metadata about encrypted files. This metadata is, itself,
-// encrypted.
-type FileHints struct {
- // IsBinary can be set to hint that the contents are binary data.
- IsBinary bool
- // FileName hints at the name of the file that should be written. It's
- // truncated to 255 bytes if longer. It may be empty to suggest that the
- // file should not be written to disk. It may be equal to "_CONSOLE" to
- // suggest the data should not be written to disk.
- FileName string
- // ModTime contains the modification time of the file, or the zero time if not applicable.
- ModTime time.Time
-}
-
-// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase.
-// The resulting WriteCloser must be closed after the contents of the file have
-// been written.
-// If config is nil, sensible defaults will be used.
-func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
- if hints == nil {
- hints = &FileHints{}
- }
-
- key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config)
- if err != nil {
- return
- }
-
- var w io.WriteCloser
- cipherSuite := packet.CipherSuite{
- Cipher: config.Cipher(),
- Mode: config.AEAD().Mode(),
- }
- w, err = packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), config.AEAD() != nil, cipherSuite, key, config)
- if err != nil {
- return
- }
-
- literalData := w
- if algo := config.Compression(); algo != packet.CompressionNone {
- var compConfig *packet.CompressionConfig
- if config != nil {
- compConfig = config.CompressionConfig
- }
- literalData, err = packet.SerializeCompressed(w, algo, compConfig)
- if err != nil {
- return
- }
- }
-
- var epochSeconds uint32
- if !hints.ModTime.IsZero() {
- epochSeconds = uint32(hints.ModTime.Unix())
- }
- return packet.SerializeLiteral(literalData, hints.IsBinary, hints.FileName, epochSeconds)
-}
-
-// intersectPreferences mutates and returns a prefix of a that contains only
-// the values in the intersection of a and b. The order of a is preserved.
-func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) {
- var j int
- for _, v := range a {
- for _, v2 := range b {
- if v == v2 {
- a[j] = v
- j++
- break
- }
- }
- }
-
- return a[:j]
-}
-
-// intersectPreferences mutates and returns a prefix of a that contains only
-// the values in the intersection of a and b. The order of a is preserved.
-func intersectCipherSuites(a [][2]uint8, b [][2]uint8) (intersection [][2]uint8) {
- var j int
- for _, v := range a {
- for _, v2 := range b {
- if v[0] == v2[0] && v[1] == v2[1] {
- a[j] = v
- j++
- break
- }
- }
- }
-
- return a[:j]
-}
-
-func hashToHashId(h crypto.Hash) uint8 {
- v, ok := algorithm.HashToHashId(h)
- if !ok {
- panic("tried to convert unknown hash")
- }
- return v
-}
-
-// EncryptText encrypts a message to a number of recipients and, optionally,
-// signs it. Optional information is contained in 'hints', also encrypted, that
-// aids the recipients in processing the message. The resulting WriteCloser
-// must be closed after the contents of the file have been written. If config
-// is nil, sensible defaults will be used. The signing is done in text mode.
-func EncryptText(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
- return encrypt(ciphertext, ciphertext, to, signed, hints, packet.SigTypeText, config)
-}
-
-// Encrypt encrypts a message to a number of recipients and, optionally, signs
-// it. hints contains optional information, that is also encrypted, that aids
-// the recipients in processing the message. The resulting WriteCloser must
-// be closed after the contents of the file have been written.
-// If config is nil, sensible defaults will be used.
-func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
- return encrypt(ciphertext, ciphertext, to, signed, hints, packet.SigTypeBinary, config)
-}
-
-// EncryptSplit encrypts a message to a number of recipients and, optionally, signs
-// it. hints contains optional information, that is also encrypted, that aids
-// the recipients in processing the message. The resulting WriteCloser must
-// be closed after the contents of the file have been written.
-// If config is nil, sensible defaults will be used.
-func EncryptSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
- return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeBinary, config)
-}
-
-// EncryptTextSplit encrypts a message to a number of recipients and, optionally, signs
-// it. hints contains optional information, that is also encrypted, that aids
-// the recipients in processing the message. The resulting WriteCloser must
-// be closed after the contents of the file have been written.
-// If config is nil, sensible defaults will be used.
-func EncryptTextSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) {
- return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeText, config)
-}
-
-// writeAndSign writes the data as a payload package and, optionally, signs
-// it. hints contains optional information, that is also encrypted,
-// that aids the recipients in processing the message. The resulting
-// WriteCloser must be closed after the contents of the file have been
-// written. If config is nil, sensible defaults will be used.
-func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, sigType packet.SignatureType, config *packet.Config) (plaintext io.WriteCloser, err error) {
- var signer *packet.PrivateKey
- if signed != nil {
- signKey, ok := signed.SigningKeyById(config.Now(), config.SigningKey())
- if !ok {
- return nil, errors.InvalidArgumentError("no valid signing keys")
- }
- signer = signKey.PrivateKey
- if signer == nil {
- return nil, errors.InvalidArgumentError("no private key in signing key")
- }
- if signer.Encrypted {
- return nil, errors.InvalidArgumentError("signing key must be decrypted")
- }
- }
-
- var hash crypto.Hash
- for _, hashId := range candidateHashes {
- if h, ok := algorithm.HashIdToHash(hashId); ok && h.Available() {
- hash = h
- break
- }
- }
-
- // If the hash specified by config is a candidate, we'll use that.
- if configuredHash := config.Hash(); configuredHash.Available() {
- for _, hashId := range candidateHashes {
- if h, ok := algorithm.HashIdToHash(hashId); ok && h == configuredHash {
- hash = h
- break
- }
- }
- }
-
- if hash == 0 {
- hashId := candidateHashes[0]
- name, ok := algorithm.HashIdToString(hashId)
- if !ok {
- name = "#" + strconv.Itoa(int(hashId))
- }
- return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)")
- }
-
- var salt []byte
- if signer != nil {
- var opsVersion = 3
- if signer.Version == 6 {
- opsVersion = signer.Version
- }
- ops := &packet.OnePassSignature{
- Version: opsVersion,
- SigType: sigType,
- Hash: hash,
- PubKeyAlgo: signer.PubKeyAlgo,
- KeyId: signer.KeyId,
- IsLast: true,
- }
- if opsVersion == 6 {
- ops.KeyFingerprint = signer.Fingerprint
- salt, err = packet.SignatureSaltForHash(hash, config.Random())
- if err != nil {
- return nil, err
- }
- ops.Salt = salt
- }
- if err := ops.Serialize(payload); err != nil {
- return nil, err
- }
- }
-
- if hints == nil {
- hints = &FileHints{}
- }
-
- w := payload
- if signer != nil {
- // If we need to write a signature packet after the literal
- // data then we need to stop literalData from closing
- // encryptedData.
- w = noOpCloser{w}
-
- }
- var epochSeconds uint32
- if !hints.ModTime.IsZero() {
- epochSeconds = uint32(hints.ModTime.Unix())
- }
- literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds)
- if err != nil {
- return nil, err
- }
-
- if signer != nil {
- h, wrappedHash, err := hashForSignature(hash, sigType, salt)
- if err != nil {
- return nil, err
- }
- metadata := &packet.LiteralData{
- Format: 'u',
- FileName: hints.FileName,
- Time: epochSeconds,
- }
- if hints.IsBinary {
- metadata.Format = 'b'
- }
- return signatureWriter{payload, literalData, hash, wrappedHash, h, salt, signer, sigType, config, metadata}, nil
- }
- return literalData, nil
-}
-
-// encrypt encrypts a message to a number of recipients and, optionally, signs
-// it. hints contains optional information, that is also encrypted, that aids
-// the recipients in processing the message. The resulting WriteCloser must
-// be closed after the contents of the file have been written.
-// If config is nil, sensible defaults will be used.
-func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, sigType packet.SignatureType, config *packet.Config) (plaintext io.WriteCloser, err error) {
- if len(to) == 0 {
- return nil, errors.InvalidArgumentError("no encryption recipient provided")
- }
-
- // These are the possible ciphers that we'll use for the message.
- candidateCiphers := []uint8{
- uint8(packet.CipherAES256),
- uint8(packet.CipherAES128),
- }
-
- // These are the possible hash functions that we'll use for the signature.
- candidateHashes := []uint8{
- hashToHashId(crypto.SHA256),
- hashToHashId(crypto.SHA384),
- hashToHashId(crypto.SHA512),
- hashToHashId(crypto.SHA3_256),
- hashToHashId(crypto.SHA3_512),
- }
-
- // Prefer GCM if everyone supports it
- candidateCipherSuites := [][2]uint8{
- {uint8(packet.CipherAES256), uint8(packet.AEADModeGCM)},
- {uint8(packet.CipherAES256), uint8(packet.AEADModeEAX)},
- {uint8(packet.CipherAES256), uint8(packet.AEADModeOCB)},
- {uint8(packet.CipherAES128), uint8(packet.AEADModeGCM)},
- {uint8(packet.CipherAES128), uint8(packet.AEADModeEAX)},
- {uint8(packet.CipherAES128), uint8(packet.AEADModeOCB)},
- }
-
- candidateCompression := []uint8{
- uint8(packet.CompressionNone),
- uint8(packet.CompressionZIP),
- uint8(packet.CompressionZLIB),
- }
-
- encryptKeys := make([]Key, len(to))
-
- // AEAD is used only if config enables it and every key supports it
- aeadSupported := config.AEAD() != nil
-
- for i := range to {
- var ok bool
- encryptKeys[i], ok = to[i].EncryptionKey(config.Now())
- if !ok {
- return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no valid encryption keys")
- }
-
- primarySelfSignature, _ := to[i].PrimarySelfSignature()
- if primarySelfSignature == nil {
- return nil, errors.InvalidArgumentError("entity without a self-signature")
- }
-
- if !primarySelfSignature.SEIPDv2 {
- aeadSupported = false
- }
-
- candidateCiphers = intersectPreferences(candidateCiphers, primarySelfSignature.PreferredSymmetric)
- candidateHashes = intersectPreferences(candidateHashes, primarySelfSignature.PreferredHash)
- candidateCipherSuites = intersectCipherSuites(candidateCipherSuites, primarySelfSignature.PreferredCipherSuites)
- candidateCompression = intersectPreferences(candidateCompression, primarySelfSignature.PreferredCompression)
- }
-
- // In the event that the intersection of supported algorithms is empty we use the ones
- // labelled as MUST that every implementation supports.
- if len(candidateCiphers) == 0 {
- // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.3
- candidateCiphers = []uint8{uint8(packet.CipherAES128)}
- }
- if len(candidateHashes) == 0 {
- // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#hash-algos
- candidateHashes = []uint8{hashToHashId(crypto.SHA256)}
- }
- if len(candidateCipherSuites) == 0 {
- // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.6
- candidateCipherSuites = [][2]uint8{{uint8(packet.CipherAES128), uint8(packet.AEADModeOCB)}}
- }
-
- cipher := packet.CipherFunction(candidateCiphers[0])
- aeadCipherSuite := packet.CipherSuite{
- Cipher: packet.CipherFunction(candidateCipherSuites[0][0]),
- Mode: packet.AEADMode(candidateCipherSuites[0][1]),
- }
-
- // If the cipher specified by config is a candidate, we'll use that.
- configuredCipher := config.Cipher()
- for _, c := range candidateCiphers {
- cipherFunc := packet.CipherFunction(c)
- if cipherFunc == configuredCipher {
- cipher = cipherFunc
- break
- }
- }
-
- symKey := make([]byte, cipher.KeySize())
- if _, err := io.ReadFull(config.Random(), symKey); err != nil {
- return nil, err
- }
-
- for _, key := range encryptKeys {
- if err := packet.SerializeEncryptedKeyAEAD(keyWriter, key.PublicKey, cipher, aeadSupported, symKey, config); err != nil {
- return nil, err
- }
- }
-
- var payload io.WriteCloser
- payload, err = packet.SerializeSymmetricallyEncrypted(dataWriter, cipher, aeadSupported, aeadCipherSuite, symKey, config)
- if err != nil {
- return
- }
-
- payload, err = handleCompression(payload, candidateCompression, config)
- if err != nil {
- return nil, err
- }
-
- return writeAndSign(payload, candidateHashes, signed, hints, sigType, config)
-}
-
-// Sign signs a message. The resulting WriteCloser must be closed after the
-// contents of the file have been written. hints contains optional information
-// that aids the recipients in processing the message.
-// If config is nil, sensible defaults will be used.
-func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) {
- if signed == nil {
- return nil, errors.InvalidArgumentError("no signer provided")
- }
-
- // These are the possible hash functions that we'll use for the signature.
- candidateHashes := []uint8{
- hashToHashId(crypto.SHA256),
- hashToHashId(crypto.SHA384),
- hashToHashId(crypto.SHA512),
- hashToHashId(crypto.SHA3_256),
- hashToHashId(crypto.SHA3_512),
- }
- defaultHashes := candidateHashes[0:1]
- primarySelfSignature, _ := signed.PrimarySelfSignature()
- if primarySelfSignature == nil {
- return nil, errors.StructuralError("signed entity has no self-signature")
- }
- preferredHashes := primarySelfSignature.PreferredHash
- if len(preferredHashes) == 0 {
- preferredHashes = defaultHashes
- }
- candidateHashes = intersectPreferences(candidateHashes, preferredHashes)
- if len(candidateHashes) == 0 {
- return nil, errors.StructuralError("cannot sign because signing key shares no common algorithms with candidate hashes")
- }
-
- return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, packet.SigTypeBinary, config)
-}
-
-// signatureWriter hashes the contents of a message while passing it along to
-// literalData. When closed, it closes literalData, writes a signature packet
-// to encryptedData and then also closes encryptedData.
-type signatureWriter struct {
- encryptedData io.WriteCloser
- literalData io.WriteCloser
- hashType crypto.Hash
- wrappedHash hash.Hash
- h hash.Hash
- salt []byte // v6 only
- signer *packet.PrivateKey
- sigType packet.SignatureType
- config *packet.Config
- metadata *packet.LiteralData // V5 signatures protect document metadata
-}
-
-func (s signatureWriter) Write(data []byte) (int, error) {
- s.wrappedHash.Write(data)
- switch s.sigType {
- case packet.SigTypeBinary:
- return s.literalData.Write(data)
- case packet.SigTypeText:
- flag := 0
- return writeCanonical(s.literalData, data, &flag)
- }
- return 0, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(s.sigType)))
-}
-
-func (s signatureWriter) Close() error {
- sig := createSignaturePacket(&s.signer.PublicKey, s.sigType, s.config)
- sig.Hash = s.hashType
- sig.Metadata = s.metadata
-
- if err := sig.SetSalt(s.salt); err != nil {
- return err
- }
-
- if err := sig.Sign(s.h, s.signer, s.config); err != nil {
- return err
- }
- if err := s.literalData.Close(); err != nil {
- return err
- }
- if err := sig.Serialize(s.encryptedData); err != nil {
- return err
- }
- return s.encryptedData.Close()
-}
-
-func createSignaturePacket(signer *packet.PublicKey, sigType packet.SignatureType, config *packet.Config) *packet.Signature {
- sigLifetimeSecs := config.SigLifetime()
- return &packet.Signature{
- Version: signer.Version,
- SigType: sigType,
- PubKeyAlgo: signer.PubKeyAlgo,
- Hash: config.Hash(),
- CreationTime: config.Now(),
- IssuerKeyId: &signer.KeyId,
- IssuerFingerprint: signer.Fingerprint,
- Notations: config.Notations(),
- SigLifetimeSecs: &sigLifetimeSecs,
- }
-}
-
-// noOpCloser is like an ioutil.NopCloser, but for an io.Writer.
-// TODO: we have two of these in OpenPGP packages alone. This probably needs
-// to be promoted somewhere more common.
-type noOpCloser struct {
- w io.Writer
-}
-
-func (c noOpCloser) Write(data []byte) (n int, err error) {
- return c.w.Write(data)
-}
-
-func (c noOpCloser) Close() error {
- return nil
-}
-
-func handleCompression(compressed io.WriteCloser, candidateCompression []uint8, config *packet.Config) (data io.WriteCloser, err error) {
- data = compressed
- confAlgo := config.Compression()
- if confAlgo == packet.CompressionNone {
- return
- }
-
- // Set algorithm labelled as MUST as fallback
- // https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-9.4
- finalAlgo := packet.CompressionNone
- // if compression specified by config available we will use it
- for _, c := range candidateCompression {
- if uint8(confAlgo) == c {
- finalAlgo = confAlgo
- break
- }
- }
-
- if finalAlgo != packet.CompressionNone {
- var compConfig *packet.CompressionConfig
- if config != nil {
- compConfig = config.CompressionConfig
- }
- data, err = packet.SerializeCompressed(compressed, finalAlgo, compConfig)
- if err != nil {
- return
- }
- }
- return data, nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/x25519/x25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/x25519/x25519.go
deleted file mode 100644
index 38afcc74..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/x25519/x25519.go
+++ /dev/null
@@ -1,221 +0,0 @@
-package x25519
-
-import (
- "crypto/sha256"
- "crypto/subtle"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- x25519lib "github.com/cloudflare/circl/dh/x25519"
- "golang.org/x/crypto/hkdf"
-)
-
-const (
- hkdfInfo = "OpenPGP X25519"
- aes128KeySize = 16
- // The size of a public or private key in bytes.
- KeySize = x25519lib.Size
-)
-
-type PublicKey struct {
- // Point represents the encoded elliptic curve point of the public key.
- Point []byte
-}
-
-type PrivateKey struct {
- PublicKey
- // Secret represents the secret of the private key.
- Secret []byte
-}
-
-// NewPrivateKey creates a new empty private key including the public key.
-func NewPrivateKey(key PublicKey) *PrivateKey {
- return &PrivateKey{
- PublicKey: key,
- }
-}
-
-// Validate validates that the provided public key matches the private key.
-func Validate(pk *PrivateKey) (err error) {
- var expectedPublicKey, privateKey x25519lib.Key
- subtle.ConstantTimeCopy(1, privateKey[:], pk.Secret)
- x25519lib.KeyGen(&expectedPublicKey, &privateKey)
- if subtle.ConstantTimeCompare(expectedPublicKey[:], pk.PublicKey.Point) == 0 {
- return errors.KeyInvalidError("x25519: invalid key")
- }
- return nil
-}
-
-// GenerateKey generates a new x25519 key pair.
-func GenerateKey(rand io.Reader) (*PrivateKey, error) {
- var privateKey, publicKey x25519lib.Key
- privateKeyOut := new(PrivateKey)
- err := generateKey(rand, &privateKey, &publicKey)
- if err != nil {
- return nil, err
- }
- privateKeyOut.PublicKey.Point = publicKey[:]
- privateKeyOut.Secret = privateKey[:]
- return privateKeyOut, nil
-}
-
-func generateKey(rand io.Reader, privateKey *x25519lib.Key, publicKey *x25519lib.Key) error {
- maxRounds := 10
- isZero := true
- for round := 0; isZero; round++ {
- if round == maxRounds {
- return errors.InvalidArgumentError("x25519: zero keys only, randomness source might be corrupt")
- }
- _, err := io.ReadFull(rand, privateKey[:])
- if err != nil {
- return err
- }
- isZero = constantTimeIsZero(privateKey[:])
- }
- x25519lib.KeyGen(publicKey, privateKey)
- return nil
-}
-
-// Encrypt encrypts a sessionKey with x25519 according to
-// the OpenPGP crypto refresh specification section 5.1.6. The function assumes that the
-// sessionKey has the correct format and padding according to the specification.
-func Encrypt(rand io.Reader, publicKey *PublicKey, sessionKey []byte) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, err error) {
- var ephemeralPrivate, ephemeralPublic, staticPublic, shared x25519lib.Key
- // Check that the input static public key has 32 bytes
- if len(publicKey.Point) != KeySize {
- err = errors.KeyInvalidError("x25519: the public key has the wrong size")
- return
- }
- copy(staticPublic[:], publicKey.Point)
- // Generate ephemeral keyPair
- err = generateKey(rand, &ephemeralPrivate, &ephemeralPublic)
- if err != nil {
- return
- }
- // Compute shared key
- ok := x25519lib.Shared(&shared, &ephemeralPrivate, &staticPublic)
- if !ok {
- err = errors.KeyInvalidError("x25519: the public key is a low order point")
- return
- }
- // Derive the encryption key from the shared secret
- encryptionKey := applyHKDF(ephemeralPublic[:], publicKey.Point[:], shared[:])
- ephemeralPublicKey = &PublicKey{
- Point: ephemeralPublic[:],
- }
- // Encrypt the sessionKey with aes key wrapping
- encryptedSessionKey, err = keywrap.Wrap(encryptionKey, sessionKey)
- return
-}
-
-// Decrypt decrypts a session key stored in ciphertext with the provided x25519
-// private key and ephemeral public key.
-func Decrypt(privateKey *PrivateKey, ephemeralPublicKey *PublicKey, ciphertext []byte) (encodedSessionKey []byte, err error) {
- var ephemeralPublic, staticPrivate, shared x25519lib.Key
- // Check that the input ephemeral public key has 32 bytes
- if len(ephemeralPublicKey.Point) != KeySize {
- err = errors.KeyInvalidError("x25519: the public key has the wrong size")
- return
- }
- copy(ephemeralPublic[:], ephemeralPublicKey.Point)
- subtle.ConstantTimeCopy(1, staticPrivate[:], privateKey.Secret)
- // Compute shared key
- ok := x25519lib.Shared(&shared, &staticPrivate, &ephemeralPublic)
- if !ok {
- err = errors.KeyInvalidError("x25519: the ephemeral public key is a low order point")
- return
- }
- // Derive the encryption key from the shared secret
- encryptionKey := applyHKDF(ephemeralPublicKey.Point[:], privateKey.PublicKey.Point[:], shared[:])
- // Decrypt the session key with aes key wrapping
- encodedSessionKey, err = keywrap.Unwrap(encryptionKey, ciphertext)
- return
-}
-
-func applyHKDF(ephemeralPublicKey []byte, publicKey []byte, sharedSecret []byte) []byte {
- inputKey := make([]byte, 3*KeySize)
- // ephemeral public key | recipient public key | shared secret
- subtle.ConstantTimeCopy(1, inputKey[:KeySize], ephemeralPublicKey)
- subtle.ConstantTimeCopy(1, inputKey[KeySize:2*KeySize], publicKey)
- subtle.ConstantTimeCopy(1, inputKey[2*KeySize:], sharedSecret)
- hkdfReader := hkdf.New(sha256.New, inputKey, []byte{}, []byte(hkdfInfo))
- encryptionKey := make([]byte, aes128KeySize)
- _, _ = io.ReadFull(hkdfReader, encryptionKey)
- return encryptionKey
-}
-
-func constantTimeIsZero(bytes []byte) bool {
- isZero := byte(0)
- for _, b := range bytes {
- isZero |= b
- }
- return isZero == 0
-}
-
-// ENCODING/DECODING ciphertexts:
-
-// EncodeFieldsLength returns the length of the ciphertext encoding
-// given the encrypted session key.
-func EncodedFieldsLength(encryptedSessionKey []byte, v6 bool) int {
- lenCipherFunction := 0
- if !v6 {
- lenCipherFunction = 1
- }
- return KeySize + 1 + len(encryptedSessionKey) + lenCipherFunction
-}
-
-// EncodeField encodes x25519 session key encryption fields as
-// ephemeral x25519 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey
-// and writes it to writer.
-func EncodeFields(writer io.Writer, ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, v6 bool) (err error) {
- lenAlgorithm := 0
- if !v6 {
- lenAlgorithm = 1
- }
- if _, err = writer.Write(ephemeralPublicKey.Point); err != nil {
- return err
- }
- if _, err = writer.Write([]byte{byte(len(encryptedSessionKey) + lenAlgorithm)}); err != nil {
- return err
- }
- if !v6 {
- if _, err = writer.Write([]byte{cipherFunction}); err != nil {
- return err
- }
- }
- _, err = writer.Write(encryptedSessionKey)
- return err
-}
-
-// DecodeField decodes a x25519 session key encryption as
-// ephemeral x25519 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey.
-func DecodeFields(reader io.Reader, v6 bool) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, err error) {
- var buf [1]byte
- ephemeralPublicKey = &PublicKey{
- Point: make([]byte, KeySize),
- }
- // 32 octets representing an ephemeral x25519 public key.
- if _, err = io.ReadFull(reader, ephemeralPublicKey.Point); err != nil {
- return nil, nil, 0, err
- }
- // A one-octet size of the following fields.
- if _, err = io.ReadFull(reader, buf[:]); err != nil {
- return nil, nil, 0, err
- }
- followingLen := buf[0]
- // The one-octet algorithm identifier, if it was passed (in the case of a v3 PKESK packet).
- if !v6 {
- if _, err = io.ReadFull(reader, buf[:]); err != nil {
- return nil, nil, 0, err
- }
- cipherFunction = buf[0]
- followingLen -= 1
- }
- // The encrypted session key.
- encryptedSessionKey = make([]byte, followingLen)
- if _, err = io.ReadFull(reader, encryptedSessionKey); err != nil {
- return nil, nil, 0, err
- }
- return ephemeralPublicKey, encryptedSessionKey, cipherFunction, nil
-}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/x448/x448.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/x448/x448.go
deleted file mode 100644
index 65a082da..00000000
--- a/vendor/github.com/ProtonMail/go-crypto/openpgp/x448/x448.go
+++ /dev/null
@@ -1,229 +0,0 @@
-package x448
-
-import (
- "crypto/sha512"
- "crypto/subtle"
- "io"
-
- "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap"
- "github.com/ProtonMail/go-crypto/openpgp/errors"
- x448lib "github.com/cloudflare/circl/dh/x448"
- "golang.org/x/crypto/hkdf"
-)
-
-const (
- hkdfInfo = "OpenPGP X448"
- aes256KeySize = 32
- // The size of a public or private key in bytes.
- KeySize = x448lib.Size
-)
-
-type PublicKey struct {
- // Point represents the encoded elliptic curve point of the public key.
- Point []byte
-}
-
-type PrivateKey struct {
- PublicKey
- // Secret represents the secret of the private key.
- Secret []byte
-}
-
-// NewPrivateKey creates a new empty private key including the public key.
-func NewPrivateKey(key PublicKey) *PrivateKey {
- return &PrivateKey{
- PublicKey: key,
- }
-}
-
-// Validate validates that the provided public key matches
-// the private key.
-func Validate(pk *PrivateKey) (err error) {
- var expectedPublicKey, privateKey x448lib.Key
- subtle.ConstantTimeCopy(1, privateKey[:], pk.Secret)
- x448lib.KeyGen(&expectedPublicKey, &privateKey)
- if subtle.ConstantTimeCompare(expectedPublicKey[:], pk.PublicKey.Point) == 0 {
- return errors.KeyInvalidError("x448: invalid key")
- }
- return nil
-}
-
-// GenerateKey generates a new x448 key pair.
-func GenerateKey(rand io.Reader) (*PrivateKey, error) {
- var privateKey, publicKey x448lib.Key
- privateKeyOut := new(PrivateKey)
- err := generateKey(rand, &privateKey, &publicKey)
- if err != nil {
- return nil, err
- }
- privateKeyOut.PublicKey.Point = publicKey[:]
- privateKeyOut.Secret = privateKey[:]
- return privateKeyOut, nil
-}
-
-func generateKey(rand io.Reader, privateKey *x448lib.Key, publicKey *x448lib.Key) error {
- maxRounds := 10
- isZero := true
- for round := 0; isZero; round++ {
- if round == maxRounds {
- return errors.InvalidArgumentError("x448: zero keys only, randomness source might be corrupt")
- }
- _, err := io.ReadFull(rand, privateKey[:])
- if err != nil {
- return err
- }
- isZero = constantTimeIsZero(privateKey[:])
- }
- x448lib.KeyGen(publicKey, privateKey)
- return nil
-}
-
-// Encrypt encrypts a sessionKey with x448 according to
-// the OpenPGP crypto refresh specification section 5.1.7. The function assumes that the
-// sessionKey has the correct format and padding according to the specification.
-func Encrypt(rand io.Reader, publicKey *PublicKey, sessionKey []byte) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, err error) {
- var ephemeralPrivate, ephemeralPublic, staticPublic, shared x448lib.Key
- // Check that the input static public key has 56 bytes.
- if len(publicKey.Point) != KeySize {
- err = errors.KeyInvalidError("x448: the public key has the wrong size")
- return nil, nil, err
- }
- copy(staticPublic[:], publicKey.Point)
- // Generate ephemeral keyPair.
- if err = generateKey(rand, &ephemeralPrivate, &ephemeralPublic); err != nil {
- return nil, nil, err
- }
- // Compute shared key.
- ok := x448lib.Shared(&shared, &ephemeralPrivate, &staticPublic)
- if !ok {
- err = errors.KeyInvalidError("x448: the public key is a low order point")
- return nil, nil, err
- }
- // Derive the encryption key from the shared secret.
- encryptionKey := applyHKDF(ephemeralPublic[:], publicKey.Point[:], shared[:])
- ephemeralPublicKey = &PublicKey{
- Point: ephemeralPublic[:],
- }
- // Encrypt the sessionKey with aes key wrapping.
- encryptedSessionKey, err = keywrap.Wrap(encryptionKey, sessionKey)
- if err != nil {
- return nil, nil, err
- }
- return ephemeralPublicKey, encryptedSessionKey, nil
-}
-
-// Decrypt decrypts a session key stored in ciphertext with the provided x448
-// private key and ephemeral public key.
-func Decrypt(privateKey *PrivateKey, ephemeralPublicKey *PublicKey, ciphertext []byte) (encodedSessionKey []byte, err error) {
- var ephemeralPublic, staticPrivate, shared x448lib.Key
- // Check that the input ephemeral public key has 56 bytes.
- if len(ephemeralPublicKey.Point) != KeySize {
- err = errors.KeyInvalidError("x448: the public key has the wrong size")
- return nil, err
- }
- copy(ephemeralPublic[:], ephemeralPublicKey.Point)
- subtle.ConstantTimeCopy(1, staticPrivate[:], privateKey.Secret)
- // Compute shared key.
- ok := x448lib.Shared(&shared, &staticPrivate, &ephemeralPublic)
- if !ok {
- err = errors.KeyInvalidError("x448: the ephemeral public key is a low order point")
- return nil, err
- }
- // Derive the encryption key from the shared secret.
- encryptionKey := applyHKDF(ephemeralPublicKey.Point[:], privateKey.PublicKey.Point[:], shared[:])
- // Decrypt the session key with aes key wrapping.
- encodedSessionKey, err = keywrap.Unwrap(encryptionKey, ciphertext)
- if err != nil {
- return nil, err
- }
- return encodedSessionKey, nil
-}
-
-func applyHKDF(ephemeralPublicKey []byte, publicKey []byte, sharedSecret []byte) []byte {
- inputKey := make([]byte, 3*KeySize)
- // ephemeral public key | recipient public key | shared secret.
- subtle.ConstantTimeCopy(1, inputKey[:KeySize], ephemeralPublicKey)
- subtle.ConstantTimeCopy(1, inputKey[KeySize:2*KeySize], publicKey)
- subtle.ConstantTimeCopy(1, inputKey[2*KeySize:], sharedSecret)
- hkdfReader := hkdf.New(sha512.New, inputKey, []byte{}, []byte(hkdfInfo))
- encryptionKey := make([]byte, aes256KeySize)
- _, _ = io.ReadFull(hkdfReader, encryptionKey)
- return encryptionKey
-}
-
-func constantTimeIsZero(bytes []byte) bool {
- isZero := byte(0)
- for _, b := range bytes {
- isZero |= b
- }
- return isZero == 0
-}
-
-// ENCODING/DECODING ciphertexts:
-
-// EncodeFieldsLength returns the length of the ciphertext encoding
-// given the encrypted session key.
-func EncodedFieldsLength(encryptedSessionKey []byte, v6 bool) int {
- lenCipherFunction := 0
- if !v6 {
- lenCipherFunction = 1
- }
- return KeySize + 1 + len(encryptedSessionKey) + lenCipherFunction
-}
-
-// EncodeField encodes x448 session key encryption fields as
-// ephemeral x448 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey
-// and writes it to writer.
-func EncodeFields(writer io.Writer, ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, v6 bool) (err error) {
- lenAlgorithm := 0
- if !v6 {
- lenAlgorithm = 1
- }
- if _, err = writer.Write(ephemeralPublicKey.Point); err != nil {
- return err
- }
- if _, err = writer.Write([]byte{byte(len(encryptedSessionKey) + lenAlgorithm)}); err != nil {
- return err
- }
- if !v6 {
- if _, err = writer.Write([]byte{cipherFunction}); err != nil {
- return err
- }
- }
- if _, err = writer.Write(encryptedSessionKey); err != nil {
- return err
- }
- return nil
-}
-
-// DecodeField decodes a x448 session key encryption as
-// ephemeral x448 public key | follow byte length | cipherFunction (v3 only) | encryptedSessionKey.
-func DecodeFields(reader io.Reader, v6 bool) (ephemeralPublicKey *PublicKey, encryptedSessionKey []byte, cipherFunction byte, err error) {
- var buf [1]byte
- ephemeralPublicKey = &PublicKey{
- Point: make([]byte, KeySize),
- }
- // 56 octets representing an ephemeral x448 public key.
- if _, err = io.ReadFull(reader, ephemeralPublicKey.Point); err != nil {
- return nil, nil, 0, err
- }
- // A one-octet size of the following fields.
- if _, err = io.ReadFull(reader, buf[:]); err != nil {
- return nil, nil, 0, err
- }
- followingLen := buf[0]
- // The one-octet algorithm identifier, if it was passed (in the case of a v3 PKESK packet).
- if !v6 {
- if _, err = io.ReadFull(reader, buf[:]); err != nil {
- return nil, nil, 0, err
- }
- cipherFunction = buf[0]
- followingLen -= 1
- }
- // The encrypted session key.
- encryptedSessionKey = make([]byte, followingLen)
- if _, err = io.ReadFull(reader, encryptedSessionKey); err != nil {
- return nil, nil, 0, err
- }
- return ephemeralPublicKey, encryptedSessionKey, cipherFunction, nil
-}
diff --git a/vendor/github.com/agext/levenshtein/.gitignore b/vendor/github.com/agext/levenshtein/.gitignore
deleted file mode 100644
index 4473da19..00000000
--- a/vendor/github.com/agext/levenshtein/.gitignore
+++ /dev/null
@@ -1,53 +0,0 @@
-# Ignore docs files
-_gh_pages
-_site
-
-# Ignore temporary files
-README.html
-coverage.out
-.tmp
-
-# Numerous always-ignore extensions
-*.diff
-*.err
-*.log
-*.orig
-*.rej
-*.swo
-*.swp
-*.vi
-*.zip
-*~
-
-# OS or Editor folders
-._*
-.cache
-.DS_Store
-.idea
-.project
-.settings
-.tmproj
-*.esproj
-*.sublime-project
-*.sublime-workspace
-nbproject
-Thumbs.db
-
-# Komodo
-.komodotools
-*.komodoproject
-
-# SCSS-Lint
-scss-lint-report.xml
-
-# grunt-contrib-sass cache
-.sass-cache
-
-# Jekyll metadata
-docs/.jekyll-metadata
-
-# Folders to ignore
-.build
-.test
-bower_components
-node_modules
diff --git a/vendor/github.com/agext/levenshtein/.travis.yml b/vendor/github.com/agext/levenshtein/.travis.yml
deleted file mode 100644
index 68d38816..00000000
--- a/vendor/github.com/agext/levenshtein/.travis.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-language: go
-sudo: false
-matrix:
- fast_finish: true
- include:
- - go: 1.14.x
- env: TEST_METHOD=goveralls
- - go: 1.13.x
- - go: 1.12.x
- - go: 1.11.x
- - go: 1.10.x
- - go: tip
- - go: 1.9.x
- - go: 1.8.x
- - go: 1.7.x
- - go: 1.6.x
- - go: 1.5.x
- allow_failures:
- - go: tip
- - go: 1.11.x
- - go: 1.10.x
- - go: 1.9.x
- - go: 1.8.x
- - go: 1.7.x
- - go: 1.6.x
- - go: 1.5.x
-script: ./test.sh $TEST_METHOD
-notifications:
- email:
- on_success: never
diff --git a/vendor/github.com/agext/levenshtein/DCO b/vendor/github.com/agext/levenshtein/DCO
deleted file mode 100644
index 716561d5..00000000
--- a/vendor/github.com/agext/levenshtein/DCO
+++ /dev/null
@@ -1,36 +0,0 @@
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-660 York Street, Suite 102,
-San Francisco, CA 94110 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
- have the right to submit it under the open source license
- indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
- of my knowledge, is covered under an appropriate open source
- license and I have the right under that license to submit that
- work with modifications, whether created in whole or in part
- by me, under the same open source license (unless I am
- permitted to submit under a different license), as indicated
- in the file; or
-
-(c) The contribution was provided directly to me by some other
- person who certified (a), (b) or (c) and I have not modified
- it.
-
-(d) I understand and agree that this project and the contribution
- are public and that a record of the contribution (including all
- personal information I submit with it, including my sign-off) is
- maintained indefinitely and may be redistributed consistent with
- this project or the open source license(s) involved.
diff --git a/vendor/github.com/agext/levenshtein/LICENSE b/vendor/github.com/agext/levenshtein/LICENSE
deleted file mode 100644
index 261eeb9e..00000000
--- a/vendor/github.com/agext/levenshtein/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/agext/levenshtein/MAINTAINERS b/vendor/github.com/agext/levenshtein/MAINTAINERS
deleted file mode 100644
index 726c2afb..00000000
--- a/vendor/github.com/agext/levenshtein/MAINTAINERS
+++ /dev/null
@@ -1 +0,0 @@
-Alex Bucataru (@AlexBucataru)
diff --git a/vendor/github.com/agext/levenshtein/NOTICE b/vendor/github.com/agext/levenshtein/NOTICE
deleted file mode 100644
index eaffaab9..00000000
--- a/vendor/github.com/agext/levenshtein/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-Alrux Go EXTensions (AGExt) - package levenshtein
-Copyright 2016 ALRUX Inc.
-
-This product includes software developed at ALRUX Inc.
-(http://www.alrux.com/).
diff --git a/vendor/github.com/agext/levenshtein/README.md b/vendor/github.com/agext/levenshtein/README.md
deleted file mode 100644
index d9a8ce16..00000000
--- a/vendor/github.com/agext/levenshtein/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# A Go package for calculating the Levenshtein distance between two strings
-
-[](https://github.com/agext/levenshtein/releases/latest)
-[](https://godoc.org/github.com/agext/levenshtein)
-[](https://travis-ci.org/agext/levenshtein)
-[](https://coveralls.io/github/agext/levenshtein)
-[](https://goreportcard.com/report/github.com/agext/levenshtein)
-
-
-This package implements distance and similarity metrics for strings, based on the Levenshtein measure, in [Go](http://golang.org).
-
-## Project Status
-
-v1.2.3 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis.
-
-This package is being actively maintained. If you encounter any problems or have any suggestions for improvement, please [open an issue](https://github.com/agext/levenshtein/issues). Pull requests are welcome.
-
-## Overview
-
-The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0.
-
-A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded.
-
-The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0.
-
-The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest.
-
-The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed.
-
-## Installation
-
-```
-go get github.com/agext/levenshtein
-```
-
-## License
-
-Package levenshtein is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
diff --git a/vendor/github.com/agext/levenshtein/levenshtein.go b/vendor/github.com/agext/levenshtein/levenshtein.go
deleted file mode 100644
index 56d719b8..00000000
--- a/vendor/github.com/agext/levenshtein/levenshtein.go
+++ /dev/null
@@ -1,290 +0,0 @@
-// Copyright 2016 ALRUX Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package levenshtein implements distance and similarity metrics for strings, based on the Levenshtein measure.
-
-The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0.
-
-A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded.
-
-The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0.
-
-The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest.
-
-The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed.
-*/
-package levenshtein
-
-// Calculate determines the Levenshtein distance between two strings, using
-// the given costs for each edit operation. It returns the distance along with
-// the lengths of the longest common prefix and suffix.
-//
-// If maxCost is non-zero, the calculation stops as soon as the distance is determined
-// to be greater than maxCost. Therefore, any return value higher than maxCost is a
-// lower bound for the actual distance.
-func Calculate(str1, str2 []rune, maxCost, insCost, subCost, delCost int) (dist, prefixLen, suffixLen int) {
- l1, l2 := len(str1), len(str2)
- // trim common prefix, if any, as it doesn't affect the distance
- for ; prefixLen < l1 && prefixLen < l2; prefixLen++ {
- if str1[prefixLen] != str2[prefixLen] {
- break
- }
- }
- str1, str2 = str1[prefixLen:], str2[prefixLen:]
- l1 -= prefixLen
- l2 -= prefixLen
- // trim common suffix, if any, as it doesn't affect the distance
- for 0 < l1 && 0 < l2 {
- if str1[l1-1] != str2[l2-1] {
- str1, str2 = str1[:l1], str2[:l2]
- break
- }
- l1--
- l2--
- suffixLen++
- }
- // if the first string is empty, the distance is the length of the second string times the cost of insertion
- if l1 == 0 {
- dist = l2 * insCost
- return
- }
- // if the second string is empty, the distance is the length of the first string times the cost of deletion
- if l2 == 0 {
- dist = l1 * delCost
- return
- }
-
- // variables used in inner "for" loops
- var y, dy, c, l int
-
- // if maxCost is greater than or equal to the maximum possible distance, it's equivalent to 'unlimited'
- if maxCost > 0 {
- if subCost < delCost+insCost {
- if maxCost >= l1*subCost+(l2-l1)*insCost {
- maxCost = 0
- }
- } else {
- if maxCost >= l1*delCost+l2*insCost {
- maxCost = 0
- }
- }
- }
-
- if maxCost > 0 {
- // prefer the longer string first, to minimize time;
- // a swap also transposes the meanings of insertion and deletion.
- if l1 < l2 {
- str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost
- }
-
- // the length differential times cost of deletion is a lower bound for the cost;
- // if it is higher than the maxCost, there is no point going into the main calculation.
- if dist = (l1 - l2) * delCost; dist > maxCost {
- return
- }
-
- d := make([]int, l1+1)
-
- // offset and length of d in the current row
- doff, dlen := 0, 1
- for y, dy = 1, delCost; y <= l1 && dy <= maxCost; dlen++ {
- d[y] = dy
- y++
- dy = y * delCost
- }
- // fmt.Printf("%q -> %q: init doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, doff, dlen, doff, doff+dlen, d[doff:doff+dlen])
-
- for x := 0; x < l2; x++ {
- dy, d[doff] = d[doff], d[doff]+insCost
- for doff < l1 && d[doff] > maxCost && dlen > 0 {
- if str1[doff] != str2[x] {
- dy += subCost
- }
- doff++
- dlen--
- if c = d[doff] + insCost; c < dy {
- dy = c
- }
- dy, d[doff] = d[doff], dy
- }
- for y, l = doff, doff+dlen-1; y < l; dy, d[y] = d[y], dy {
- if str1[y] != str2[x] {
- dy += subCost
- }
- if c = d[y] + delCost; c < dy {
- dy = c
- }
- y++
- if c = d[y] + insCost; c < dy {
- dy = c
- }
- }
- if y < l1 {
- if str1[y] != str2[x] {
- dy += subCost
- }
- if c = d[y] + delCost; c < dy {
- dy = c
- }
- for ; dy <= maxCost && y < l1; dy, d[y] = dy+delCost, dy {
- y++
- dlen++
- }
- }
- // fmt.Printf("%q -> %q: x=%d doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, x, doff, dlen, doff, doff+dlen, d[doff:doff+dlen])
- if dlen == 0 {
- dist = maxCost + 1
- return
- }
- }
- if doff+dlen-1 < l1 {
- dist = maxCost + 1
- return
- }
- dist = d[l1]
- } else {
- // ToDo: This is O(l1*l2) time and O(min(l1,l2)) space; investigate if it is
- // worth to implement diagonal approach - O(l1*(1+dist)) time, up to O(l1*l2) space
- // http://www.csse.monash.edu.au/~lloyd/tildeStrings/Alignment/92.IPL.html
-
- // prefer the shorter string first, to minimize space; time is O(l1*l2) anyway;
- // a swap also transposes the meanings of insertion and deletion.
- if l1 > l2 {
- str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost
- }
- d := make([]int, l1+1)
-
- for y = 1; y <= l1; y++ {
- d[y] = y * delCost
- }
- for x := 0; x < l2; x++ {
- dy, d[0] = d[0], d[0]+insCost
- for y = 0; y < l1; dy, d[y] = d[y], dy {
- if str1[y] != str2[x] {
- dy += subCost
- }
- if c = d[y] + delCost; c < dy {
- dy = c
- }
- y++
- if c = d[y] + insCost; c < dy {
- dy = c
- }
- }
- }
- dist = d[l1]
- }
-
- return
-}
-
-// Distance returns the Levenshtein distance between str1 and str2, using the
-// default or provided cost values. Pass nil for the third argument to use the
-// default cost of 1 for all three operations, with no maximum.
-func Distance(str1, str2 string, p *Params) int {
- if p == nil {
- p = defaultParams
- }
- dist, _, _ := Calculate([]rune(str1), []rune(str2), p.maxCost, p.insCost, p.subCost, p.delCost)
- return dist
-}
-
-// Similarity returns a score in the range of 0..1 for how similar the two strings are.
-// A score of 1 means the strings are identical, and 0 means they have nothing in common.
-//
-// A nil third argument uses the default cost of 1 for all three operations.
-//
-// If a non-zero MinScore value is provided in the parameters, scores lower than it
-// will be returned as 0.
-func Similarity(str1, str2 string, p *Params) float64 {
- return Match(str1, str2, p.Clone().BonusThreshold(1.1)) // guaranteed no bonus
-}
-
-// Match returns a similarity score adjusted by the same method as proposed by Winkler for
-// the Jaro distance - giving a bonus to string pairs that share a common prefix, only if their
-// similarity score is already over a threshold.
-//
-// The score is in the range of 0..1, with 1 meaning the strings are identical,
-// and 0 meaning they have nothing in common.
-//
-// A nil third argument uses the default cost of 1 for all three operations, maximum length of
-// common prefix to consider for bonus of 4, scaling factor of 0.1, and bonus threshold of 0.7.
-//
-// If a non-zero MinScore value is provided in the parameters, scores lower than it
-// will be returned as 0.
-func Match(str1, str2 string, p *Params) float64 {
- s1, s2 := []rune(str1), []rune(str2)
- l1, l2 := len(s1), len(s2)
- // two empty strings are identical; shortcut also avoids divByZero issues later on.
- if l1 == 0 && l2 == 0 {
- return 1
- }
-
- if p == nil {
- p = defaultParams
- }
-
- // a min over 1 can never be satisfied, so the score is 0.
- if p.minScore > 1 {
- return 0
- }
-
- insCost, delCost, maxDist, max := p.insCost, p.delCost, 0, 0
- if l1 > l2 {
- l1, l2, insCost, delCost = l2, l1, delCost, insCost
- }
-
- if p.subCost < delCost+insCost {
- maxDist = l1*p.subCost + (l2-l1)*insCost
- } else {
- maxDist = l1*delCost + l2*insCost
- }
-
- // a zero min is always satisfied, so no need to set a max cost.
- if p.minScore > 0 {
- // if p.minScore is lower than p.bonusThreshold, we can use a simplified formula
- // for the max cost, because a sim score below min cannot receive a bonus.
- if p.minScore < p.bonusThreshold {
- // round down the max - a cost equal to a rounded up max would already be under min.
- max = int((1 - p.minScore) * float64(maxDist))
- } else {
- // p.minScore <= sim + p.bonusPrefix*p.bonusScale*(1-sim)
- // p.minScore <= (1-dist/maxDist) + p.bonusPrefix*p.bonusScale*(1-(1-dist/maxDist))
- // p.minScore <= 1 - dist/maxDist + p.bonusPrefix*p.bonusScale*dist/maxDist
- // 1 - p.minScore >= dist/maxDist - p.bonusPrefix*p.bonusScale*dist/maxDist
- // (1-p.minScore)*maxDist/(1-p.bonusPrefix*p.bonusScale) >= dist
- max = int((1 - p.minScore) * float64(maxDist) / (1 - float64(p.bonusPrefix)*p.bonusScale))
- }
- }
-
- dist, pl, _ := Calculate(s1, s2, max, p.insCost, p.subCost, p.delCost)
- if max > 0 && dist > max {
- return 0
- }
- sim := 1 - float64(dist)/float64(maxDist)
-
- if sim >= p.bonusThreshold && sim < 1 && p.bonusPrefix > 0 && p.bonusScale > 0 {
- if pl > p.bonusPrefix {
- pl = p.bonusPrefix
- }
- sim += float64(pl) * p.bonusScale * (1 - sim)
- }
-
- if sim < p.minScore {
- return 0
- }
-
- return sim
-}
diff --git a/vendor/github.com/agext/levenshtein/params.go b/vendor/github.com/agext/levenshtein/params.go
deleted file mode 100644
index a85727b3..00000000
--- a/vendor/github.com/agext/levenshtein/params.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2016 ALRUX Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package levenshtein
-
-// Params represents a set of parameter values for the various formulas involved
-// in the calculation of the Levenshtein string metrics.
-type Params struct {
- insCost int
- subCost int
- delCost int
- maxCost int
- minScore float64
- bonusPrefix int
- bonusScale float64
- bonusThreshold float64
-}
-
-var (
- defaultParams = NewParams()
-)
-
-// NewParams creates a new set of parameters and initializes it with the default values.
-func NewParams() *Params {
- return &Params{
- insCost: 1,
- subCost: 1,
- delCost: 1,
- maxCost: 0,
- minScore: 0,
- bonusPrefix: 4,
- bonusScale: .1,
- bonusThreshold: .7,
- }
-}
-
-// Clone returns a pointer to a copy of the receiver parameter set, or of a new
-// default parameter set if the receiver is nil.
-func (p *Params) Clone() *Params {
- if p == nil {
- return NewParams()
- }
- return &Params{
- insCost: p.insCost,
- subCost: p.subCost,
- delCost: p.delCost,
- maxCost: p.maxCost,
- minScore: p.minScore,
- bonusPrefix: p.bonusPrefix,
- bonusScale: p.bonusScale,
- bonusThreshold: p.bonusThreshold,
- }
-}
-
-// InsCost overrides the default value of 1 for the cost of insertion.
-// The new value must be zero or positive.
-func (p *Params) InsCost(v int) *Params {
- if v >= 0 {
- p.insCost = v
- }
- return p
-}
-
-// SubCost overrides the default value of 1 for the cost of substitution.
-// The new value must be zero or positive.
-func (p *Params) SubCost(v int) *Params {
- if v >= 0 {
- p.subCost = v
- }
- return p
-}
-
-// DelCost overrides the default value of 1 for the cost of deletion.
-// The new value must be zero or positive.
-func (p *Params) DelCost(v int) *Params {
- if v >= 0 {
- p.delCost = v
- }
- return p
-}
-
-// MaxCost overrides the default value of 0 (meaning unlimited) for the maximum cost.
-// The calculation of Distance() stops when the result is guaranteed to exceed
-// this maximum, returning a lower-bound rather than exact value.
-// The new value must be zero or positive.
-func (p *Params) MaxCost(v int) *Params {
- if v >= 0 {
- p.maxCost = v
- }
- return p
-}
-
-// MinScore overrides the default value of 0 for the minimum similarity score.
-// Scores below this threshold are returned as 0 by Similarity() and Match().
-// The new value must be zero or positive. Note that a minimum greater than 1
-// can never be satisfied, resulting in a score of 0 for any pair of strings.
-func (p *Params) MinScore(v float64) *Params {
- if v >= 0 {
- p.minScore = v
- }
- return p
-}
-
-// BonusPrefix overrides the default value for the maximum length of
-// common prefix to be considered for bonus by Match().
-// The new value must be zero or positive.
-func (p *Params) BonusPrefix(v int) *Params {
- if v >= 0 {
- p.bonusPrefix = v
- }
- return p
-}
-
-// BonusScale overrides the default value for the scaling factor used by Match()
-// in calculating the bonus.
-// The new value must be zero or positive. To guarantee that the similarity score
-// remains in the interval 0..1, this scaling factor is not allowed to exceed
-// 1 / BonusPrefix.
-func (p *Params) BonusScale(v float64) *Params {
- if v >= 0 {
- p.bonusScale = v
- }
-
- // the bonus cannot exceed (1-sim), or the score may become greater than 1.
- if float64(p.bonusPrefix)*p.bonusScale > 1 {
- p.bonusScale = 1 / float64(p.bonusPrefix)
- }
-
- return p
-}
-
-// BonusThreshold overrides the default value for the minimum similarity score
-// for which Match() can assign a bonus.
-// The new value must be zero or positive. Note that a threshold greater than 1
-// effectively makes Match() become the equivalent of Similarity().
-func (p *Params) BonusThreshold(v float64) *Params {
- if v >= 0 {
- p.bonusThreshold = v
- }
- return p
-}
diff --git a/vendor/github.com/agext/levenshtein/test.sh b/vendor/github.com/agext/levenshtein/test.sh
deleted file mode 100644
index c5ed7246..00000000
--- a/vendor/github.com/agext/levenshtein/test.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-set -ev
-
-if [[ "$1" == "goveralls" ]]; then
- echo "Testing with goveralls..."
- go get github.com/mattn/goveralls
- $HOME/gopath/bin/goveralls -service=travis-ci
-else
- echo "Testing with go test..."
- go test -v ./...
-fi
diff --git a/vendor/github.com/apparentlymart/go-textseg/v15/LICENSE b/vendor/github.com/apparentlymart/go-textseg/v15/LICENSE
deleted file mode 100644
index 684b03b4..00000000
--- a/vendor/github.com/apparentlymart/go-textseg/v15/LICENSE
+++ /dev/null
@@ -1,95 +0,0 @@
-Copyright (c) 2017 Martin Atkins
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
----------
-
-Unicode table generation programs are under a separate copyright and license:
-
-Copyright (c) 2014 Couchbase, Inc.
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
-except in compliance with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the
-License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
-either express or implied. See the License for the specific language governing permissions
-and limitations under the License.
-
----------
-
-Grapheme break data is provided as part of the Unicode character database,
-copright 2016 Unicode, Inc, which is provided with the following license:
-
-Unicode Data Files include all data files under the directories
-http://www.unicode.org/Public/, http://www.unicode.org/reports/,
-http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
-http://www.unicode.org/utility/trac/browser/.
-
-Unicode Data Files do not include PDF online code charts under the
-directory http://www.unicode.org/Public/.
-
-Software includes any source code published in the Unicode Standard
-or under the directories
-http://www.unicode.org/Public/, http://www.unicode.org/reports/,
-http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
-http://www.unicode.org/utility/trac/browser/.
-
-NOTICE TO USER: Carefully read the following legal agreement.
-BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S
-DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"),
-YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
-TERMS AND CONDITIONS OF THIS AGREEMENT.
-IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE
-THE DATA FILES OR SOFTWARE.
-
-COPYRIGHT AND PERMISSION NOTICE
-
-Copyright © 1991-2017 Unicode, Inc. All rights reserved.
-Distributed under the Terms of Use in http://www.unicode.org/copyright.html.
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of the Unicode data files and any associated documentation
-(the "Data Files") or Unicode software and any associated documentation
-(the "Software") to deal in the Data Files or Software
-without restriction, including without limitation the rights to use,
-copy, modify, merge, publish, distribute, and/or sell copies of
-the Data Files or Software, and to permit persons to whom the Data Files
-or Software are furnished to do so, provided that either
-(a) this copyright and permission notice appear with all copies
-of the Data Files or Software, or
-(b) this copyright and permission notice appear in associated
-Documentation.
-
-THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
-ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
-WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT OF THIRD PARTY RIGHTS.
-IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
-NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
-DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
-DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
-TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-PERFORMANCE OF THE DATA FILES OR SOFTWARE.
-
-Except as contained in this notice, the name of a copyright holder
-shall not be used in advertising or otherwise to promote the sale,
-use or other dealings in these Data Files or Software without prior
-written authorization of the copyright holder.
diff --git a/vendor/github.com/apparentlymart/go-textseg/v15/textseg/all_tokens.go b/vendor/github.com/apparentlymart/go-textseg/v15/textseg/all_tokens.go
deleted file mode 100644
index 5752e9ef..00000000
--- a/vendor/github.com/apparentlymart/go-textseg/v15/textseg/all_tokens.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package textseg
-
-import (
- "bufio"
- "bytes"
-)
-
-// AllTokens is a utility that uses a bufio.SplitFunc to produce a slice of
-// all of the recognized tokens in the given buffer.
-func AllTokens(buf []byte, splitFunc bufio.SplitFunc) ([][]byte, error) {
- scanner := bufio.NewScanner(bytes.NewReader(buf))
- scanner.Split(splitFunc)
- var ret [][]byte
- for scanner.Scan() {
- ret = append(ret, scanner.Bytes())
- }
- return ret, scanner.Err()
-}
-
-// TokenCount is a utility that uses a bufio.SplitFunc to count the number of
-// recognized tokens in the given buffer.
-func TokenCount(buf []byte, splitFunc bufio.SplitFunc) (int, error) {
- scanner := bufio.NewScanner(bytes.NewReader(buf))
- scanner.Split(splitFunc)
- var ret int
- for scanner.Scan() {
- ret++
- }
- return ret, scanner.Err()
-}
diff --git a/vendor/github.com/apparentlymart/go-textseg/v15/textseg/emoji_table.rl b/vendor/github.com/apparentlymart/go-textseg/v15/textseg/emoji_table.rl
deleted file mode 100644
index 10b93e47..00000000
--- a/vendor/github.com/apparentlymart/go-textseg/v15/textseg/emoji_table.rl
+++ /dev/null
@@ -1,545 +0,0 @@
-# The following Ragel file was autogenerated with unicode2ragel.rb
-# from: https://www.unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt
-#
-# It defines ["Extended_Pictographic"].
-#
-# To use this, make sure that your alphtype is set to byte,
-# and that your input is in utf8.
-
-%%{
- machine Emoji;
-
- Extended_Pictographic =
- 0xC2 0xA9 #E0.6 [1] (©️) copyright
- | 0xC2 0xAE #E0.6 [1] (®️) registered
- | 0xE2 0x80 0xBC #E0.6 [1] (‼️) double exclamation mark
- | 0xE2 0x81 0x89 #E0.6 [1] (⁉️) exclamation question ...
- | 0xE2 0x84 0xA2 #E0.6 [1] (™️) trade mark
- | 0xE2 0x84 0xB9 #E0.6 [1] (ℹ️) information
- | 0xE2 0x86 0x94..0x99 #E0.6 [6] (↔️..↙️) left-right arrow..do...
- | 0xE2 0x86 0xA9..0xAA #E0.6 [2] (↩️..↪️) right arrow curving ...
- | 0xE2 0x8C 0x9A..0x9B #E0.6 [2] (⌚..⌛) watch..hourglass done
- | 0xE2 0x8C 0xA8 #E1.0 [1] (⌨️) keyboard
- | 0xE2 0x8E 0x88 #E0.0 [1] (⎈) HELM SYMBOL
- | 0xE2 0x8F 0x8F #E1.0 [1] (⏏️) eject button
- | 0xE2 0x8F 0xA9..0xAC #E0.6 [4] (⏩..⏬) fast-forward button..f...
- | 0xE2 0x8F 0xAD..0xAE #E0.7 [2] (⏭️..⏮️) next track button..l...
- | 0xE2 0x8F 0xAF #E1.0 [1] (⏯️) play or pause button
- | 0xE2 0x8F 0xB0 #E0.6 [1] (⏰) alarm clock
- | 0xE2 0x8F 0xB1..0xB2 #E1.0 [2] (⏱️..⏲️) stopwatch..timer clock
- | 0xE2 0x8F 0xB3 #E0.6 [1] (⏳) hourglass not done
- | 0xE2 0x8F 0xB8..0xBA #E0.7 [3] (⏸️..⏺️) pause button..record...
- | 0xE2 0x93 0x82 #E0.6 [1] (Ⓜ️) circled M
- | 0xE2 0x96 0xAA..0xAB #E0.6 [2] (▪️..▫️) black small square.....
- | 0xE2 0x96 0xB6 #E0.6 [1] (▶️) play button
- | 0xE2 0x97 0x80 #E0.6 [1] (◀️) reverse button
- | 0xE2 0x97 0xBB..0xBE #E0.6 [4] (◻️..◾) white medium square.....
- | 0xE2 0x98 0x80..0x81 #E0.6 [2] (☀️..☁️) sun..cloud
- | 0xE2 0x98 0x82..0x83 #E0.7 [2] (☂️..☃️) umbrella..snowman
- | 0xE2 0x98 0x84 #E1.0 [1] (☄️) comet
- | 0xE2 0x98 0x85 #E0.0 [1] (★) BLACK STAR
- | 0xE2 0x98 0x87..0x8D #E0.0 [7] (☇..☍) LIGHTNING..OPPOSITION
- | 0xE2 0x98 0x8E #E0.6 [1] (☎️) telephone
- | 0xE2 0x98 0x8F..0x90 #E0.0 [2] (☏..☐) WHITE TELEPHONE..BALLO...
- | 0xE2 0x98 0x91 #E0.6 [1] (☑️) check box with check
- | 0xE2 0x98 0x92 #E0.0 [1] (☒) BALLOT BOX WITH X
- | 0xE2 0x98 0x94..0x95 #E0.6 [2] (☔..☕) umbrella with rain dro...
- | 0xE2 0x98 0x96..0x97 #E0.0 [2] (☖..☗) WHITE SHOGI PIECE..BLA...
- | 0xE2 0x98 0x98 #E1.0 [1] (☘️) shamrock
- | 0xE2 0x98 0x99..0x9C #E0.0 [4] (☙..☜) REVERSED ROTATED FLORA...
- | 0xE2 0x98 0x9D #E0.6 [1] (☝️) index pointing up
- | 0xE2 0x98 0x9E..0x9F #E0.0 [2] (☞..☟) WHITE RIGHT POINTING I...
- | 0xE2 0x98 0xA0 #E1.0 [1] (☠️) skull and crossbones
- | 0xE2 0x98 0xA1 #E0.0 [1] (☡) CAUTION SIGN
- | 0xE2 0x98 0xA2..0xA3 #E1.0 [2] (☢️..☣️) radioactive..biohazard
- | 0xE2 0x98 0xA4..0xA5 #E0.0 [2] (☤..☥) CADUCEUS..ANKH
- | 0xE2 0x98 0xA6 #E1.0 [1] (☦️) orthodox cross
- | 0xE2 0x98 0xA7..0xA9 #E0.0 [3] (☧..☩) CHI RHO..CROSS OF JERU...
- | 0xE2 0x98 0xAA #E0.7 [1] (☪️) star and crescent
- | 0xE2 0x98 0xAB..0xAD #E0.0 [3] (☫..☭) FARSI SYMBOL..HAMMER A...
- | 0xE2 0x98 0xAE #E1.0 [1] (☮️) peace symbol
- | 0xE2 0x98 0xAF #E0.7 [1] (☯️) yin yang
- | 0xE2 0x98 0xB0..0xB7 #E0.0 [8] (☰..☷) TRIGRAM FOR HEAVEN..TR...
- | 0xE2 0x98 0xB8..0xB9 #E0.7 [2] (☸️..☹️) wheel of dharma..fro...
- | 0xE2 0x98 0xBA #E0.6 [1] (☺️) smiling face
- | 0xE2 0x98 0xBB..0xBF #E0.0 [5] (☻..☿) BLACK SMILING FACE..ME...
- | 0xE2 0x99 0x80 #E4.0 [1] (♀️) female sign
- | 0xE2 0x99 0x81 #E0.0 [1] (♁) EARTH
- | 0xE2 0x99 0x82 #E4.0 [1] (♂️) male sign
- | 0xE2 0x99 0x83..0x87 #E0.0 [5] (♃..♇) JUPITER..PLUTO
- | 0xE2 0x99 0x88..0x93 #E0.6 [12] (♈..♓) Aries..Pisces
- | 0xE2 0x99 0x94..0x9E #E0.0 [11] (♔..♞) WHITE CHESS KING..BLAC...
- | 0xE2 0x99 0x9F #E11.0 [1] (♟️) chess pawn
- | 0xE2 0x99 0xA0 #E0.6 [1] (♠️) spade suit
- | 0xE2 0x99 0xA1..0xA2 #E0.0 [2] (♡..♢) WHITE HEART SUIT..WHIT...
- | 0xE2 0x99 0xA3 #E0.6 [1] (♣️) club suit
- | 0xE2 0x99 0xA4 #E0.0 [1] (♤) WHITE SPADE SUIT
- | 0xE2 0x99 0xA5..0xA6 #E0.6 [2] (♥️..♦️) heart suit..diamond ...
- | 0xE2 0x99 0xA7 #E0.0 [1] (♧) WHITE CLUB SUIT
- | 0xE2 0x99 0xA8 #E0.6 [1] (♨️) hot springs
- | 0xE2 0x99 0xA9..0xBA #E0.0 [18] (♩..♺) QUARTER NOTE..RECYCLIN...
- | 0xE2 0x99 0xBB #E0.6 [1] (♻️) recycling symbol
- | 0xE2 0x99 0xBC..0xBD #E0.0 [2] (♼..♽) RECYCLED PAPER SYMBOL....
- | 0xE2 0x99 0xBE #E11.0 [1] (♾️) infinity
- | 0xE2 0x99 0xBF #E0.6 [1] (♿) wheelchair symbol
- | 0xE2 0x9A 0x80..0x85 #E0.0 [6] (⚀..⚅) DIE FACE-1..DIE FACE-6
- | 0xE2 0x9A 0x90..0x91 #E0.0 [2] (⚐..⚑) WHITE FLAG..BLACK FLAG
- | 0xE2 0x9A 0x92 #E1.0 [1] (⚒️) hammer and pick
- | 0xE2 0x9A 0x93 #E0.6 [1] (⚓) anchor
- | 0xE2 0x9A 0x94 #E1.0 [1] (⚔️) crossed swords
- | 0xE2 0x9A 0x95 #E4.0 [1] (⚕️) medical symbol
- | 0xE2 0x9A 0x96..0x97 #E1.0 [2] (⚖️..⚗️) balance scale..alembic
- | 0xE2 0x9A 0x98 #E0.0 [1] (⚘) FLOWER
- | 0xE2 0x9A 0x99 #E1.0 [1] (⚙️) gear
- | 0xE2 0x9A 0x9A #E0.0 [1] (⚚) STAFF OF HERMES
- | 0xE2 0x9A 0x9B..0x9C #E1.0 [2] (⚛️..⚜️) atom symbol..fleur-d...
- | 0xE2 0x9A 0x9D..0x9F #E0.0 [3] (⚝..⚟) OUTLINED WHITE STAR..T...
- | 0xE2 0x9A 0xA0..0xA1 #E0.6 [2] (⚠️..⚡) warning..high voltage
- | 0xE2 0x9A 0xA2..0xA6 #E0.0 [5] (⚢..⚦) DOUBLED FEMALE SIGN..M...
- | 0xE2 0x9A 0xA7 #E13.0 [1] (⚧️) transgender symbol
- | 0xE2 0x9A 0xA8..0xA9 #E0.0 [2] (⚨..⚩) VERTICAL MALE WITH STR...
- | 0xE2 0x9A 0xAA..0xAB #E0.6 [2] (⚪..⚫) white circle..black ci...
- | 0xE2 0x9A 0xAC..0xAF #E0.0 [4] (⚬..⚯) MEDIUM SMALL WHITE CIR...
- | 0xE2 0x9A 0xB0..0xB1 #E1.0 [2] (⚰️..⚱️) coffin..funeral urn
- | 0xE2 0x9A 0xB2..0xBC #E0.0 [11] (⚲..⚼) NEUTER..SESQUIQUADRATE
- | 0xE2 0x9A 0xBD..0xBE #E0.6 [2] (⚽..⚾) soccer ball..baseball
- | 0xE2 0x9A 0xBF..0xFF #E0.0 [5] (⚿..⛃) SQUARED KEY..BLACK DRA...
- | 0xE2 0x9B 0x00..0x83 #
- | 0xE2 0x9B 0x84..0x85 #E0.6 [2] (⛄..⛅) snowman without snow.....
- | 0xE2 0x9B 0x86..0x87 #E0.0 [2] (⛆..⛇) RAIN..BLACK SNOWMAN
- | 0xE2 0x9B 0x88 #E0.7 [1] (⛈️) cloud with lightning ...
- | 0xE2 0x9B 0x89..0x8D #E0.0 [5] (⛉..⛍) TURNED WHITE SHOGI PIE...
- | 0xE2 0x9B 0x8E #E0.6 [1] (⛎) Ophiuchus
- | 0xE2 0x9B 0x8F #E0.7 [1] (⛏️) pick
- | 0xE2 0x9B 0x90 #E0.0 [1] (⛐) CAR SLIDING
- | 0xE2 0x9B 0x91 #E0.7 [1] (⛑️) rescue worker’s helmet
- | 0xE2 0x9B 0x92 #E0.0 [1] (⛒) CIRCLED CROSSING LANES
- | 0xE2 0x9B 0x93 #E0.7 [1] (⛓️) chains
- | 0xE2 0x9B 0x94 #E0.6 [1] (⛔) no entry
- | 0xE2 0x9B 0x95..0xA8 #E0.0 [20] (⛕..⛨) ALTERNATE ONE-WAY LEFT...
- | 0xE2 0x9B 0xA9 #E0.7 [1] (⛩️) shinto shrine
- | 0xE2 0x9B 0xAA #E0.6 [1] (⛪) church
- | 0xE2 0x9B 0xAB..0xAF #E0.0 [5] (⛫..⛯) CASTLE..MAP SYMBOL FOR...
- | 0xE2 0x9B 0xB0..0xB1 #E0.7 [2] (⛰️..⛱️) mountain..umbrella o...
- | 0xE2 0x9B 0xB2..0xB3 #E0.6 [2] (⛲..⛳) fountain..flag in hole
- | 0xE2 0x9B 0xB4 #E0.7 [1] (⛴️) ferry
- | 0xE2 0x9B 0xB5 #E0.6 [1] (⛵) sailboat
- | 0xE2 0x9B 0xB6 #E0.0 [1] (⛶) SQUARE FOUR CORNERS
- | 0xE2 0x9B 0xB7..0xB9 #E0.7 [3] (⛷️..⛹️) skier..person bounci...
- | 0xE2 0x9B 0xBA #E0.6 [1] (⛺) tent
- | 0xE2 0x9B 0xBB..0xBC #E0.0 [2] (⛻..⛼) JAPANESE BANK SYMBOL.....
- | 0xE2 0x9B 0xBD #E0.6 [1] (⛽) fuel pump
- | 0xE2 0x9B 0xBE..0xFF #E0.0 [4] (⛾..✁) CUP ON BLACK SQUARE..U...
- | 0xE2 0x9C 0x00..0x81 #
- | 0xE2 0x9C 0x82 #E0.6 [1] (✂️) scissors
- | 0xE2 0x9C 0x83..0x84 #E0.0 [2] (✃..✄) LOWER BLADE SCISSORS.....
- | 0xE2 0x9C 0x85 #E0.6 [1] (✅) check mark button
- | 0xE2 0x9C 0x88..0x8C #E0.6 [5] (✈️..✌️) airplane..victory hand
- | 0xE2 0x9C 0x8D #E0.7 [1] (✍️) writing hand
- | 0xE2 0x9C 0x8E #E0.0 [1] (✎) LOWER RIGHT PENCIL
- | 0xE2 0x9C 0x8F #E0.6 [1] (✏️) pencil
- | 0xE2 0x9C 0x90..0x91 #E0.0 [2] (✐..✑) UPPER RIGHT PENCIL..WH...
- | 0xE2 0x9C 0x92 #E0.6 [1] (✒️) black nib
- | 0xE2 0x9C 0x94 #E0.6 [1] (✔️) check mark
- | 0xE2 0x9C 0x96 #E0.6 [1] (✖️) multiply
- | 0xE2 0x9C 0x9D #E0.7 [1] (✝️) latin cross
- | 0xE2 0x9C 0xA1 #E0.7 [1] (✡️) star of David
- | 0xE2 0x9C 0xA8 #E0.6 [1] (✨) sparkles
- | 0xE2 0x9C 0xB3..0xB4 #E0.6 [2] (✳️..✴️) eight-spoked asteris...
- | 0xE2 0x9D 0x84 #E0.6 [1] (❄️) snowflake
- | 0xE2 0x9D 0x87 #E0.6 [1] (❇️) sparkle
- | 0xE2 0x9D 0x8C #E0.6 [1] (❌) cross mark
- | 0xE2 0x9D 0x8E #E0.6 [1] (❎) cross mark button
- | 0xE2 0x9D 0x93..0x95 #E0.6 [3] (❓..❕) red question mark..whi...
- | 0xE2 0x9D 0x97 #E0.6 [1] (❗) red exclamation mark
- | 0xE2 0x9D 0xA3 #E1.0 [1] (❣️) heart exclamation
- | 0xE2 0x9D 0xA4 #E0.6 [1] (❤️) red heart
- | 0xE2 0x9D 0xA5..0xA7 #E0.0 [3] (❥..❧) ROTATED HEAVY BLACK HE...
- | 0xE2 0x9E 0x95..0x97 #E0.6 [3] (➕..➗) plus..divide
- | 0xE2 0x9E 0xA1 #E0.6 [1] (➡️) right arrow
- | 0xE2 0x9E 0xB0 #E0.6 [1] (➰) curly loop
- | 0xE2 0x9E 0xBF #E1.0 [1] (➿) double curly loop
- | 0xE2 0xA4 0xB4..0xB5 #E0.6 [2] (⤴️..⤵️) right arrow curving ...
- | 0xE2 0xAC 0x85..0x87 #E0.6 [3] (⬅️..⬇️) left arrow..down arrow
- | 0xE2 0xAC 0x9B..0x9C #E0.6 [2] (⬛..⬜) black large square..wh...
- | 0xE2 0xAD 0x90 #E0.6 [1] (⭐) star
- | 0xE2 0xAD 0x95 #E0.6 [1] (⭕) hollow red circle
- | 0xE3 0x80 0xB0 #E0.6 [1] (〰️) wavy dash
- | 0xE3 0x80 0xBD #E0.6 [1] (〽️) part alternation mark
- | 0xE3 0x8A 0x97 #E0.6 [1] (㊗️) Japanese “congratulat...
- | 0xE3 0x8A 0x99 #E0.6 [1] (㊙️) Japanese “secret” button
- | 0xF0 0x9F 0x80 0x80..0x83 #E0.0 [4] (🀀..🀃) MAHJONG TILE EAST W...
- | 0xF0 0x9F 0x80 0x84 #E0.6 [1] (🀄) mahjong red dragon
- | 0xF0 0x9F 0x80 0x85..0xFF #E0.0 [202] (🀅..🃎) MAHJONG TILE ...
- | 0xF0 0x9F 0x81..0x82 0x00..0xFF #
- | 0xF0 0x9F 0x83 0x00..0x8E #
- | 0xF0 0x9F 0x83 0x8F #E0.6 [1] (🃏) joker
- | 0xF0 0x9F 0x83 0x90..0xBF #E0.0 [48] (..) ..<...
- | 0xF0 0x9F 0x84 0x8D..0x8F #E0.0 [3] (🄍..🄏) CIRCLED ZERO WITH S...
- | 0xF0 0x9F 0x84 0xAF #E0.0 [1] (🄯) COPYLEFT SYMBOL
- | 0xF0 0x9F 0x85 0xAC..0xAF #E0.0 [4] (🅬..🅯) RAISED MR SIGN..CIR...
- | 0xF0 0x9F 0x85 0xB0..0xB1 #E0.6 [2] (🅰️..🅱️) A button (blood t...
- | 0xF0 0x9F 0x85 0xBE..0xBF #E0.6 [2] (🅾️..🅿️) O button (blood t...
- | 0xF0 0x9F 0x86 0x8E #E0.6 [1] (🆎) AB button (blood type)
- | 0xF0 0x9F 0x86 0x91..0x9A #E0.6 [10] (🆑..🆚) CL button..VS button
- | 0xF0 0x9F 0x86 0xAD..0xFF #E0.0 [57] (🆭..) MASK WORK SYMBOL..<...
- | 0xF0 0x9F 0x87 0x00..0xA5 #
- | 0xF0 0x9F 0x88 0x81..0x82 #E0.6 [2] (🈁..🈂️) Japanese “here” bu...
- | 0xF0 0x9F 0x88 0x83..0x8F #E0.0 [13] (..) ..<...
- | 0xF0 0x9F 0x88 0x9A #E0.6 [1] (🈚) Japanese “free of char...
- | 0xF0 0x9F 0x88 0xAF #E0.6 [1] (🈯) Japanese “reserved” bu...
- | 0xF0 0x9F 0x88 0xB2..0xBA #E0.6 [9] (🈲..🈺) Japanese “prohibite...
- | 0xF0 0x9F 0x88 0xBC..0xBF #E0.0 [4] (..) ..<...
- | 0xF0 0x9F 0x89 0x89..0x8F #E0.0 [7] (..) ..<...
- | 0xF0 0x9F 0x89 0x90..0x91 #E0.6 [2] (🉐..🉑) Japanese “bargain” ...
- | 0xF0 0x9F 0x89 0x92..0xFF #E0.0 [174] (..) ..<...
- | 0xF0 0x9F 0x9B 0x9C #E15.0 [1] (🛜) wireless
- | 0xF0 0x9F 0x9B 0x9D..0x9F #E14.0 [3] (🛝..🛟) playground slide..r...
- | 0xF0 0x9F 0x9B 0xA0..0xA5 #E0.7 [6] (🛠️..🛥️) hammer and wrench...
- | 0xF0 0x9F 0x9B 0xA6..0xA8 #E0.0 [3] (🛦..🛨) UP-POINTING MILITAR...
- | 0xF0 0x9F 0x9B 0xA9 #E0.7 [1] (🛩️) small airplane
- | 0xF0 0x9F 0x9B 0xAA #E0.0 [1] (🛪) NORTHEAST-POINTING AIR...
- | 0xF0 0x9F 0x9B 0xAB..0xAC #E1.0 [2] (🛫..🛬) airplane departure....
- | 0xF0 0x9F 0x9B 0xAD..0xAF #E0.0 [3] (..) ..<...
- | 0xF0 0x9F 0x9B 0xB0 #E0.7 [1] (🛰️) satellite
- | 0xF0 0x9F 0x9B 0xB1..0xB2 #E0.0 [2] (🛱..🛲) ONCOMING FIRE ENGIN...
- | 0xF0 0x9F 0x9B 0xB3 #E0.7 [1] (🛳️) passenger ship
- | 0xF0 0x9F 0x9B 0xB4..0xB6 #E3.0 [3] (🛴..🛶) kick scooter..canoe
- | 0xF0 0x9F 0x9B 0xB7..0xB8 #E5.0 [2] (🛷..🛸) sled..flying saucer
- | 0xF0 0x9F 0x9B 0xB9 #E11.0 [1] (🛹) skateboard
- | 0xF0 0x9F 0x9B 0xBA #E12.0 [1] (🛺) auto rickshaw
- | 0xF0 0x9F 0x9B 0xBB..0xBC #E13.0 [2] (🛻..🛼) pickup truck..rolle...
- | 0xF0 0x9F 0x9B 0xBD..0xBF #E0.0 [3] (..) ..<...
- | 0xF0 0x9F 0x9D 0xB4..0xBF #E0.0 [12] (🝴..🝿) LOT OF FORTUNE..ORCUS
- | 0xF0 0x9F 0x9F 0x95..0x9F #E0.0 [11] (🟕..) CIRCLED TRIANGLE..<...
- | 0xF0 0x9F 0x9F 0xA0..0xAB #E12.0 [12] (🟠..🟫) orange circle..brow...
- | 0xF0 0x9F 0x9F 0xAC..0xAF #E0.0 [4] (..) ..<...
- | 0xF0 0x9F 0x9F 0xB0 #E14.0 [1] (🟰) heavy equals sign
- | 0xF0 0x9F 0x9F 0xB1..0xBF #E0.0 [15] (..) ..<...
- | 0xF0 0x9F 0xA0 0x8C..0x8F #E0.0 [4] (..) ..<...
- | 0xF0 0x9F 0xA1 0x88..0x8F #E0.0 [8] (..) ..<...
- | 0xF0 0x9F 0xA1 0x9A..0x9F #E0.0 [6] (..) ..<...
- | 0xF0 0x9F 0xA2 0x88..0x8F #E0.0 [8] (..) ..<...
- | 0xF0 0x9F 0xA2 0xAE..0xFF #E0.0 [82] (..) ..<...
- | 0xF0 0x9F 0xA3 0x00..0xBF #
- | 0xF0 0x9F 0xA4 0x8C #E13.0 [1] (🤌) pinched fingers
- | 0xF0 0x9F 0xA4 0x8D..0x8F #E12.0 [3] (🤍..🤏) white heart..pinchi...
- | 0xF0 0x9F 0xA4 0x90..0x98 #E1.0 [9] (🤐..🤘) zipper-mouth face.....
- | 0xF0 0x9F 0xA4 0x99..0x9E #E3.0 [6] (🤙..🤞) call me hand..cross...
- | 0xF0 0x9F 0xA4 0x9F #E5.0 [1] (🤟) love-you gesture
- | 0xF0 0x9F 0xA4 0xA0..0xA7 #E3.0 [8] (🤠..🤧) cowboy hat face..sn...
- | 0xF0 0x9F 0xA4 0xA8..0xAF #E5.0 [8] (🤨..🤯) face with raised ey...
- | 0xF0 0x9F 0xA4 0xB0 #E3.0 [1] (🤰) pregnant woman
- | 0xF0 0x9F 0xA4 0xB1..0xB2 #E5.0 [2] (🤱..🤲) breast-feeding..pal...
- | 0xF0 0x9F 0xA4 0xB3..0xBA #E3.0 [8] (🤳..🤺) selfie..person fencing
- | 0xF0 0x9F 0xA4 0xBC..0xBE #E3.0 [3] (🤼..🤾) people wrestling..p...
- | 0xF0 0x9F 0xA4 0xBF #E12.0 [1] (🤿) diving mask
- | 0xF0 0x9F 0xA5 0x80..0x85 #E3.0 [6] (🥀..🥅) wilted flower..goal...
- | 0xF0 0x9F 0xA5 0x87..0x8B #E3.0 [5] (🥇..🥋) 1st place medal..ma...
- | 0xF0 0x9F 0xA5 0x8C #E5.0 [1] (🥌) curling stone
- | 0xF0 0x9F 0xA5 0x8D..0x8F #E11.0 [3] (🥍..🥏) lacrosse..flying disc
- | 0xF0 0x9F 0xA5 0x90..0x9E #E3.0 [15] (🥐..🥞) croissant..pancakes
- | 0xF0 0x9F 0xA5 0x9F..0xAB #E5.0 [13] (🥟..🥫) dumpling..canned food
- | 0xF0 0x9F 0xA5 0xAC..0xB0 #E11.0 [5] (🥬..🥰) leafy green..smilin...
- | 0xF0 0x9F 0xA5 0xB1 #E12.0 [1] (🥱) yawning face
- | 0xF0 0x9F 0xA5 0xB2 #E13.0 [1] (🥲) smiling face with tear
- | 0xF0 0x9F 0xA5 0xB3..0xB6 #E11.0 [4] (🥳..🥶) partying face..cold...
- | 0xF0 0x9F 0xA5 0xB7..0xB8 #E13.0 [2] (🥷..🥸) ninja..disguised face
- | 0xF0 0x9F 0xA5 0xB9 #E14.0 [1] (🥹) face holding back tears
- | 0xF0 0x9F 0xA5 0xBA #E11.0 [1] (🥺) pleading face
- | 0xF0 0x9F 0xA5 0xBB #E12.0 [1] (🥻) sari
- | 0xF0 0x9F 0xA5 0xBC..0xBF #E11.0 [4] (🥼..🥿) lab coat..flat shoe
- | 0xF0 0x9F 0xA6 0x80..0x84 #E1.0 [5] (🦀..🦄) crab..unicorn
- | 0xF0 0x9F 0xA6 0x85..0x91 #E3.0 [13] (🦅..🦑) eagle..squid
- | 0xF0 0x9F 0xA6 0x92..0x97 #E5.0 [6] (🦒..🦗) giraffe..cricket
- | 0xF0 0x9F 0xA6 0x98..0xA2 #E11.0 [11] (🦘..🦢) kangaroo..swan
- | 0xF0 0x9F 0xA6 0xA3..0xA4 #E13.0 [2] (🦣..🦤) mammoth..dodo
- | 0xF0 0x9F 0xA6 0xA5..0xAA #E12.0 [6] (🦥..🦪) sloth..oyster
- | 0xF0 0x9F 0xA6 0xAB..0xAD #E13.0 [3] (🦫..🦭) beaver..seal
- | 0xF0 0x9F 0xA6 0xAE..0xAF #E12.0 [2] (🦮..🦯) guide dog..white cane
- | 0xF0 0x9F 0xA6 0xB0..0xB9 #E11.0 [10] (🦰..🦹) red hair..supervillain
- | 0xF0 0x9F 0xA6 0xBA..0xBF #E12.0 [6] (🦺..🦿) safety vest..mechan...
- | 0xF0 0x9F 0xA7 0x80 #E1.0 [1] (🧀) cheese wedge
- | 0xF0 0x9F 0xA7 0x81..0x82 #E11.0 [2] (🧁..🧂) cupcake..salt
- | 0xF0 0x9F 0xA7 0x83..0x8A #E12.0 [8] (🧃..🧊) beverage box..ice
- | 0xF0 0x9F 0xA7 0x8B #E13.0 [1] (🧋) bubble tea
- | 0xF0 0x9F 0xA7 0x8C #E14.0 [1] (🧌) troll
- | 0xF0 0x9F 0xA7 0x8D..0x8F #E12.0 [3] (🧍..🧏) person standing..de...
- | 0xF0 0x9F 0xA7 0x90..0xA6 #E5.0 [23] (🧐..🧦) face with monocle.....
- | 0xF0 0x9F 0xA7 0xA7..0xBF #E11.0 [25] (🧧..🧿) red envelope..nazar...
- | 0xF0 0x9F 0xA8 0x80..0xFF #E0.0 [112] (🨀..) NEUTRAL CHESS KING....
- | 0xF0 0x9F 0xA9 0x00..0xAF #
- | 0xF0 0x9F 0xA9 0xB0..0xB3 #E12.0 [4] (🩰..🩳) ballet shoes..shorts
- | 0xF0 0x9F 0xA9 0xB4 #E13.0 [1] (🩴) thong sandal
- | 0xF0 0x9F 0xA9 0xB5..0xB7 #E15.0 [3] (🩵..🩷) light blue heart..p...
- | 0xF0 0x9F 0xA9 0xB8..0xBA #E12.0 [3] (🩸..🩺) drop of blood..stet...
- | 0xF0 0x9F 0xA9 0xBB..0xBC #E14.0 [2] (🩻..🩼) x-ray..crutch
- | 0xF0 0x9F 0xA9 0xBD..0xBF #E0.0 [3] (..) ..<...
- | 0xF0 0x9F 0xAA 0x80..0x82 #E12.0 [3] (🪀..🪂) yo-yo..parachute
- | 0xF0 0x9F 0xAA 0x83..0x86 #E13.0 [4] (🪃..🪆) boomerang..nesting ...
- | 0xF0 0x9F 0xAA 0x87..0x88 #E15.0 [2] (🪇..🪈) maracas..flute
- | 0xF0 0x9F 0xAA 0x89..0x8F #E0.0 [7] (..) ..<...
- | 0xF0 0x9F 0xAA 0x90..0x95 #E12.0 [6] (🪐..🪕) ringed planet..banjo
- | 0xF0 0x9F 0xAA 0x96..0xA8 #E13.0 [19] (🪖..🪨) military helmet..rock
- | 0xF0 0x9F 0xAA 0xA9..0xAC #E14.0 [4] (🪩..🪬) mirror ball..hamsa
- | 0xF0 0x9F 0xAA 0xAD..0xAF #E15.0 [3] (🪭..🪯) folding hand fan..k...
- | 0xF0 0x9F 0xAA 0xB0..0xB6 #E13.0 [7] (🪰..🪶) fly..feather
- | 0xF0 0x9F 0xAA 0xB7..0xBA #E14.0 [4] (🪷..🪺) lotus..nest with eggs
- | 0xF0 0x9F 0xAA 0xBB..0xBD #E15.0 [3] (🪻..🪽) hyacinth..wing
- | 0xF0 0x9F 0xAA 0xBE #E0.0 [1] ()
- | 0xF0 0x9F 0xAA 0xBF #E15.0 [1] (🪿) goose
- | 0xF0 0x9F 0xAB 0x80..0x82 #E13.0 [3] (🫀..🫂) anatomical heart..p...
- | 0xF0 0x9F 0xAB 0x83..0x85 #E14.0 [3] (🫃..🫅) pregnant man..perso...
- | 0xF0 0x9F 0xAB 0x86..0x8D #E0.0 [8] (..) ..<...
- | 0xF0 0x9F 0xAB 0x8E..0x8F #E15.0 [2] (🫎..🫏) moose..donkey
- | 0xF0 0x9F 0xAB 0x90..0x96 #E13.0 [7] (🫐..🫖) blueberries..teapot
- | 0xF0 0x9F 0xAB 0x97..0x99 #E14.0 [3] (🫗..🫙) pouring liquid..jar
- | 0xF0 0x9F 0xAB 0x9A..0x9B #E15.0 [2] (🫚..🫛) ginger root..pea pod
- | 0xF0 0x9F 0xAB 0x9C..0x9F #E0.0 [4] (..) ..<...
- | 0xF0 0x9F 0xAB 0xA0..0xA7 #E14.0 [8] (🫠..🫧) melting face..bubbles
- | 0xF0 0x9F 0xAB 0xA8 #E15.0 [1] (🫨) shaking face
- | 0xF0 0x9F 0xAB 0xA9..0xAF #E0.0 [7] (..) ..<...
- | 0xF0 0x9F 0xAB 0xB0..0xB6 #E14.0 [7] (🫰..🫶) hand with index fin...
- | 0xF0 0x9F 0xAB 0xB7..0xB8 #E15.0 [2] (🫷..🫸) leftwards pushing h...
- | 0xF0 0x9F 0xAB 0xB9..0xBF #E0.0 [7] (..) ..<...
- | 0xF0 0x9F 0xB0 0x80..0xFF #E0.0[1022] (..) 0; _nacts-- {
- _acts++
- switch _graphclust_actions[_acts-1] {
- case 4:
-//line NONE:1
- ts = p
-
-//line grapheme_clusters.go:4080
- }
- }
-
- _keys = int(_graphclust_key_offsets[cs])
- _trans = int(_graphclust_index_offsets[cs])
-
- _klen = int(_graphclust_single_lengths[cs])
- if _klen > 0 {
- _lower := int(_keys)
- var _mid int
- _upper := int(_keys + _klen - 1)
- for {
- if _upper < _lower {
- break
- }
-
- _mid = _lower + ((_upper - _lower) >> 1)
- switch {
- case data[p] < _graphclust_trans_keys[_mid]:
- _upper = _mid - 1
- case data[p] > _graphclust_trans_keys[_mid]:
- _lower = _mid + 1
- default:
- _trans += int(_mid - int(_keys))
- goto _match
- }
- }
- _keys += _klen
- _trans += _klen
- }
-
- _klen = int(_graphclust_range_lengths[cs])
- if _klen > 0 {
- _lower := int(_keys)
- var _mid int
- _upper := int(_keys + (_klen << 1) - 2)
- for {
- if _upper < _lower {
- break
- }
-
- _mid = _lower + (((_upper - _lower) >> 1) & ^1)
- switch {
- case data[p] < _graphclust_trans_keys[_mid]:
- _upper = _mid - 2
- case data[p] > _graphclust_trans_keys[_mid+1]:
- _lower = _mid + 2
- default:
- _trans += int((_mid - int(_keys)) >> 1)
- goto _match
- }
- }
- _trans += _klen
- }
-
- _match:
- _trans = int(_graphclust_indicies[_trans])
- _eof_trans:
- cs = int(_graphclust_trans_targs[_trans])
-
- if _graphclust_trans_actions[_trans] == 0 {
- goto _again
- }
-
- _acts = int(_graphclust_trans_actions[_trans])
- _nacts = uint(_graphclust_actions[_acts])
- _acts++
- for ; _nacts > 0; _nacts-- {
- _acts++
- switch _graphclust_actions[_acts-1] {
- case 0:
-//line grapheme_clusters.rl:47
-
- startPos = p
-
- case 1:
-//line grapheme_clusters.rl:51
-
- endPos = p
-
- case 5:
-//line NONE:1
- te = p + 1
-
- case 6:
-//line grapheme_clusters.rl:55
- act = 3
- case 7:
-//line grapheme_clusters.rl:55
- act = 4
- case 8:
-//line grapheme_clusters.rl:55
- act = 8
- case 9:
-//line grapheme_clusters.rl:55
- te = p + 1
- {
- return endPos + 1, data[startPos : endPos+1], nil
- }
- case 10:
-//line grapheme_clusters.rl:55
- te = p + 1
- {
- return endPos + 1, data[startPos : endPos+1], nil
- }
- case 11:
-//line grapheme_clusters.rl:55
- te = p
- p--
- {
- return endPos + 1, data[startPos : endPos+1], nil
- }
- case 12:
-//line grapheme_clusters.rl:55
- te = p
- p--
- {
- return endPos + 1, data[startPos : endPos+1], nil
- }
- case 13:
-//line grapheme_clusters.rl:55
- te = p
- p--
- {
- return endPos + 1, data[startPos : endPos+1], nil
- }
- case 14:
-//line grapheme_clusters.rl:55
- te = p
- p--
- {
- return endPos + 1, data[startPos : endPos+1], nil
- }
- case 15:
-//line grapheme_clusters.rl:55
- te = p
- p--
- {
- return endPos + 1, data[startPos : endPos+1], nil
- }
- case 16:
-//line grapheme_clusters.rl:55
- te = p
- p--
- {
- return endPos + 1, data[startPos : endPos+1], nil
- }
- case 17:
-//line grapheme_clusters.rl:55
- p = (te) - 1
- {
- return endPos + 1, data[startPos : endPos+1], nil
- }
- case 18:
-//line grapheme_clusters.rl:55
- p = (te) - 1
- {
- return endPos + 1, data[startPos : endPos+1], nil
- }
- case 19:
-//line grapheme_clusters.rl:55
- p = (te) - 1
- {
- return endPos + 1, data[startPos : endPos+1], nil
- }
- case 20:
-//line grapheme_clusters.rl:55
- p = (te) - 1
- {
- return endPos + 1, data[startPos : endPos+1], nil
- }
- case 21:
-//line grapheme_clusters.rl:55
- p = (te) - 1
- {
- return endPos + 1, data[startPos : endPos+1], nil
- }
- case 22:
-//line grapheme_clusters.rl:55
- p = (te) - 1
- {
- return endPos + 1, data[startPos : endPos+1], nil
- }
- case 23:
-//line NONE:1
- switch act {
- case 0:
- {
- cs = 0
- goto _again
- }
- case 3:
- {
- p = (te) - 1
-
- return endPos + 1, data[startPos : endPos+1], nil
- }
- case 4:
- {
- p = (te) - 1
-
- return endPos + 1, data[startPos : endPos+1], nil
- }
- case 8:
- {
- p = (te) - 1
-
- return endPos + 1, data[startPos : endPos+1], nil
- }
- }
-
-//line grapheme_clusters.go:4287
- }
- }
-
- _again:
- _acts = int(_graphclust_to_state_actions[cs])
- _nacts = uint(_graphclust_actions[_acts])
- _acts++
- for ; _nacts > 0; _nacts-- {
- _acts++
- switch _graphclust_actions[_acts-1] {
- case 2:
-//line NONE:1
- ts = 0
-
- case 3:
-//line NONE:1
- act = 0
-
-//line grapheme_clusters.go:4305
- }
- }
-
- if cs == 0 {
- goto _out
- }
- p++
- if p != pe {
- goto _resume
- }
- _test_eof:
- {
- }
- if p == eof {
- if _graphclust_eof_trans[cs] > 0 {
- _trans = int(_graphclust_eof_trans[cs] - 1)
- goto _eof_trans
- }
- }
-
- _out:
- {
- }
- }
-
-//line grapheme_clusters.rl:117
-
- // If we fall out here then we were unable to complete a sequence.
- // If we weren't able to complete a sequence then either we've
- // reached the end of a partial buffer (so there's more data to come)
- // or we have an isolated symbol that would normally be part of a
- // grapheme cluster but has appeared in isolation here.
-
- if !atEOF {
- // Request more
- return 0, nil, nil
- }
-
- // Just take the first UTF-8 sequence and return that.
- _, seqLen := utf8.DecodeRune(data)
- return seqLen, data[:seqLen], nil
-}
diff --git a/vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters.rl b/vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters.rl
deleted file mode 100644
index 737db18b..00000000
--- a/vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters.rl
+++ /dev/null
@@ -1,133 +0,0 @@
-package textseg
-
-import (
- "errors"
- "unicode/utf8"
-)
-
-// Generated from grapheme_clusters.rl. DO NOT EDIT
-%%{
- # (except you are actually in grapheme_clusters.rl here, so edit away!)
-
- machine graphclust;
- write data;
-}%%
-
-var Error = errors.New("invalid UTF8 text")
-
-// ScanGraphemeClusters is a split function for bufio.Scanner that splits
-// on grapheme cluster boundaries.
-func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) {
- if len(data) == 0 {
- return 0, nil, nil
- }
-
- // Ragel state
- cs := 0 // Current State
- p := 0 // "Pointer" into data
- pe := len(data) // End-of-data "pointer"
- ts := 0
- te := 0
- act := 0
- eof := pe
-
- // Make Go compiler happy
- _ = ts
- _ = te
- _ = act
- _ = eof
-
- startPos := 0
- endPos := 0
-
- %%{
- include GraphemeCluster "grapheme_clusters_table.rl";
- include Emoji "emoji_table.rl";
-
- action start {
- startPos = p
- }
-
- action end {
- endPos = p
- }
-
- action emit {
- return endPos+1, data[startPos:endPos+1], nil
- }
-
- ZWJGlue = ZWJ (Extended_Pictographic Extend*)?;
- AnyExtender = Extend | ZWJGlue | SpacingMark;
- Extension = AnyExtender*;
- ReplacementChar = (0xEF 0xBF 0xBD);
-
- CRLFSeq = CR LF;
- ControlSeq = Control | ReplacementChar;
- HangulSeq = (
- L+ (((LV? V+ | LVT) T*)?|LV?) |
- LV V* T* |
- V+ T* |
- LVT T* |
- T+
- ) Extension;
- EmojiSeq = Extended_Pictographic Extend* Extension;
- ZWJSeq = ZWJ (ZWJ | Extend | SpacingMark)*;
- EmojiFlagSeq = Regional_Indicator Regional_Indicator? Extension;
-
- UTF8Cont = 0x80 .. 0xBF;
- AnyUTF8 = (
- 0x00..0x7F |
- 0xC0..0xDF . UTF8Cont |
- 0xE0..0xEF . UTF8Cont . UTF8Cont |
- 0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont
- );
-
- # OtherSeq is any character that isn't at the start of one of the extended sequences above, followed by extension
- OtherSeq = (AnyUTF8 - (CR|LF|Control|ReplacementChar|L|LV|V|LVT|T|Extended_Pictographic|ZWJ|Regional_Indicator|Prepend)) (Extend | ZWJ | SpacingMark)*;
-
- # PrependSeq is prepend followed by any of the other patterns above, except control characters which explicitly break
- PrependSeq = Prepend+ (HangulSeq|EmojiSeq|ZWJSeq|EmojiFlagSeq|OtherSeq)?;
-
- CRLFTok = CRLFSeq >start @end;
- ControlTok = ControlSeq >start @end;
- HangulTok = HangulSeq >start @end;
- EmojiTok = EmojiSeq >start @end;
- ZWJTok = ZWJSeq >start @end;
- EmojiFlagTok = EmojiFlagSeq >start @end;
- OtherTok = OtherSeq >start @end;
- PrependTok = PrependSeq >start @end;
-
- main := |*
- CRLFTok => emit;
- ControlTok => emit;
- HangulTok => emit;
- EmojiTok => emit;
- ZWJTok => emit;
- EmojiFlagTok => emit;
- PrependTok => emit;
- OtherTok => emit;
-
- # any single valid UTF-8 character would also be valid per spec,
- # but we'll handle that separately after the loop so we can deal
- # with requesting more bytes if we're not at EOF.
- *|;
-
- write init;
- write exec;
- }%%
-
- // If we fall out here then we were unable to complete a sequence.
- // If we weren't able to complete a sequence then either we've
- // reached the end of a partial buffer (so there's more data to come)
- // or we have an isolated symbol that would normally be part of a
- // grapheme cluster but has appeared in isolation here.
-
- if !atEOF {
- // Request more
- return 0, nil, nil
- }
-
- // Just take the first UTF-8 sequence and return that.
- _, seqLen := utf8.DecodeRune(data)
- return seqLen, data[:seqLen], nil
-}
\ No newline at end of file
diff --git a/vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters_table.rl b/vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters_table.rl
deleted file mode 100644
index 3cff4291..00000000
--- a/vendor/github.com/apparentlymart/go-textseg/v15/textseg/grapheme_clusters_table.rl
+++ /dev/null
@@ -1,1637 +0,0 @@
-# The following Ragel file was autogenerated with unicode2ragel.rb
-# from: https://www.unicode.org/Public/15.0.0/ucd/auxiliary/GraphemeBreakProperty.txt
-#
-# It defines ["Prepend", "CR", "LF", "Control", "Extend", "Regional_Indicator", "SpacingMark", "L", "V", "T", "LV", "LVT", "ZWJ"].
-#
-# To use this, make sure that your alphtype is set to byte,
-# and that your input is in utf8.
-
-%%{
- machine GraphemeCluster;
-
- Prepend =
- 0xD8 0x80..0x85 #Cf [6] ARABIC NUMBER SIGN..ARABIC NUMBER ...
- | 0xDB 0x9D #Cf ARABIC END OF AYAH
- | 0xDC 0x8F #Cf SYRIAC ABBREVIATION MARK
- | 0xE0 0xA2 0x90..0x91 #Cf [2] ARABIC POUND MARK ABOVE..ARABIC PI...
- | 0xE0 0xA3 0xA2 #Cf ARABIC DISPUTED END OF AYAH
- | 0xE0 0xB5 0x8E #Lo MALAYALAM LETTER DOT REPH
- | 0xF0 0x91 0x82 0xBD #Cf KAITHI NUMBER SIGN
- | 0xF0 0x91 0x83 0x8D #Cf KAITHI NUMBER SIGN ABOVE
- | 0xF0 0x91 0x87 0x82..0x83 #Lo [2] SHARADA SIGN JIHVAMULIYA..SHARA...
- | 0xF0 0x91 0xA4 0xBF #Lo DIVES AKURU PREFIXED NASAL SIGN
- | 0xF0 0x91 0xA5 0x81 #Lo DIVES AKURU INITIAL RA
- | 0xF0 0x91 0xA8 0xBA #Lo ZANABAZAR SQUARE CLUSTER-INITIAL L...
- | 0xF0 0x91 0xAA 0x84..0x89 #Lo [6] SOYOMBO SIGN JIHVAMULIYA..SOYOM...
- | 0xF0 0x91 0xB5 0x86 #Lo MASARAM GONDI REPHA
- | 0xF0 0x91 0xBC 0x82 #Lo KAWI SIGN REPHA
- ;
-
- CR =
- 0x0D #Cc
- ;
-
- LF =
- 0x0A #Cc
- ;
-
- Control =
- 0x00..0x09 #Cc [10] ..
- | 0x0B..0x0C #Cc [2] ..
- | 0x0E..0x1F #Cc [18] ..
- | 0x7F #Cc [33] ..
- | 0xC2 0x80..0x9F #
- | 0xC2 0xAD #Cf SOFT HYPHEN
- | 0xD8 0x9C #Cf ARABIC LETTER MARK
- | 0xE1 0xA0 0x8E #Cf MONGOLIAN VOWEL SEPARATOR
- | 0xE2 0x80 0x8B #Cf ZERO WIDTH SPACE
- | 0xE2 0x80 0x8E..0x8F #Cf [2] LEFT-TO-RIGHT MARK..RIGHT-TO-LEFT ...
- | 0xE2 0x80 0xA8 #Zl LINE SEPARATOR
- | 0xE2 0x80 0xA9 #Zp PARAGRAPH SEPARATOR
- | 0xE2 0x80 0xAA..0xAE #Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-...
- | 0xE2 0x81 0xA0..0xA4 #Cf [5] WORD JOINER..INVISIBLE PLUS
- | 0xE2 0x81 0xA5 #Cn
- | 0xE2 0x81 0xA6..0xAF #Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIG...
- | 0xEF 0xBB 0xBF #Cf ZERO WIDTH NO-BREAK SPACE
- | 0xEF 0xBF 0xB0..0xB8 #Cn [9] ..
- | 0xEF 0xBF 0xB9..0xBB #Cf [3] INTERLINEAR ANNOTATION ANCHOR..INT...
- | 0xF0 0x93 0x90 0xB0..0xBF #Cf [16] EGYPTIAN HIEROGLYPH VERTICAL JO...
- | 0xF0 0x9B 0xB2 0xA0..0xA3 #Cf [4] SHORTHAND FORMAT LETTER OVERLAP...
- | 0xF0 0x9D 0x85 0xB3..0xBA #Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSI...
- | 0xF3 0xA0 0x80 0x80 #Cn
- | 0xF3 0xA0 0x80 0x81 #Cf LANGUAGE TAG
- | 0xF3 0xA0 0x80 0x82..0x9F #Cn [30] ..
- | 0xF3 0xA0 0x82 0x80..0xFF #Cn [128] ..
- | 0xF3 0xA0 0x83 0x00..0xBF #
- | 0xF3 0xA0 0x87 0xB0..0xFF #Cn [3600] ..
-#
-# This script uses the unicode spec to generate a Ragel state machine
-# that recognizes unicode alphanumeric characters. It generates 5
-# character classes: uupper, ulower, ualpha, udigit, and ualnum.
-# Currently supported encodings are UTF-8 [default] and UCS-4.
-#
-# Usage: unicode2ragel.rb [options]
-# -e, --encoding [ucs4 | utf8] Data encoding
-# -h, --help Show this message
-#
-# This script was originally written as part of the Ferret search
-# engine library.
-#
-# Author: Rakan El-Khalil
-
-require 'optparse'
-require 'open-uri'
-
-ENCODINGS = [ :utf8, :ucs4 ]
-ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" }
-DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt"
-DEFAULT_MACHINE_NAME= "WChar"
-
-###
-# Display vars & default option
-
-TOTAL_WIDTH = 80
-RANGE_WIDTH = 23
-@encoding = :utf8
-@chart_url = DEFAULT_CHART_URL
-machine_name = DEFAULT_MACHINE_NAME
-properties = []
-@output = $stdout
-
-###
-# Option parsing
-
-cli_opts = OptionParser.new do |opts|
- opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o|
- @encoding = o.downcase.to_sym
- end
- opts.on("-h", "--help", "Show this message") do
- puts opts
- exit
- end
- opts.on("-u", "--url URL", "URL to process") do |o|
- @chart_url = o
- end
- opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o|
- machine_name = o
- end
- opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o|
- properties = o
- end
- opts.on("-o", "--output FILE", "output file") do |o|
- @output = File.new(o, "w+")
- end
-end
-
-cli_opts.parse(ARGV)
-unless ENCODINGS.member? @encoding
- puts "Invalid encoding: #{@encoding}"
- puts cli_opts
- exit
-end
-
-##
-# Downloads the document at url and yields every alpha line's hex
-# range and description.
-
-def each_alpha( url, property )
- URI.open( url ) do |file|
- file.each_line do |line|
- next if line =~ /^#/;
- next if line !~ /; #{property} *#/;
-
- range, description = line.split(/;/)
- range.strip!
- description.gsub!(/.*#/, '').strip!
-
- if range =~ /\.\./
- start, stop = range.split '..'
- else start = stop = range
- end
-
- yield start.hex .. stop.hex, description
- end
- end
-end
-
-###
-# Formats to hex at minimum width
-
-def to_hex( n )
- r = "%0X" % n
- r = "0#{r}" unless (r.length % 2).zero?
- r
-end
-
-###
-# UCS4 is just a straight hex conversion of the unicode codepoint.
-
-def to_ucs4( range )
- rangestr = "0x" + to_hex(range.begin)
- rangestr << "..0x" + to_hex(range.end) if range.begin != range.end
- [ rangestr ]
-end
-
-##
-# 0x00 - 0x7f -> 0zzzzzzz[7]
-# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6]
-# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6]
-# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6]
-
-UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff]
-
-def to_utf8_enc( n )
- r = 0
- if n <= 0x7f
- r = n
- elsif n <= 0x7ff
- y = 0xc0 | (n >> 6)
- z = 0x80 | (n & 0x3f)
- r = y << 8 | z
- elsif n <= 0xffff
- x = 0xe0 | (n >> 12)
- y = 0x80 | (n >> 6) & 0x3f
- z = 0x80 | n & 0x3f
- r = x << 16 | y << 8 | z
- elsif n <= 0x10ffff
- w = 0xf0 | (n >> 18)
- x = 0x80 | (n >> 12) & 0x3f
- y = 0x80 | (n >> 6) & 0x3f
- z = 0x80 | n & 0x3f
- r = w << 24 | x << 16 | y << 8 | z
- end
-
- to_hex(r)
-end
-
-def from_utf8_enc( n )
- n = n.hex
- r = 0
- if n <= 0x7f
- r = n
- elsif n <= 0xdfff
- y = (n >> 8) & 0x1f
- z = n & 0x3f
- r = y << 6 | z
- elsif n <= 0xefffff
- x = (n >> 16) & 0x0f
- y = (n >> 8) & 0x3f
- z = n & 0x3f
- r = x << 10 | y << 6 | z
- elsif n <= 0xf7ffffff
- w = (n >> 24) & 0x07
- x = (n >> 16) & 0x3f
- y = (n >> 8) & 0x3f
- z = n & 0x3f
- r = w << 18 | x << 12 | y << 6 | z
- end
- r
-end
-
-###
-# Given a range, splits it up into ranges that can be continuously
-# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff]
-# This is not strictly needed since the current [5.1] unicode standard
-# doesn't have ranges that straddle utf8 boundaries. This is included
-# for completeness as there is no telling if that will ever change.
-
-def utf8_ranges( range )
- ranges = []
- UTF8_BOUNDARIES.each do |max|
- if range.begin <= max
- if range.end <= max
- ranges << range
- return ranges
- end
-
- ranges << (range.begin .. max)
- range = (max + 1) .. range.end
- end
- end
- ranges
-end
-
-def build_range( start, stop )
- size = start.size/2
- left = size - 1
- return [""] if size < 1
-
- a = start[0..1]
- b = stop[0..1]
-
- ###
- # Shared prefix
-
- if a == b
- return build_range(start[2..-1], stop[2..-1]).map do |elt|
- "0x#{a} " + elt
- end
- end
-
- ###
- # Unshared prefix, end of run
-
- return ["0x#{a}..0x#{b} "] if left.zero?
-
- ###
- # Unshared prefix, not end of run
- # Range can be 0x123456..0x56789A
- # Which is equivalent to:
- # 0x123456 .. 0x12FFFF
- # 0x130000 .. 0x55FFFF
- # 0x560000 .. 0x56789A
-
- ret = []
- ret << build_range(start, a + "FF" * left)
-
- ###
- # Only generate middle range if need be.
-
- if a.hex+1 != b.hex
- max = to_hex(b.hex - 1)
- max = "FF" if b == "FF"
- ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left
- end
-
- ###
- # Don't generate last range if it is covered by first range
-
- ret << build_range(b + "00" * left, stop) unless b == "FF"
- ret.flatten!
-end
-
-def to_utf8( range )
- utf8_ranges( range ).map do |r|
- begin_enc = to_utf8_enc(r.begin)
- end_enc = to_utf8_enc(r.end)
- build_range begin_enc, end_enc
- end.flatten!
-end
-
-##
-# Perform a 3-way comparison of the number of codepoints advertised by
-# the unicode spec for the given range, the originally parsed range,
-# and the resulting utf8 encoded range.
-
-def count_codepoints( code )
- code.split(' ').inject(1) do |acc, elt|
- if elt =~ /0x(.+)\.\.0x(.+)/
- if @encoding == :utf8
- acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1)
- else
- acc * ($2.hex - $1.hex + 1)
- end
- else
- acc
- end
- end
-end
-
-def is_valid?( range, desc, codes )
- spec_count = 1
- spec_count = $1.to_i if desc =~ /\[(\d+)\]/
- range_count = range.end - range.begin + 1
-
- sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) }
- sum == spec_count and sum == range_count
-end
-
-##
-# Generate the state maching to stdout
-
-def generate_machine( name, property )
- pipe = " "
- @output.puts " #{name} = "
- each_alpha( @chart_url, property ) do |range, desc|
-
- codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range)
-
- #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless
- # is_valid? range, desc, codes
-
- range_width = codes.map { |a| a.size }.max
- range_width = RANGE_WIDTH if range_width < RANGE_WIDTH
-
- desc_width = TOTAL_WIDTH - RANGE_WIDTH - 11
- desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH
-
- if desc.size > desc_width
- desc = desc[0..desc_width - 4] + "..."
- end
-
- codes.each_with_index do |r, idx|
- desc = "" unless idx.zero?
- code = "%-#{range_width}s" % r
- @output.puts " #{pipe} #{code} ##{desc}"
- pipe = "|"
- end
- end
- @output.puts " ;"
- @output.puts ""
-end
-
-@output.puts <> uint(j)) & 1)
- copy(w[0][:], tableGenerator[s*Size:(s+1)*Size])
- diffAdd(&w, swap^bit)
- swap = bit
- }
- for s := 0; s < h; s++ {
- double(&w[1], &w[2])
- }
- toAffine((*[fp.Size]byte)(k), &w[1], &w[2])
-}
-
-// ladderMontgomery calculates a generic scalar point multiplication
-// The algorithm implemented is the left-to-right Montgomery's ladder.
-func ladderMontgomery(k, xP *Key) {
- w := [5]fp.Elt{} // [x1, x2, z2, x3, z3] order must be preserved.
- w[0] = *(*fp.Elt)(xP) // x1 = xP
- fp.SetOne(&w[1]) // x2 = 1
- w[3] = *(*fp.Elt)(xP) // x3 = xP
- fp.SetOne(&w[4]) // z3 = 1
-
- move := uint(0)
- for s := 255 - 1; s >= 0; s-- {
- i := s / 8
- j := s % 8
- bit := uint((k[i] >> uint(j)) & 1)
- ladderStep(&w, move^bit)
- move = bit
- }
- toAffine((*[fp.Size]byte)(k), &w[1], &w[2])
-}
-
-func toAffine(k *[fp.Size]byte, x, z *fp.Elt) {
- fp.Inv(z, z)
- fp.Mul(x, x, z)
- _ = fp.ToBytes(k[:], x)
-}
-
-var lowOrderPoints = [5]fp.Elt{
- { /* (0,_,1) point of order 2 on Curve25519 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- },
- { /* (1,_,1) point of order 4 on Curve25519 */
- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- },
- { /* (x,_,1) first point of order 8 on Curve25519 */
- 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae,
- 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a,
- 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd,
- 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x00,
- },
- { /* (x,_,1) second point of order 8 on Curve25519 */
- 0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24,
- 0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b,
- 0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86,
- 0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0x57,
- },
- { /* (-1,_,1) a point of order 4 on the twist of Curve25519 */
- 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
- },
-}
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go
deleted file mode 100644
index 8a3d54c5..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.go
+++ /dev/null
@@ -1,30 +0,0 @@
-//go:build amd64 && !purego
-// +build amd64,!purego
-
-package x25519
-
-import (
- fp "github.com/cloudflare/circl/math/fp25519"
- "golang.org/x/sys/cpu"
-)
-
-var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX
-
-var _ = hasBmi2Adx
-
-func double(x, z *fp.Elt) { doubleAmd64(x, z) }
-func diffAdd(w *[5]fp.Elt, b uint) { diffAddAmd64(w, b) }
-func ladderStep(w *[5]fp.Elt, b uint) { ladderStepAmd64(w, b) }
-func mulA24(z, x *fp.Elt) { mulA24Amd64(z, x) }
-
-//go:noescape
-func ladderStepAmd64(w *[5]fp.Elt, b uint)
-
-//go:noescape
-func diffAddAmd64(w *[5]fp.Elt, b uint)
-
-//go:noescape
-func doubleAmd64(x, z *fp.Elt)
-
-//go:noescape
-func mulA24Amd64(z, x *fp.Elt)
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h
deleted file mode 100644
index 8c1ae4d0..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#define ladderStepLeg \
- addSub(x2,z2) \
- addSub(x3,z3) \
- integerMulLeg(b0,x2,z3) \
- integerMulLeg(b1,x3,z2) \
- reduceFromDoubleLeg(t0,b0) \
- reduceFromDoubleLeg(t1,b1) \
- addSub(t0,t1) \
- cselect(x2,x3,regMove) \
- cselect(z2,z3,regMove) \
- integerSqrLeg(b0,t0) \
- integerSqrLeg(b1,t1) \
- reduceFromDoubleLeg(x3,b0) \
- reduceFromDoubleLeg(z3,b1) \
- integerMulLeg(b0,x1,z3) \
- reduceFromDoubleLeg(z3,b0) \
- integerSqrLeg(b0,x2) \
- integerSqrLeg(b1,z2) \
- reduceFromDoubleLeg(x2,b0) \
- reduceFromDoubleLeg(z2,b1) \
- subtraction(t0,x2,z2) \
- multiplyA24Leg(t1,t0) \
- additionLeg(t1,t1,z2) \
- integerMulLeg(b0,x2,z2) \
- integerMulLeg(b1,t0,t1) \
- reduceFromDoubleLeg(x2,b0) \
- reduceFromDoubleLeg(z2,b1)
-
-#define ladderStepBmi2Adx \
- addSub(x2,z2) \
- addSub(x3,z3) \
- integerMulAdx(b0,x2,z3) \
- integerMulAdx(b1,x3,z2) \
- reduceFromDoubleAdx(t0,b0) \
- reduceFromDoubleAdx(t1,b1) \
- addSub(t0,t1) \
- cselect(x2,x3,regMove) \
- cselect(z2,z3,regMove) \
- integerSqrAdx(b0,t0) \
- integerSqrAdx(b1,t1) \
- reduceFromDoubleAdx(x3,b0) \
- reduceFromDoubleAdx(z3,b1) \
- integerMulAdx(b0,x1,z3) \
- reduceFromDoubleAdx(z3,b0) \
- integerSqrAdx(b0,x2) \
- integerSqrAdx(b1,z2) \
- reduceFromDoubleAdx(x2,b0) \
- reduceFromDoubleAdx(z2,b1) \
- subtraction(t0,x2,z2) \
- multiplyA24Adx(t1,t0) \
- additionAdx(t1,t1,z2) \
- integerMulAdx(b0,x2,z2) \
- integerMulAdx(b1,t0,t1) \
- reduceFromDoubleAdx(x2,b0) \
- reduceFromDoubleAdx(z2,b1)
-
-#define difAddLeg \
- addSub(x1,z1) \
- integerMulLeg(b0,z1,ui) \
- reduceFromDoubleLeg(z1,b0) \
- addSub(x1,z1) \
- integerSqrLeg(b0,x1) \
- integerSqrLeg(b1,z1) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1) \
- integerMulLeg(b0,x1,z2) \
- integerMulLeg(b1,z1,x2) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1)
-
-#define difAddBmi2Adx \
- addSub(x1,z1) \
- integerMulAdx(b0,z1,ui) \
- reduceFromDoubleAdx(z1,b0) \
- addSub(x1,z1) \
- integerSqrAdx(b0,x1) \
- integerSqrAdx(b1,z1) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1) \
- integerMulAdx(b0,x1,z2) \
- integerMulAdx(b1,z1,x2) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1)
-
-#define doubleLeg \
- addSub(x1,z1) \
- integerSqrLeg(b0,x1) \
- integerSqrLeg(b1,z1) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1) \
- subtraction(t0,x1,z1) \
- multiplyA24Leg(t1,t0) \
- additionLeg(t1,t1,z1) \
- integerMulLeg(b0,x1,z1) \
- integerMulLeg(b1,t0,t1) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1)
-
-#define doubleBmi2Adx \
- addSub(x1,z1) \
- integerSqrAdx(b0,x1) \
- integerSqrAdx(b1,z1) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1) \
- subtraction(t0,x1,z1) \
- multiplyA24Adx(t1,t0) \
- additionAdx(t1,t1,z1) \
- integerMulAdx(b0,x1,z1) \
- integerMulAdx(b1,t0,t1) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1)
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s b/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s
deleted file mode 100644
index ce9f0628..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/curve_amd64.s
+++ /dev/null
@@ -1,157 +0,0 @@
-//go:build amd64 && !purego
-// +build amd64,!purego
-
-#include "textflag.h"
-
-// Depends on circl/math/fp25519 package
-#include "../../math/fp25519/fp_amd64.h"
-#include "curve_amd64.h"
-
-// CTE_A24 is (A+2)/4 from Curve25519
-#define CTE_A24 121666
-
-#define Size 32
-
-// multiplyA24Leg multiplies x times CTE_A24 and stores in z
-// Uses: AX, DX, R8-R13, FLAGS
-// Instr: x86_64, cmov
-#define multiplyA24Leg(z,x) \
- MOVL $CTE_A24, AX; MULQ 0+x; MOVQ AX, R8; MOVQ DX, R9; \
- MOVL $CTE_A24, AX; MULQ 8+x; MOVQ AX, R12; MOVQ DX, R10; \
- MOVL $CTE_A24, AX; MULQ 16+x; MOVQ AX, R13; MOVQ DX, R11; \
- MOVL $CTE_A24, AX; MULQ 24+x; \
- ADDQ R12, R9; \
- ADCQ R13, R10; \
- ADCQ AX, R11; \
- ADCQ $0, DX; \
- MOVL $38, AX; /* 2*C = 38 = 2^256 MOD 2^255-19*/ \
- IMULQ AX, DX; \
- ADDQ DX, R8; \
- ADCQ $0, R9; MOVQ R9, 8+z; \
- ADCQ $0, R10; MOVQ R10, 16+z; \
- ADCQ $0, R11; MOVQ R11, 24+z; \
- MOVQ $0, DX; \
- CMOVQCS AX, DX; \
- ADDQ DX, R8; MOVQ R8, 0+z;
-
-// multiplyA24Adx multiplies x times CTE_A24 and stores in z
-// Uses: AX, DX, R8-R12, FLAGS
-// Instr: x86_64, cmov, bmi2
-#define multiplyA24Adx(z,x) \
- MOVQ $CTE_A24, DX; \
- MULXQ 0+x, R8, R10; \
- MULXQ 8+x, R9, R11; ADDQ R10, R9; \
- MULXQ 16+x, R10, AX; ADCQ R11, R10; \
- MULXQ 24+x, R11, R12; ADCQ AX, R11; \
- ;;;;;;;;;;;;;;;;;;;;; ADCQ $0, R12; \
- MOVL $38, DX; /* 2*C = 38 = 2^256 MOD 2^255-19*/ \
- IMULQ DX, R12; \
- ADDQ R12, R8; \
- ADCQ $0, R9; MOVQ R9, 8+z; \
- ADCQ $0, R10; MOVQ R10, 16+z; \
- ADCQ $0, R11; MOVQ R11, 24+z; \
- MOVQ $0, R12; \
- CMOVQCS DX, R12; \
- ADDQ R12, R8; MOVQ R8, 0+z;
-
-#define mulA24Legacy \
- multiplyA24Leg(0(DI),0(SI))
-#define mulA24Bmi2Adx \
- multiplyA24Adx(0(DI),0(SI))
-
-// func mulA24Amd64(z, x *fp255.Elt)
-TEXT ·mulA24Amd64(SB),NOSPLIT,$0-16
- MOVQ z+0(FP), DI
- MOVQ x+8(FP), SI
- CHECK_BMI2ADX(LMA24, mulA24Legacy, mulA24Bmi2Adx)
-
-
-// func ladderStepAmd64(w *[5]fp255.Elt, b uint)
-// ladderStepAmd64 calculates a point addition and doubling as follows:
-// (x2,z2) = 2*(x2,z2) and (x3,z3) = (x2,z2)+(x3,z3) using as a difference (x1,-).
-// work = (x1,x2,z2,x3,z3) are five fp255.Elt of 32 bytes.
-// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and
-// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
-TEXT ·ladderStepAmd64(SB),NOSPLIT,$192-16
- // Parameters
- #define regWork DI
- #define regMove SI
- #define x1 0*Size(regWork)
- #define x2 1*Size(regWork)
- #define z2 2*Size(regWork)
- #define x3 3*Size(regWork)
- #define z3 4*Size(regWork)
- // Local variables
- #define t0 0*Size(SP)
- #define t1 1*Size(SP)
- #define b0 2*Size(SP)
- #define b1 4*Size(SP)
- MOVQ w+0(FP), regWork
- MOVQ b+8(FP), regMove
- CHECK_BMI2ADX(LLADSTEP, ladderStepLeg, ladderStepBmi2Adx)
- #undef regWork
- #undef regMove
- #undef x1
- #undef x2
- #undef z2
- #undef x3
- #undef z3
- #undef t0
- #undef t1
- #undef b0
- #undef b1
-
-// func diffAddAmd64(w *[5]fp255.Elt, b uint)
-// diffAddAmd64 calculates a differential point addition using a precomputed point.
-// (x1,z1) = (x1,z1)+(mu) using a difference point (x2,z2)
-// w = (mu,x1,z1,x2,z2) are five fp.Elt, and
-// stack = (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
-TEXT ·diffAddAmd64(SB),NOSPLIT,$128-16
- // Parameters
- #define regWork DI
- #define regSwap SI
- #define ui 0*Size(regWork)
- #define x1 1*Size(regWork)
- #define z1 2*Size(regWork)
- #define x2 3*Size(regWork)
- #define z2 4*Size(regWork)
- // Local variables
- #define b0 0*Size(SP)
- #define b1 2*Size(SP)
- MOVQ w+0(FP), regWork
- MOVQ b+8(FP), regSwap
- cswap(x1,x2,regSwap)
- cswap(z1,z2,regSwap)
- CHECK_BMI2ADX(LDIFADD, difAddLeg, difAddBmi2Adx)
- #undef regWork
- #undef regSwap
- #undef ui
- #undef x1
- #undef z1
- #undef x2
- #undef z2
- #undef b0
- #undef b1
-
-// func doubleAmd64(x, z *fp255.Elt)
-// doubleAmd64 calculates a point doubling (x1,z1) = 2*(x1,z1).
-// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and
-// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
-TEXT ·doubleAmd64(SB),NOSPLIT,$192-16
- // Parameters
- #define x1 0(DI)
- #define z1 0(SI)
- // Local variables
- #define t0 0*Size(SP)
- #define t1 1*Size(SP)
- #define b0 2*Size(SP)
- #define b1 4*Size(SP)
- MOVQ x+0(FP), DI
- MOVQ z+8(FP), SI
- CHECK_BMI2ADX(LDOUB,doubleLeg,doubleBmi2Adx)
- #undef x1
- #undef z1
- #undef t0
- #undef t1
- #undef b0
- #undef b1
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go
deleted file mode 100644
index dae67ea3..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/curve_generic.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package x25519
-
-import (
- "encoding/binary"
- "math/bits"
-
- fp "github.com/cloudflare/circl/math/fp25519"
-)
-
-func doubleGeneric(x, z *fp.Elt) {
- t0, t1 := &fp.Elt{}, &fp.Elt{}
- fp.AddSub(x, z)
- fp.Sqr(x, x)
- fp.Sqr(z, z)
- fp.Sub(t0, x, z)
- mulA24Generic(t1, t0)
- fp.Add(t1, t1, z)
- fp.Mul(x, x, z)
- fp.Mul(z, t0, t1)
-}
-
-func diffAddGeneric(w *[5]fp.Elt, b uint) {
- mu, x1, z1, x2, z2 := &w[0], &w[1], &w[2], &w[3], &w[4]
- fp.Cswap(x1, x2, b)
- fp.Cswap(z1, z2, b)
- fp.AddSub(x1, z1)
- fp.Mul(z1, z1, mu)
- fp.AddSub(x1, z1)
- fp.Sqr(x1, x1)
- fp.Sqr(z1, z1)
- fp.Mul(x1, x1, z2)
- fp.Mul(z1, z1, x2)
-}
-
-func ladderStepGeneric(w *[5]fp.Elt, b uint) {
- x1, x2, z2, x3, z3 := &w[0], &w[1], &w[2], &w[3], &w[4]
- t0 := &fp.Elt{}
- t1 := &fp.Elt{}
- fp.AddSub(x2, z2)
- fp.AddSub(x3, z3)
- fp.Mul(t0, x2, z3)
- fp.Mul(t1, x3, z2)
- fp.AddSub(t0, t1)
- fp.Cmov(x2, x3, b)
- fp.Cmov(z2, z3, b)
- fp.Sqr(x3, t0)
- fp.Sqr(z3, t1)
- fp.Mul(z3, x1, z3)
- fp.Sqr(x2, x2)
- fp.Sqr(z2, z2)
- fp.Sub(t0, x2, z2)
- mulA24Generic(t1, t0)
- fp.Add(t1, t1, z2)
- fp.Mul(x2, x2, z2)
- fp.Mul(z2, t0, t1)
-}
-
-func mulA24Generic(z, x *fp.Elt) {
- const A24 = 121666
- const n = 8
- var xx [4]uint64
- for i := range xx {
- xx[i] = binary.LittleEndian.Uint64(x[i*n : (i+1)*n])
- }
-
- h0, l0 := bits.Mul64(xx[0], A24)
- h1, l1 := bits.Mul64(xx[1], A24)
- h2, l2 := bits.Mul64(xx[2], A24)
- h3, l3 := bits.Mul64(xx[3], A24)
-
- var c3 uint64
- l1, c0 := bits.Add64(h0, l1, 0)
- l2, c1 := bits.Add64(h1, l2, c0)
- l3, c2 := bits.Add64(h2, l3, c1)
- l4, _ := bits.Add64(h3, 0, c2)
- _, l4 = bits.Mul64(l4, 38)
- l0, c0 = bits.Add64(l0, l4, 0)
- xx[1], c1 = bits.Add64(l1, 0, c0)
- xx[2], c2 = bits.Add64(l2, 0, c1)
- xx[3], c3 = bits.Add64(l3, 0, c2)
- xx[0], _ = bits.Add64(l0, (-c3)&38, 0)
- for i := range xx {
- binary.LittleEndian.PutUint64(z[i*n:(i+1)*n], xx[i])
- }
-}
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go b/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go
deleted file mode 100644
index 07fab97d..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/curve_noasm.go
+++ /dev/null
@@ -1,11 +0,0 @@
-//go:build !amd64 || purego
-// +build !amd64 purego
-
-package x25519
-
-import fp "github.com/cloudflare/circl/math/fp25519"
-
-func double(x, z *fp.Elt) { doubleGeneric(x, z) }
-func diffAdd(w *[5]fp.Elt, b uint) { diffAddGeneric(w, b) }
-func ladderStep(w *[5]fp.Elt, b uint) { ladderStepGeneric(w, b) }
-func mulA24(z, x *fp.Elt) { mulA24Generic(z, x) }
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/doc.go b/vendor/github.com/cloudflare/circl/dh/x25519/doc.go
deleted file mode 100644
index 3ce102d1..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
-Package x25519 provides Diffie-Hellman functions as specified in RFC-7748.
-
-Validation of public keys.
-
-The Diffie-Hellman function, as described in RFC-7748 [1], works for any
-public key. However, if a different protocol requires contributory
-behaviour [2,3], then the public keys must be validated against low-order
-points [3,4]. To do that, the Shared function performs this validation
-internally and returns false when the public key is invalid (i.e., it
-is a low-order point).
-
-References:
- - [1] RFC7748 by Langley, Hamburg, Turner (https://rfc-editor.org/rfc/rfc7748.txt)
- - [2] Curve25519 by Bernstein (https://cr.yp.to/ecdh.html)
- - [3] Bernstein (https://cr.yp.to/ecdh.html#validate)
- - [4] Cremers&Jackson (https://eprint.iacr.org/2019/526)
-*/
-package x25519
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/key.go b/vendor/github.com/cloudflare/circl/dh/x25519/key.go
deleted file mode 100644
index c76f72ac..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/key.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package x25519
-
-import (
- "crypto/subtle"
-
- fp "github.com/cloudflare/circl/math/fp25519"
-)
-
-// Size is the length in bytes of a X25519 key.
-const Size = 32
-
-// Key represents a X25519 key.
-type Key [Size]byte
-
-func (k *Key) clamp(in *Key) *Key {
- *k = *in
- k[0] &= 248
- k[31] = (k[31] & 127) | 64
- return k
-}
-
-// isValidPubKey verifies if the public key is not a low-order point.
-func (k *Key) isValidPubKey() bool {
- fp.Modp((*fp.Elt)(k))
- var isLowOrder int
- for _, P := range lowOrderPoints {
- isLowOrder |= subtle.ConstantTimeCompare(P[:], k[:])
- }
- return isLowOrder == 0
-}
-
-// KeyGen obtains a public key given a secret key.
-func KeyGen(public, secret *Key) {
- ladderJoye(public.clamp(secret))
-}
-
-// Shared calculates Alice's shared key from Alice's secret key and Bob's
-// public key returning true on success. A failure case happens when the public
-// key is a low-order point, thus the shared key is all-zeros and the function
-// returns false.
-func Shared(shared, secret, public *Key) bool {
- validPk := *public
- validPk[31] &= (1 << (255 % 8)) - 1
- ok := validPk.isValidPubKey()
- ladderMontgomery(shared.clamp(secret), &validPk)
- return ok
-}
diff --git a/vendor/github.com/cloudflare/circl/dh/x25519/table.go b/vendor/github.com/cloudflare/circl/dh/x25519/table.go
deleted file mode 100644
index 28c8c4ac..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x25519/table.go
+++ /dev/null
@@ -1,268 +0,0 @@
-package x25519
-
-import "github.com/cloudflare/circl/math/fp25519"
-
-// tableGenerator contains the set of points:
-//
-// t[i] = (xi+1)/(xi-1),
-//
-// where (xi,yi) = 2^iG and G is the generator point
-// Size = (256)*(256/8) = 8192 bytes.
-var tableGenerator = [256 * fp25519.Size]byte{
- /* (2^ 0)P */ 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5f,
- /* (2^ 1)P */ 0x96, 0xfe, 0xaa, 0x16, 0xf4, 0x20, 0x82, 0x6b, 0x34, 0x6a, 0x56, 0x4f, 0x2b, 0xeb, 0xeb, 0x82, 0x0f, 0x95, 0xa5, 0x75, 0xb0, 0xa5, 0xa9, 0xd5, 0xf4, 0x88, 0x24, 0x4b, 0xcf, 0xb2, 0x42, 0x51,
- /* (2^ 2)P */ 0x0c, 0x68, 0x69, 0x00, 0x75, 0xbc, 0xae, 0x6a, 0x41, 0x9c, 0xf9, 0xa0, 0x20, 0x78, 0xcf, 0x89, 0xf4, 0xd0, 0x56, 0x3b, 0x18, 0xd9, 0x58, 0x2a, 0xa4, 0x11, 0x60, 0xe3, 0x80, 0xca, 0x5a, 0x4b,
- /* (2^ 3)P */ 0x5d, 0x74, 0x29, 0x8c, 0x34, 0x32, 0x91, 0x32, 0xd7, 0x2f, 0x64, 0xe1, 0x16, 0xe6, 0xa2, 0xf4, 0x34, 0xbc, 0x67, 0xff, 0x03, 0xbb, 0x45, 0x1e, 0x4a, 0x9b, 0x2a, 0xf4, 0xd0, 0x12, 0x69, 0x30,
- /* (2^ 4)P */ 0x54, 0x71, 0xaf, 0xe6, 0x07, 0x65, 0x88, 0xff, 0x2f, 0xc8, 0xee, 0xdf, 0x13, 0x0e, 0xf5, 0x04, 0xce, 0xb5, 0xba, 0x2a, 0xe8, 0x2f, 0x51, 0xaa, 0x22, 0xf2, 0xd5, 0x68, 0x1a, 0x25, 0x4e, 0x17,
- /* (2^ 5)P */ 0x98, 0x88, 0x02, 0x82, 0x0d, 0x70, 0x96, 0xcf, 0xc5, 0x02, 0x2c, 0x0a, 0x37, 0xe3, 0x43, 0x17, 0xaa, 0x6e, 0xe8, 0xb4, 0x98, 0xec, 0x9e, 0x37, 0x2e, 0x48, 0xe0, 0x51, 0x8a, 0x88, 0x59, 0x0c,
- /* (2^ 6)P */ 0x89, 0xd1, 0xb5, 0x99, 0xd6, 0xf1, 0xcb, 0xfb, 0x84, 0xdc, 0x9f, 0x8e, 0xd5, 0xf0, 0xae, 0xac, 0x14, 0x76, 0x1f, 0x23, 0x06, 0x0d, 0xc2, 0xc1, 0x72, 0xf9, 0x74, 0xa2, 0x8d, 0x21, 0x38, 0x29,
- /* (2^ 7)P */ 0x18, 0x7f, 0x1d, 0xff, 0xbe, 0x49, 0xaf, 0xf6, 0xc2, 0xc9, 0x7a, 0x38, 0x22, 0x1c, 0x54, 0xcc, 0x6b, 0xc5, 0x15, 0x40, 0xef, 0xc9, 0xfc, 0x96, 0xa9, 0x13, 0x09, 0x69, 0x7c, 0x62, 0xc1, 0x69,
- /* (2^ 8)P */ 0x0e, 0xdb, 0x33, 0x47, 0x2f, 0xfd, 0x86, 0x7a, 0xe9, 0x7d, 0x08, 0x9e, 0xf2, 0xc4, 0xb8, 0xfd, 0x29, 0xa2, 0xa2, 0x8e, 0x1a, 0x4b, 0x5e, 0x09, 0x79, 0x7a, 0xb3, 0x29, 0xc8, 0xa7, 0xd7, 0x1a,
- /* (2^ 9)P */ 0xc0, 0xa0, 0x7e, 0xd1, 0xca, 0x89, 0x2d, 0x34, 0x51, 0x20, 0xed, 0xcc, 0xa6, 0xdd, 0xbe, 0x67, 0x74, 0x2f, 0xb4, 0x2b, 0xbf, 0x31, 0xca, 0x19, 0xbb, 0xac, 0x80, 0x49, 0xc8, 0xb4, 0xf7, 0x3d,
- /* (2^ 10)P */ 0x83, 0xd8, 0x0a, 0xc8, 0x4d, 0x44, 0xc6, 0xa8, 0x85, 0xab, 0xe3, 0x66, 0x03, 0x44, 0x1e, 0xb9, 0xd8, 0xf6, 0x64, 0x01, 0xa0, 0xcd, 0x15, 0xc2, 0x68, 0xe6, 0x47, 0xf2, 0x6e, 0x7c, 0x86, 0x3d,
- /* (2^ 11)P */ 0x8c, 0x65, 0x3e, 0xcc, 0x2b, 0x58, 0xdd, 0xc7, 0x28, 0x55, 0x0e, 0xee, 0x48, 0x47, 0x2c, 0xfd, 0x71, 0x4f, 0x9f, 0xcc, 0x95, 0x9b, 0xfd, 0xa0, 0xdf, 0x5d, 0x67, 0xb0, 0x71, 0xd8, 0x29, 0x75,
- /* (2^ 12)P */ 0x78, 0xbd, 0x3c, 0x2d, 0xb4, 0x68, 0xf5, 0xb8, 0x82, 0xda, 0xf3, 0x91, 0x1b, 0x01, 0x33, 0x12, 0x62, 0x3b, 0x7c, 0x4a, 0xcd, 0x6c, 0xce, 0x2d, 0x03, 0x86, 0x49, 0x9e, 0x8e, 0xfc, 0xe7, 0x75,
- /* (2^ 13)P */ 0xec, 0xb6, 0xd0, 0xfc, 0xf1, 0x13, 0x4f, 0x2f, 0x45, 0x7a, 0xff, 0x29, 0x1f, 0xca, 0xa8, 0xf1, 0x9b, 0xe2, 0x81, 0x29, 0xa7, 0xc1, 0x49, 0xc2, 0x6a, 0xb5, 0x83, 0x8c, 0xbb, 0x0d, 0xbe, 0x6e,
- /* (2^ 14)P */ 0x22, 0xb2, 0x0b, 0x17, 0x8d, 0xfa, 0x14, 0x71, 0x5f, 0x93, 0x93, 0xbf, 0xd5, 0xdc, 0xa2, 0x65, 0x9a, 0x97, 0x9c, 0xb5, 0x68, 0x1f, 0xc4, 0xbd, 0x89, 0x92, 0xce, 0xa2, 0x79, 0xef, 0x0e, 0x2f,
- /* (2^ 15)P */ 0xce, 0x37, 0x3c, 0x08, 0x0c, 0xbf, 0xec, 0x42, 0x22, 0x63, 0x49, 0xec, 0x09, 0xbc, 0x30, 0x29, 0x0d, 0xac, 0xfe, 0x9c, 0xc1, 0xb0, 0x94, 0xf2, 0x80, 0xbb, 0xfa, 0xed, 0x4b, 0xaa, 0x80, 0x37,
- /* (2^ 16)P */ 0x29, 0xd9, 0xea, 0x7c, 0x3e, 0x7d, 0xc1, 0x56, 0xc5, 0x22, 0x57, 0x2e, 0xeb, 0x4b, 0xcb, 0xe7, 0x5a, 0xe1, 0xbf, 0x2d, 0x73, 0x31, 0xe9, 0x0c, 0xf8, 0x52, 0x10, 0x62, 0xc7, 0x83, 0xb8, 0x41,
- /* (2^ 17)P */ 0x50, 0x53, 0xd2, 0xc3, 0xa0, 0x5c, 0xf7, 0xdb, 0x51, 0xe3, 0xb1, 0x6e, 0x08, 0xbe, 0x36, 0x29, 0x12, 0xb2, 0xa9, 0xb4, 0x3c, 0xe0, 0x36, 0xc9, 0xaa, 0x25, 0x22, 0x32, 0x82, 0xbf, 0x45, 0x1d,
- /* (2^ 18)P */ 0xc5, 0x4c, 0x02, 0x6a, 0x03, 0xb1, 0x1a, 0xe8, 0x72, 0x9a, 0x4c, 0x30, 0x1c, 0x20, 0x12, 0xe2, 0xfc, 0xb1, 0x32, 0x68, 0xba, 0x3f, 0xd7, 0xc5, 0x81, 0x95, 0x83, 0x4d, 0x5a, 0xdb, 0xff, 0x20,
- /* (2^ 19)P */ 0xad, 0x0f, 0x5d, 0xbe, 0x67, 0xd3, 0x83, 0xa2, 0x75, 0x44, 0x16, 0x8b, 0xca, 0x25, 0x2b, 0x6c, 0x2e, 0xf2, 0xaa, 0x7c, 0x46, 0x35, 0x49, 0x9d, 0x49, 0xff, 0x85, 0xee, 0x8e, 0x40, 0x66, 0x51,
- /* (2^ 20)P */ 0x61, 0xe3, 0xb4, 0xfa, 0xa2, 0xba, 0x67, 0x3c, 0xef, 0x5c, 0xf3, 0x7e, 0xc6, 0x33, 0xe4, 0xb3, 0x1c, 0x9b, 0x15, 0x41, 0x92, 0x72, 0x59, 0x52, 0x33, 0xab, 0xb0, 0xd5, 0x92, 0x18, 0x62, 0x6a,
- /* (2^ 21)P */ 0xcb, 0xcd, 0x55, 0x75, 0x38, 0x4a, 0xb7, 0x20, 0x3f, 0x92, 0x08, 0x12, 0x0e, 0xa1, 0x2a, 0x53, 0xd1, 0x1d, 0x28, 0x62, 0x77, 0x7b, 0xa1, 0xea, 0xbf, 0x44, 0x5c, 0xf0, 0x43, 0x34, 0xab, 0x61,
- /* (2^ 22)P */ 0xf8, 0xde, 0x24, 0x23, 0x42, 0x6c, 0x7a, 0x25, 0x7f, 0xcf, 0xe3, 0x17, 0x10, 0x6c, 0x1c, 0x13, 0x57, 0xa2, 0x30, 0xf6, 0x39, 0x87, 0x75, 0x23, 0x80, 0x85, 0xa7, 0x01, 0x7a, 0x40, 0x5a, 0x29,
- /* (2^ 23)P */ 0xd9, 0xa8, 0x5d, 0x6d, 0x24, 0x43, 0xc4, 0xf8, 0x5d, 0xfa, 0x52, 0x0c, 0x45, 0x75, 0xd7, 0x19, 0x3d, 0xf8, 0x1b, 0x73, 0x92, 0xfc, 0xfc, 0x2a, 0x00, 0x47, 0x2b, 0x1b, 0xe8, 0xc8, 0x10, 0x7d,
- /* (2^ 24)P */ 0x0b, 0xa2, 0xba, 0x70, 0x1f, 0x27, 0xe0, 0xc8, 0x57, 0x39, 0xa6, 0x7c, 0x86, 0x48, 0x37, 0x99, 0xbb, 0xd4, 0x7e, 0xcb, 0xb3, 0xef, 0x12, 0x54, 0x75, 0x29, 0xe6, 0x73, 0x61, 0xd3, 0x96, 0x31,
- /* (2^ 25)P */ 0xfc, 0xdf, 0xc7, 0x41, 0xd1, 0xca, 0x5b, 0xde, 0x48, 0xc8, 0x95, 0xb3, 0xd2, 0x8c, 0xcc, 0x47, 0xcb, 0xf3, 0x1a, 0xe1, 0x42, 0xd9, 0x4c, 0xa3, 0xc2, 0xce, 0x4e, 0xd0, 0xf2, 0xdb, 0x56, 0x02,
- /* (2^ 26)P */ 0x7f, 0x66, 0x0e, 0x4b, 0xe9, 0xb7, 0x5a, 0x87, 0x10, 0x0d, 0x85, 0xc0, 0x83, 0xdd, 0xd4, 0xca, 0x9f, 0xc7, 0x72, 0x4e, 0x8f, 0x2e, 0xf1, 0x47, 0x9b, 0xb1, 0x85, 0x8c, 0xbb, 0x87, 0x1a, 0x5f,
- /* (2^ 27)P */ 0xb8, 0x51, 0x7f, 0x43, 0xb6, 0xd0, 0xe9, 0x7a, 0x65, 0x90, 0x87, 0x18, 0x55, 0xce, 0xc7, 0x12, 0xee, 0x7a, 0xf7, 0x5c, 0xfe, 0x09, 0xde, 0x2a, 0x27, 0x56, 0x2c, 0x7d, 0x2f, 0x5a, 0xa0, 0x23,
- /* (2^ 28)P */ 0x9a, 0x16, 0x7c, 0xf1, 0x28, 0xe1, 0x08, 0x59, 0x2d, 0x85, 0xd0, 0x8a, 0xdd, 0x98, 0x74, 0xf7, 0x64, 0x2f, 0x10, 0xab, 0xce, 0xc4, 0xb4, 0x74, 0x45, 0x98, 0x13, 0x10, 0xdd, 0xba, 0x3a, 0x18,
- /* (2^ 29)P */ 0xac, 0xaa, 0x92, 0xaa, 0x8d, 0xba, 0x65, 0xb1, 0x05, 0x67, 0x38, 0x99, 0x95, 0xef, 0xc5, 0xd5, 0xd1, 0x40, 0xfc, 0xf8, 0x0c, 0x8f, 0x2f, 0xbe, 0x14, 0x45, 0x20, 0xee, 0x35, 0xe6, 0x01, 0x27,
- /* (2^ 30)P */ 0x14, 0x65, 0x15, 0x20, 0x00, 0xa8, 0x9f, 0x62, 0xce, 0xc1, 0xa8, 0x64, 0x87, 0x86, 0x23, 0xf2, 0x0e, 0x06, 0x3f, 0x0b, 0xff, 0x4f, 0x89, 0x5b, 0xfa, 0xa3, 0x08, 0xf7, 0x4c, 0x94, 0xd9, 0x60,
- /* (2^ 31)P */ 0x1f, 0x20, 0x7a, 0x1c, 0x1a, 0x00, 0xea, 0xae, 0x63, 0xce, 0xe2, 0x3e, 0x63, 0x6a, 0xf1, 0xeb, 0xe1, 0x07, 0x7a, 0x4c, 0x59, 0x09, 0x77, 0x6f, 0xcb, 0x08, 0x02, 0x0d, 0x15, 0x58, 0xb9, 0x79,
- /* (2^ 32)P */ 0xe7, 0x10, 0xd4, 0x01, 0x53, 0x5e, 0xb5, 0x24, 0x4d, 0xc8, 0xfd, 0xf3, 0xdf, 0x4e, 0xa3, 0xe3, 0xd8, 0x32, 0x40, 0x90, 0xe4, 0x68, 0x87, 0xd8, 0xec, 0xae, 0x3a, 0x7b, 0x42, 0x84, 0x13, 0x13,
- /* (2^ 33)P */ 0x14, 0x4f, 0x23, 0x86, 0x12, 0xe5, 0x05, 0x84, 0x29, 0xc5, 0xb4, 0xad, 0x39, 0x47, 0xdc, 0x14, 0xfd, 0x4f, 0x63, 0x50, 0xb2, 0xb5, 0xa2, 0xb8, 0x93, 0xff, 0xa7, 0xd8, 0x4a, 0xa9, 0xe2, 0x2f,
- /* (2^ 34)P */ 0xdd, 0xfa, 0x43, 0xe8, 0xef, 0x57, 0x5c, 0xec, 0x18, 0x99, 0xbb, 0xf0, 0x40, 0xce, 0x43, 0x28, 0x05, 0x63, 0x3d, 0xcf, 0xd6, 0x61, 0xb5, 0xa4, 0x7e, 0x77, 0xfb, 0xe8, 0xbd, 0x29, 0x36, 0x74,
- /* (2^ 35)P */ 0x8f, 0x73, 0xaf, 0xbb, 0x46, 0xdd, 0x3e, 0x34, 0x51, 0xa6, 0x01, 0xb1, 0x28, 0x18, 0x98, 0xed, 0x7a, 0x79, 0x2c, 0x88, 0x0b, 0x76, 0x01, 0xa4, 0x30, 0x87, 0xc8, 0x8d, 0xe2, 0x23, 0xc2, 0x1f,
- /* (2^ 36)P */ 0x0e, 0xba, 0x0f, 0xfc, 0x91, 0x4e, 0x60, 0x48, 0xa4, 0x6f, 0x2c, 0x05, 0x8f, 0xf7, 0x37, 0xb6, 0x9c, 0x23, 0xe9, 0x09, 0x3d, 0xac, 0xcc, 0x91, 0x7c, 0x68, 0x7a, 0x43, 0xd4, 0xee, 0xf7, 0x23,
- /* (2^ 37)P */ 0x00, 0xd8, 0x9b, 0x8d, 0x11, 0xb1, 0x73, 0x51, 0xa7, 0xd4, 0x89, 0x31, 0xb6, 0x41, 0xd6, 0x29, 0x86, 0xc5, 0xbb, 0x88, 0x79, 0x17, 0xbf, 0xfd, 0xf5, 0x1d, 0xd8, 0xca, 0x4f, 0x89, 0x59, 0x29,
- /* (2^ 38)P */ 0x99, 0xc8, 0xbb, 0xb4, 0xf3, 0x8e, 0xbc, 0xae, 0xb9, 0x92, 0x69, 0xb2, 0x5a, 0x99, 0x48, 0x41, 0xfb, 0x2c, 0xf9, 0x34, 0x01, 0x0b, 0xe2, 0x24, 0xe8, 0xde, 0x05, 0x4a, 0x89, 0x58, 0xd1, 0x40,
- /* (2^ 39)P */ 0xf6, 0x76, 0xaf, 0x85, 0x11, 0x0b, 0xb0, 0x46, 0x79, 0x7a, 0x18, 0x73, 0x78, 0xc7, 0xba, 0x26, 0x5f, 0xff, 0x8f, 0xab, 0x95, 0xbf, 0xc0, 0x3d, 0xd7, 0x24, 0x55, 0x94, 0xd8, 0x8b, 0x60, 0x2a,
- /* (2^ 40)P */ 0x02, 0x63, 0x44, 0xbd, 0x88, 0x95, 0x44, 0x26, 0x9c, 0x43, 0x88, 0x03, 0x1c, 0xc2, 0x4b, 0x7c, 0xb2, 0x11, 0xbd, 0x83, 0xf3, 0xa4, 0x98, 0x8e, 0xb9, 0x76, 0xd8, 0xc9, 0x7b, 0x8d, 0x21, 0x26,
- /* (2^ 41)P */ 0x8a, 0x17, 0x7c, 0x99, 0x42, 0x15, 0x08, 0xe3, 0x6f, 0x60, 0xb6, 0x6f, 0xa8, 0x29, 0x2d, 0x3c, 0x74, 0x93, 0x27, 0xfa, 0x36, 0x77, 0x21, 0x5c, 0xfa, 0xb1, 0xfe, 0x4a, 0x73, 0x05, 0xde, 0x7d,
- /* (2^ 42)P */ 0xab, 0x2b, 0xd4, 0x06, 0x39, 0x0e, 0xf1, 0x3b, 0x9c, 0x64, 0x80, 0x19, 0x3e, 0x80, 0xf7, 0xe4, 0x7a, 0xbf, 0x95, 0x95, 0xf8, 0x3b, 0x05, 0xe6, 0x30, 0x55, 0x24, 0xda, 0x38, 0xaf, 0x4f, 0x39,
- /* (2^ 43)P */ 0xf4, 0x28, 0x69, 0x89, 0x58, 0xfb, 0x8e, 0x7a, 0x3c, 0x11, 0x6a, 0xcc, 0xe9, 0x78, 0xc7, 0xfb, 0x6f, 0x59, 0xaf, 0x30, 0xe3, 0x0c, 0x67, 0x72, 0xf7, 0x6c, 0x3d, 0x1d, 0xa8, 0x22, 0xf2, 0x48,
- /* (2^ 44)P */ 0xa7, 0xca, 0x72, 0x0d, 0x41, 0xce, 0x1f, 0xf0, 0x95, 0x55, 0x3b, 0x21, 0xc7, 0xec, 0x20, 0x5a, 0x83, 0x14, 0xfa, 0xc1, 0x65, 0x11, 0xc2, 0x7b, 0x41, 0xa7, 0xa8, 0x1d, 0xe3, 0x9a, 0xf8, 0x07,
- /* (2^ 45)P */ 0xf9, 0x0f, 0x83, 0xc6, 0xb4, 0xc2, 0xd2, 0x05, 0x93, 0x62, 0x31, 0xc6, 0x0f, 0x33, 0x3e, 0xd4, 0x04, 0xa9, 0xd3, 0x96, 0x0a, 0x59, 0xa5, 0xa5, 0xb6, 0x33, 0x53, 0xa6, 0x91, 0xdb, 0x5e, 0x70,
- /* (2^ 46)P */ 0xf7, 0xa5, 0xb9, 0x0b, 0x5e, 0xe1, 0x8e, 0x04, 0x5d, 0xaf, 0x0a, 0x9e, 0xca, 0xcf, 0x40, 0x32, 0x0b, 0xa4, 0xc4, 0xed, 0xce, 0x71, 0x4b, 0x8f, 0x6d, 0x4a, 0x54, 0xde, 0xa3, 0x0d, 0x1c, 0x62,
- /* (2^ 47)P */ 0x91, 0x40, 0x8c, 0xa0, 0x36, 0x28, 0x87, 0x92, 0x45, 0x14, 0xc9, 0x10, 0xb0, 0x75, 0x83, 0xce, 0x94, 0x63, 0x27, 0x4f, 0x52, 0xeb, 0x72, 0x8a, 0x35, 0x36, 0xc8, 0x7e, 0xfa, 0xfc, 0x67, 0x26,
- /* (2^ 48)P */ 0x2a, 0x75, 0xe8, 0x45, 0x33, 0x17, 0x4c, 0x7f, 0xa5, 0x79, 0x70, 0xee, 0xfe, 0x47, 0x1b, 0x06, 0x34, 0xff, 0x86, 0x9f, 0xfa, 0x9a, 0xdd, 0x25, 0x9c, 0xc8, 0x5d, 0x42, 0xf5, 0xce, 0x80, 0x37,
- /* (2^ 49)P */ 0xe9, 0xb4, 0x3b, 0x51, 0x5a, 0x03, 0x46, 0x1a, 0xda, 0x5a, 0x57, 0xac, 0x79, 0xf3, 0x1e, 0x3e, 0x50, 0x4b, 0xa2, 0x5f, 0x1c, 0x5f, 0x8c, 0xc7, 0x22, 0x9f, 0xfd, 0x34, 0x76, 0x96, 0x1a, 0x32,
- /* (2^ 50)P */ 0xfa, 0x27, 0x6e, 0x82, 0xb8, 0x07, 0x67, 0x94, 0xd0, 0x6f, 0x50, 0x4c, 0xd6, 0x84, 0xca, 0x3d, 0x36, 0x14, 0xe9, 0x75, 0x80, 0x21, 0x89, 0xc1, 0x84, 0x84, 0x3b, 0x9b, 0x16, 0x84, 0x92, 0x6d,
- /* (2^ 51)P */ 0xdf, 0x2d, 0x3f, 0x38, 0x40, 0xe8, 0x67, 0x3a, 0x75, 0x9b, 0x4f, 0x0c, 0xa3, 0xc9, 0xee, 0x33, 0x47, 0xef, 0x83, 0xa7, 0x6f, 0xc8, 0xc7, 0x3e, 0xc4, 0xfb, 0xc9, 0xba, 0x9f, 0x44, 0xec, 0x26,
- /* (2^ 52)P */ 0x7d, 0x9e, 0x9b, 0xa0, 0xcb, 0x38, 0x0f, 0x5c, 0x8c, 0x47, 0xa3, 0x62, 0xc7, 0x8c, 0x16, 0x81, 0x1c, 0x12, 0xfc, 0x06, 0xd3, 0xb0, 0x23, 0x3e, 0xdd, 0xdc, 0xef, 0xa5, 0xa0, 0x8a, 0x23, 0x5a,
- /* (2^ 53)P */ 0xff, 0x43, 0xea, 0xc4, 0x21, 0x61, 0xa2, 0x1b, 0xb5, 0x32, 0x88, 0x7c, 0x7f, 0xc7, 0xf8, 0x36, 0x9a, 0xf9, 0xdc, 0x0a, 0x0b, 0xea, 0xfb, 0x88, 0xf9, 0xeb, 0x5b, 0xc2, 0x8e, 0x93, 0xa9, 0x5c,
- /* (2^ 54)P */ 0xa0, 0xcd, 0xfc, 0x51, 0x5e, 0x6a, 0x43, 0xd5, 0x3b, 0x89, 0xcd, 0xc2, 0x97, 0x47, 0xbc, 0x1d, 0x08, 0x4a, 0x22, 0xd3, 0x65, 0x6a, 0x34, 0x19, 0x66, 0xf4, 0x9a, 0x9b, 0xe4, 0x34, 0x50, 0x0f,
- /* (2^ 55)P */ 0x6e, 0xb9, 0xe0, 0xa1, 0x67, 0x39, 0x3c, 0xf2, 0x88, 0x4d, 0x7a, 0x86, 0xfa, 0x08, 0x8b, 0xe5, 0x79, 0x16, 0x34, 0xa7, 0xc6, 0xab, 0x2f, 0xfb, 0x46, 0x69, 0x02, 0xb6, 0x1e, 0x38, 0x75, 0x2a,
- /* (2^ 56)P */ 0xac, 0x20, 0x94, 0xc1, 0xe4, 0x3b, 0x0a, 0xc8, 0xdc, 0xb6, 0xf2, 0x81, 0xc6, 0xf6, 0xb1, 0x66, 0x88, 0x33, 0xe9, 0x61, 0x67, 0x03, 0xf7, 0x7c, 0xc4, 0xa4, 0x60, 0xa6, 0xd8, 0xbb, 0xab, 0x25,
- /* (2^ 57)P */ 0x98, 0x51, 0xfd, 0x14, 0xba, 0x12, 0xea, 0x91, 0xa9, 0xff, 0x3c, 0x4a, 0xfc, 0x50, 0x49, 0x68, 0x28, 0xad, 0xf5, 0x30, 0x21, 0x84, 0x26, 0xf8, 0x41, 0xa4, 0x01, 0x53, 0xf7, 0x88, 0xa9, 0x3e,
- /* (2^ 58)P */ 0x6f, 0x8c, 0x5f, 0x69, 0x9a, 0x10, 0x78, 0xc9, 0xf3, 0xc3, 0x30, 0x05, 0x4a, 0xeb, 0x46, 0x17, 0x95, 0x99, 0x45, 0xb4, 0x77, 0x6d, 0x4d, 0x44, 0xc7, 0x5c, 0x4e, 0x05, 0x8c, 0x2b, 0x95, 0x75,
- /* (2^ 59)P */ 0xaa, 0xd6, 0xf4, 0x15, 0x79, 0x3f, 0x70, 0xa3, 0xd8, 0x47, 0x26, 0x2f, 0x20, 0x46, 0xc3, 0x66, 0x4b, 0x64, 0x1d, 0x81, 0xdf, 0x69, 0x14, 0xd0, 0x1f, 0xd7, 0xa5, 0x81, 0x7d, 0xa4, 0xfe, 0x77,
- /* (2^ 60)P */ 0x81, 0xa3, 0x7c, 0xf5, 0x9e, 0x52, 0xe9, 0xc5, 0x1a, 0x88, 0x2f, 0xce, 0xb9, 0xb4, 0xee, 0x6e, 0xd6, 0x9b, 0x00, 0xe8, 0x28, 0x1a, 0xe9, 0xb6, 0xec, 0x3f, 0xfc, 0x9a, 0x3e, 0xbe, 0x80, 0x4b,
- /* (2^ 61)P */ 0xc5, 0xd2, 0xae, 0x26, 0xc5, 0x73, 0x37, 0x7e, 0x9d, 0xa4, 0xc9, 0x53, 0xb4, 0xfc, 0x4a, 0x1b, 0x4d, 0xb2, 0xff, 0xba, 0xd7, 0xbd, 0x20, 0xa9, 0x0e, 0x40, 0x2d, 0x12, 0x9f, 0x69, 0x54, 0x7c,
- /* (2^ 62)P */ 0xc8, 0x4b, 0xa9, 0x4f, 0xe1, 0xc8, 0x46, 0xef, 0x5e, 0xed, 0x52, 0x29, 0xce, 0x74, 0xb0, 0xe0, 0xd5, 0x85, 0xd8, 0xdb, 0xe1, 0x50, 0xa4, 0xbe, 0x2c, 0x71, 0x0f, 0x32, 0x49, 0x86, 0xb6, 0x61,
- /* (2^ 63)P */ 0xd1, 0xbd, 0xcc, 0x09, 0x73, 0x5f, 0x48, 0x8a, 0x2d, 0x1a, 0x4d, 0x7d, 0x0d, 0x32, 0x06, 0xbd, 0xf4, 0xbe, 0x2d, 0x32, 0x73, 0x29, 0x23, 0x25, 0x70, 0xf7, 0x17, 0x8c, 0x75, 0xc4, 0x5d, 0x44,
- /* (2^ 64)P */ 0x3c, 0x93, 0xc8, 0x7c, 0x17, 0x34, 0x04, 0xdb, 0x9f, 0x05, 0xea, 0x75, 0x21, 0xe8, 0x6f, 0xed, 0x34, 0xdb, 0x53, 0xc0, 0xfd, 0xbe, 0xfe, 0x1e, 0x99, 0xaf, 0x5d, 0xc6, 0x67, 0xe8, 0xdb, 0x4a,
- /* (2^ 65)P */ 0xdf, 0x09, 0x06, 0xa9, 0xa2, 0x71, 0xcd, 0x3a, 0x50, 0x40, 0xd0, 0x6d, 0x85, 0x91, 0xe9, 0xe5, 0x3c, 0xc2, 0x57, 0x81, 0x68, 0x9b, 0xc6, 0x1e, 0x4d, 0xfe, 0x5c, 0x88, 0xf6, 0x27, 0x74, 0x69,
- /* (2^ 66)P */ 0x51, 0xa8, 0xe1, 0x65, 0x9b, 0x7b, 0xbe, 0xd7, 0xdd, 0x36, 0xc5, 0x22, 0xd5, 0x28, 0x3d, 0xa0, 0x45, 0xb6, 0xd2, 0x8f, 0x65, 0x9d, 0x39, 0x28, 0xe1, 0x41, 0x26, 0x7c, 0xe1, 0xb7, 0xe5, 0x49,
- /* (2^ 67)P */ 0xa4, 0x57, 0x04, 0x70, 0x98, 0x3a, 0x8c, 0x6f, 0x78, 0x67, 0xbb, 0x5e, 0xa2, 0xf0, 0x78, 0x50, 0x0f, 0x96, 0x82, 0xc3, 0xcb, 0x3c, 0x3c, 0xd1, 0xb1, 0x84, 0xdf, 0xa7, 0x58, 0x32, 0x00, 0x2e,
- /* (2^ 68)P */ 0x1c, 0x6a, 0x29, 0xe6, 0x9b, 0xf3, 0xd1, 0x8a, 0xb2, 0xbf, 0x5f, 0x2a, 0x65, 0xaa, 0xee, 0xc1, 0xcb, 0xf3, 0x26, 0xfd, 0x73, 0x06, 0xee, 0x33, 0xcc, 0x2c, 0x9d, 0xa6, 0x73, 0x61, 0x25, 0x59,
- /* (2^ 69)P */ 0x41, 0xfc, 0x18, 0x4e, 0xaa, 0x07, 0xea, 0x41, 0x1e, 0xa5, 0x87, 0x7c, 0x52, 0x19, 0xfc, 0xd9, 0x6f, 0xca, 0x31, 0x58, 0x80, 0xcb, 0xaa, 0xbd, 0x4f, 0x69, 0x16, 0xc9, 0x2d, 0x65, 0x5b, 0x44,
- /* (2^ 70)P */ 0x15, 0x23, 0x17, 0xf2, 0xa7, 0xa3, 0x92, 0xce, 0x64, 0x99, 0x1b, 0xe1, 0x2d, 0x28, 0xdc, 0x1e, 0x4a, 0x31, 0x4c, 0xe0, 0xaf, 0x3a, 0x82, 0xa1, 0x86, 0xf5, 0x7c, 0x43, 0x94, 0x2d, 0x0a, 0x79,
- /* (2^ 71)P */ 0x09, 0xe0, 0xf6, 0x93, 0xfb, 0x47, 0xc4, 0x71, 0x76, 0x52, 0x84, 0x22, 0x67, 0xa5, 0x22, 0x89, 0x69, 0x51, 0x4f, 0x20, 0x3b, 0x90, 0x70, 0xbf, 0xfe, 0x19, 0xa3, 0x1b, 0x89, 0x89, 0x7a, 0x2f,
- /* (2^ 72)P */ 0x0c, 0x14, 0xe2, 0x77, 0xb5, 0x8e, 0xa0, 0x02, 0xf4, 0xdc, 0x7b, 0x42, 0xd4, 0x4e, 0x9a, 0xed, 0xd1, 0x3c, 0x32, 0xe4, 0x44, 0xec, 0x53, 0x52, 0x5b, 0x35, 0xe9, 0x14, 0x3c, 0x36, 0x88, 0x3e,
- /* (2^ 73)P */ 0x8c, 0x0b, 0x11, 0x77, 0x42, 0xc1, 0x66, 0xaa, 0x90, 0x33, 0xa2, 0x10, 0x16, 0x39, 0xe0, 0x1a, 0xa2, 0xc2, 0x3f, 0xc9, 0x12, 0xbd, 0x30, 0x20, 0xab, 0xc7, 0x55, 0x95, 0x57, 0x41, 0xe1, 0x3e,
- /* (2^ 74)P */ 0x41, 0x7d, 0x6e, 0x6d, 0x3a, 0xde, 0x14, 0x92, 0xfe, 0x7e, 0xf1, 0x07, 0x86, 0xd8, 0xcd, 0x3c, 0x17, 0x12, 0xe1, 0xf8, 0x88, 0x12, 0x4f, 0x67, 0xd0, 0x93, 0x9f, 0x32, 0x0f, 0x25, 0x82, 0x56,
- /* (2^ 75)P */ 0x6e, 0x39, 0x2e, 0x6d, 0x13, 0x0b, 0xf0, 0x6c, 0xbf, 0xde, 0x14, 0x10, 0x6f, 0xf8, 0x4c, 0x6e, 0x83, 0x4e, 0xcc, 0xbf, 0xb5, 0xb1, 0x30, 0x59, 0xb6, 0x16, 0xba, 0x8a, 0xb4, 0x69, 0x70, 0x04,
- /* (2^ 76)P */ 0x93, 0x07, 0xb2, 0x69, 0xab, 0xe4, 0x4c, 0x0d, 0x9e, 0xfb, 0xd0, 0x97, 0x1a, 0xb9, 0x4d, 0xb2, 0x1d, 0xd0, 0x00, 0x4e, 0xf5, 0x50, 0xfa, 0xcd, 0xb5, 0xdd, 0x8b, 0x36, 0x85, 0x10, 0x1b, 0x22,
- /* (2^ 77)P */ 0xd2, 0xd8, 0xe3, 0xb1, 0x68, 0x94, 0xe5, 0xe7, 0x93, 0x2f, 0x12, 0xbd, 0x63, 0x65, 0xc5, 0x53, 0x09, 0x3f, 0x66, 0xe0, 0x03, 0xa9, 0xe8, 0xee, 0x42, 0x3d, 0xbe, 0xcb, 0x62, 0xa6, 0xef, 0x61,
- /* (2^ 78)P */ 0x2a, 0xab, 0x6e, 0xde, 0xdd, 0xdd, 0xf8, 0x2c, 0x31, 0xf2, 0x35, 0x14, 0xd5, 0x0a, 0xf8, 0x9b, 0x73, 0x49, 0xf0, 0xc9, 0xce, 0xda, 0xea, 0x5d, 0x27, 0x9b, 0xd2, 0x41, 0x5d, 0x5b, 0x27, 0x29,
- /* (2^ 79)P */ 0x4f, 0xf1, 0xeb, 0x95, 0x08, 0x0f, 0xde, 0xcf, 0xa7, 0x05, 0x49, 0x05, 0x6b, 0xb9, 0xaa, 0xb9, 0xfd, 0x20, 0xc4, 0xa1, 0xd9, 0x0d, 0xe8, 0xca, 0xc7, 0xbb, 0x73, 0x16, 0x2f, 0xbf, 0x63, 0x0a,
- /* (2^ 80)P */ 0x8c, 0xbc, 0x8f, 0x95, 0x11, 0x6e, 0x2f, 0x09, 0xad, 0x2f, 0x82, 0x04, 0xe8, 0x81, 0x2a, 0x67, 0x17, 0x25, 0xd5, 0x60, 0x15, 0x35, 0xc8, 0xca, 0xf8, 0x92, 0xf1, 0xc8, 0x22, 0x77, 0x3f, 0x6f,
- /* (2^ 81)P */ 0xb7, 0x94, 0xe8, 0xc2, 0xcc, 0x90, 0xba, 0xf8, 0x0d, 0x9f, 0xff, 0x38, 0xa4, 0x57, 0x75, 0x2c, 0x59, 0x23, 0xe5, 0x5a, 0x85, 0x1d, 0x4d, 0x89, 0x69, 0x3d, 0x74, 0x7b, 0x15, 0x22, 0xe1, 0x68,
- /* (2^ 82)P */ 0xf3, 0x19, 0xb9, 0xcf, 0x70, 0x55, 0x7e, 0xd8, 0xb9, 0x8d, 0x79, 0x95, 0xcd, 0xde, 0x2c, 0x3f, 0xce, 0xa2, 0xc0, 0x10, 0x47, 0x15, 0x21, 0x21, 0xb2, 0xc5, 0x6d, 0x24, 0x15, 0xa1, 0x66, 0x3c,
- /* (2^ 83)P */ 0x72, 0xcb, 0x4e, 0x29, 0x62, 0xc5, 0xed, 0xcb, 0x16, 0x0b, 0x28, 0x6a, 0xc3, 0x43, 0x71, 0xba, 0x67, 0x8b, 0x07, 0xd4, 0xef, 0xc2, 0x10, 0x96, 0x1e, 0x4b, 0x6a, 0x94, 0x5d, 0x73, 0x44, 0x61,
- /* (2^ 84)P */ 0x50, 0x33, 0x5b, 0xd7, 0x1e, 0x11, 0x6f, 0x53, 0x1b, 0xd8, 0x41, 0x20, 0x8c, 0xdb, 0x11, 0x02, 0x3c, 0x41, 0x10, 0x0e, 0x00, 0xb1, 0x3c, 0xf9, 0x76, 0x88, 0x9e, 0x03, 0x3c, 0xfd, 0x9d, 0x14,
- /* (2^ 85)P */ 0x5b, 0x15, 0x63, 0x6b, 0xe4, 0xdd, 0x79, 0xd4, 0x76, 0x79, 0x83, 0x3c, 0xe9, 0x15, 0x6e, 0xb6, 0x38, 0xe0, 0x13, 0x1f, 0x3b, 0xe4, 0xfd, 0xda, 0x35, 0x0b, 0x4b, 0x2e, 0x1a, 0xda, 0xaf, 0x5f,
- /* (2^ 86)P */ 0x81, 0x75, 0x19, 0x17, 0xdf, 0xbb, 0x00, 0x36, 0xc2, 0xd2, 0x3c, 0xbe, 0x0b, 0x05, 0x72, 0x39, 0x86, 0xbe, 0xd5, 0xbd, 0x6d, 0x90, 0x38, 0x59, 0x0f, 0x86, 0x9b, 0x3f, 0xe4, 0xe5, 0xfc, 0x34,
- /* (2^ 87)P */ 0x02, 0x4d, 0xd1, 0x42, 0xcd, 0xa4, 0xa8, 0x75, 0x65, 0xdf, 0x41, 0x34, 0xc5, 0xab, 0x8d, 0x82, 0xd3, 0x31, 0xe1, 0xd2, 0xed, 0xab, 0xdc, 0x33, 0x5f, 0xd2, 0x14, 0xb8, 0x6f, 0xd7, 0xba, 0x3e,
- /* (2^ 88)P */ 0x0f, 0xe1, 0x70, 0x6f, 0x56, 0x6f, 0x90, 0xd4, 0x5a, 0x0f, 0x69, 0x51, 0xaa, 0xf7, 0x12, 0x5d, 0xf2, 0xfc, 0xce, 0x76, 0x6e, 0xb1, 0xad, 0x45, 0x99, 0x29, 0x23, 0xad, 0xae, 0x68, 0xf7, 0x01,
- /* (2^ 89)P */ 0xbd, 0xfe, 0x48, 0x62, 0x7b, 0xc7, 0x6c, 0x2b, 0xfd, 0xaf, 0x3a, 0xec, 0x28, 0x06, 0xd3, 0x3c, 0x6a, 0x48, 0xef, 0xd4, 0x80, 0x0b, 0x1c, 0xce, 0x23, 0x6c, 0xf6, 0xa6, 0x2e, 0xff, 0x3b, 0x4c,
- /* (2^ 90)P */ 0x5f, 0xeb, 0xea, 0x4a, 0x09, 0xc4, 0x2e, 0x3f, 0xa7, 0x2c, 0x37, 0x6e, 0x28, 0x9b, 0xb1, 0x61, 0x1d, 0x70, 0x2a, 0xde, 0x66, 0xa9, 0xef, 0x5e, 0xef, 0xe3, 0x55, 0xde, 0x65, 0x05, 0xb2, 0x23,
- /* (2^ 91)P */ 0x57, 0x85, 0xd5, 0x79, 0x52, 0xca, 0x01, 0xe3, 0x4f, 0x87, 0xc2, 0x27, 0xce, 0xd4, 0xb2, 0x07, 0x67, 0x1d, 0xcf, 0x9d, 0x8a, 0xcd, 0x32, 0xa5, 0x56, 0xff, 0x2b, 0x3f, 0xe2, 0xfe, 0x52, 0x2a,
- /* (2^ 92)P */ 0x3d, 0x66, 0xd8, 0x7c, 0xb3, 0xef, 0x24, 0x86, 0x94, 0x75, 0xbd, 0xff, 0x20, 0xac, 0xc7, 0xbb, 0x45, 0x74, 0xd3, 0x82, 0x9c, 0x5e, 0xb8, 0x57, 0x66, 0xec, 0xa6, 0x86, 0xcb, 0x52, 0x30, 0x7b,
- /* (2^ 93)P */ 0x1e, 0xe9, 0x25, 0x25, 0xad, 0xf0, 0x82, 0x34, 0xa0, 0xdc, 0x8e, 0xd2, 0x43, 0x80, 0xb6, 0x2c, 0x3a, 0x00, 0x1b, 0x2e, 0x05, 0x6d, 0x4f, 0xaf, 0x0a, 0x1b, 0x78, 0x29, 0x25, 0x8c, 0x5f, 0x18,
- /* (2^ 94)P */ 0xd6, 0xe0, 0x0c, 0xd8, 0x5b, 0xde, 0x41, 0xaa, 0xd6, 0xe9, 0x53, 0x68, 0x41, 0xb2, 0x07, 0x94, 0x3a, 0x4c, 0x7f, 0x35, 0x6e, 0xc3, 0x3e, 0x56, 0xce, 0x7b, 0x29, 0x0e, 0xdd, 0xb8, 0xc4, 0x4c,
- /* (2^ 95)P */ 0x0e, 0x73, 0xb8, 0xff, 0x52, 0x1a, 0xfc, 0xa2, 0x37, 0x8e, 0x05, 0x67, 0x6e, 0xf1, 0x11, 0x18, 0xe1, 0x4e, 0xdf, 0xcd, 0x66, 0xa3, 0xf9, 0x10, 0x99, 0xf0, 0xb9, 0xa0, 0xc4, 0xa0, 0xf4, 0x72,
- /* (2^ 96)P */ 0xa7, 0x4e, 0x3f, 0x66, 0x6f, 0xc0, 0x16, 0x8c, 0xba, 0x0f, 0x97, 0x4e, 0xf7, 0x3a, 0x3b, 0x69, 0x45, 0xc3, 0x9e, 0xd6, 0xf1, 0xe7, 0x02, 0x21, 0x89, 0x80, 0x8a, 0x96, 0xbc, 0x3c, 0xa5, 0x0b,
- /* (2^ 97)P */ 0x37, 0x55, 0xa1, 0xfe, 0xc7, 0x9d, 0x3d, 0xca, 0x93, 0x64, 0x53, 0x51, 0xbb, 0x24, 0x68, 0x4c, 0xb1, 0x06, 0x40, 0x84, 0x14, 0x63, 0x88, 0xb9, 0x60, 0xcc, 0x54, 0xb4, 0x2a, 0xa7, 0xd2, 0x40,
- /* (2^ 98)P */ 0x75, 0x09, 0x57, 0x12, 0xb7, 0xa1, 0x36, 0x59, 0x57, 0xa6, 0xbd, 0xde, 0x48, 0xd6, 0xb9, 0x91, 0xea, 0x30, 0x43, 0xb6, 0x4b, 0x09, 0x44, 0x33, 0xd0, 0x51, 0xee, 0x12, 0x0d, 0xa1, 0x6b, 0x00,
- /* (2^ 99)P */ 0x58, 0x5d, 0xde, 0xf5, 0x68, 0x84, 0x22, 0x19, 0xb0, 0x05, 0xcc, 0x38, 0x4c, 0x2f, 0xb1, 0x0e, 0x90, 0x19, 0x60, 0xd5, 0x9d, 0x9f, 0x03, 0xa1, 0x0b, 0x0e, 0xff, 0x4f, 0xce, 0xd4, 0x02, 0x45,
- /* (2^100)P */ 0x89, 0xc1, 0x37, 0x68, 0x10, 0x54, 0x20, 0xeb, 0x3c, 0xb9, 0xd3, 0x6d, 0x4c, 0x54, 0xf6, 0xd0, 0x4f, 0xd7, 0x16, 0xc4, 0x64, 0x70, 0x72, 0x40, 0xf0, 0x2e, 0x50, 0x4b, 0x11, 0xc6, 0x15, 0x6e,
- /* (2^101)P */ 0x6b, 0xa7, 0xb1, 0xcf, 0x98, 0xa3, 0xf2, 0x4d, 0xb1, 0xf6, 0xf2, 0x19, 0x74, 0x6c, 0x25, 0x11, 0x43, 0x60, 0x6e, 0x06, 0x62, 0x79, 0x49, 0x4a, 0x44, 0x5b, 0x35, 0x41, 0xab, 0x3a, 0x5b, 0x70,
- /* (2^102)P */ 0xd8, 0xb1, 0x97, 0xd7, 0x36, 0xf5, 0x5e, 0x36, 0xdb, 0xf0, 0xdd, 0x22, 0xd6, 0x6b, 0x07, 0x00, 0x88, 0x5a, 0x57, 0xe0, 0xb0, 0x33, 0xbf, 0x3b, 0x4d, 0xca, 0xe4, 0xc8, 0x05, 0xaa, 0x77, 0x37,
- /* (2^103)P */ 0x5f, 0xdb, 0x78, 0x55, 0xc8, 0x45, 0x27, 0x39, 0xe2, 0x5a, 0xae, 0xdb, 0x49, 0x41, 0xda, 0x6f, 0x67, 0x98, 0xdc, 0x8a, 0x0b, 0xb0, 0xf0, 0xb1, 0xa3, 0x1d, 0x6f, 0xd3, 0x37, 0x34, 0x96, 0x09,
- /* (2^104)P */ 0x53, 0x38, 0xdc, 0xa5, 0x90, 0x4e, 0x82, 0x7e, 0xbd, 0x5c, 0x13, 0x1f, 0x64, 0xf6, 0xb5, 0xcc, 0xcc, 0x8f, 0xce, 0x87, 0x6c, 0xd8, 0x36, 0x67, 0x9f, 0x24, 0x04, 0x66, 0xe2, 0x3c, 0x5f, 0x62,
- /* (2^105)P */ 0x3f, 0xf6, 0x02, 0x95, 0x05, 0xc8, 0x8a, 0xaf, 0x69, 0x14, 0x35, 0x2e, 0x0a, 0xe7, 0x05, 0x0c, 0x05, 0x63, 0x4b, 0x76, 0x9c, 0x2e, 0x29, 0x35, 0xc3, 0x3a, 0xe2, 0xc7, 0x60, 0x43, 0x39, 0x1a,
- /* (2^106)P */ 0x64, 0x32, 0x18, 0x51, 0x32, 0xd5, 0xc6, 0xd5, 0x4f, 0xb7, 0xc2, 0x43, 0xbd, 0x5a, 0x06, 0x62, 0x9b, 0x3f, 0x97, 0x3b, 0xd0, 0xf5, 0xfb, 0xb5, 0x5e, 0x6e, 0x20, 0x61, 0x36, 0xda, 0xa3, 0x13,
- /* (2^107)P */ 0xe5, 0x94, 0x5d, 0x72, 0x37, 0x58, 0xbd, 0xc6, 0xc5, 0x16, 0x50, 0x20, 0x12, 0x09, 0xe3, 0x18, 0x68, 0x3c, 0x03, 0x70, 0x15, 0xce, 0x88, 0x20, 0x87, 0x79, 0x83, 0x5c, 0x49, 0x1f, 0xba, 0x7f,
- /* (2^108)P */ 0x9d, 0x07, 0xf9, 0xf2, 0x23, 0x74, 0x8c, 0x5a, 0xc5, 0x3f, 0x02, 0x34, 0x7b, 0x15, 0x35, 0x17, 0x51, 0xb3, 0xfa, 0xd2, 0x9a, 0xb4, 0xf9, 0xe4, 0x3c, 0xe3, 0x78, 0xc8, 0x72, 0xff, 0x91, 0x66,
- /* (2^109)P */ 0x3e, 0xff, 0x5e, 0xdc, 0xde, 0x2a, 0x2c, 0x12, 0xf4, 0x6c, 0x95, 0xd8, 0xf1, 0x4b, 0xdd, 0xf8, 0xda, 0x5b, 0x9e, 0x9e, 0x5d, 0x20, 0x86, 0xeb, 0x43, 0xc7, 0x75, 0xd9, 0xb9, 0x92, 0x9b, 0x04,
- /* (2^110)P */ 0x5a, 0xc0, 0xf6, 0xb0, 0x30, 0x97, 0x37, 0xa5, 0x53, 0xa5, 0xf3, 0xc6, 0xac, 0xff, 0xa0, 0x72, 0x6d, 0xcd, 0x0d, 0xb2, 0x34, 0x2c, 0x03, 0xb0, 0x4a, 0x16, 0xd5, 0x88, 0xbc, 0x9d, 0x0e, 0x47,
- /* (2^111)P */ 0x47, 0xc0, 0x37, 0xa2, 0x0c, 0xf1, 0x9c, 0xb1, 0xa2, 0x81, 0x6c, 0x1f, 0x71, 0x66, 0x54, 0xb6, 0x43, 0x0b, 0xd8, 0x6d, 0xd1, 0x1b, 0x32, 0xb3, 0x8e, 0xbe, 0x5f, 0x0c, 0x60, 0x4f, 0xc1, 0x48,
- /* (2^112)P */ 0x03, 0xc8, 0xa6, 0x4a, 0x26, 0x1c, 0x45, 0x66, 0xa6, 0x7d, 0xfa, 0xa4, 0x04, 0x39, 0x6e, 0xb6, 0x95, 0x83, 0x12, 0xb3, 0xb0, 0x19, 0x5f, 0xd4, 0x10, 0xbc, 0xc9, 0xc3, 0x27, 0x26, 0x60, 0x31,
- /* (2^113)P */ 0x0d, 0xe1, 0xe4, 0x32, 0x48, 0xdc, 0x20, 0x31, 0xf7, 0x17, 0xc7, 0x56, 0x67, 0xc4, 0x20, 0xeb, 0x94, 0x02, 0x28, 0x67, 0x3f, 0x2e, 0xf5, 0x00, 0x09, 0xc5, 0x30, 0x47, 0xc1, 0x4f, 0x6d, 0x56,
- /* (2^114)P */ 0x06, 0x72, 0x83, 0xfd, 0x40, 0x5d, 0x3a, 0x7e, 0x7a, 0x54, 0x59, 0x71, 0xdc, 0x26, 0xe9, 0xc1, 0x95, 0x60, 0x8d, 0xa6, 0xfb, 0x30, 0x67, 0x21, 0xa7, 0xce, 0x69, 0x3f, 0x84, 0xc3, 0xe8, 0x22,
- /* (2^115)P */ 0x2b, 0x4b, 0x0e, 0x93, 0xe8, 0x74, 0xd0, 0x33, 0x16, 0x58, 0xd1, 0x84, 0x0e, 0x35, 0xe4, 0xb6, 0x65, 0x23, 0xba, 0xd6, 0x6a, 0xc2, 0x34, 0x55, 0xf3, 0xf3, 0xf1, 0x89, 0x2f, 0xc1, 0x73, 0x77,
- /* (2^116)P */ 0xaa, 0x62, 0x79, 0xa5, 0x4d, 0x40, 0xba, 0x8c, 0x56, 0xce, 0x99, 0x19, 0xa8, 0x97, 0x98, 0x5b, 0xfc, 0x92, 0x16, 0x12, 0x2f, 0x86, 0x8e, 0x50, 0x91, 0xc2, 0x93, 0xa0, 0x7f, 0x90, 0x81, 0x3a,
- /* (2^117)P */ 0x10, 0xa5, 0x25, 0x47, 0xff, 0xd0, 0xde, 0x0d, 0x03, 0xc5, 0x3f, 0x67, 0x10, 0xcc, 0xd8, 0x10, 0x89, 0x4e, 0x1f, 0x9f, 0x1c, 0x15, 0x9d, 0x5b, 0x4c, 0xa4, 0x09, 0xcb, 0xd5, 0xc1, 0xa5, 0x32,
- /* (2^118)P */ 0xfb, 0x41, 0x05, 0xb9, 0x42, 0xa4, 0x0a, 0x1e, 0xdb, 0x85, 0xb4, 0xc1, 0x7c, 0xeb, 0x85, 0x5f, 0xe5, 0xf2, 0x9d, 0x8a, 0xce, 0x95, 0xe5, 0xbe, 0x36, 0x22, 0x42, 0x22, 0xc7, 0x96, 0xe4, 0x25,
- /* (2^119)P */ 0xb9, 0xe5, 0x0f, 0xcd, 0x46, 0x3c, 0xdf, 0x5e, 0x88, 0x33, 0xa4, 0xd2, 0x7e, 0x5a, 0xe7, 0x34, 0x52, 0xe3, 0x61, 0xd7, 0x11, 0xde, 0x88, 0xe4, 0x5c, 0x54, 0x85, 0xa0, 0x01, 0x8a, 0x87, 0x0e,
- /* (2^120)P */ 0x04, 0xbb, 0x21, 0xe0, 0x77, 0x3c, 0x49, 0xba, 0x9a, 0x89, 0xdf, 0xc7, 0x43, 0x18, 0x4d, 0x2b, 0x67, 0x0d, 0xe8, 0x7a, 0x48, 0x7a, 0xa3, 0x9e, 0x94, 0x17, 0xe4, 0x11, 0x80, 0x95, 0xa9, 0x67,
- /* (2^121)P */ 0x65, 0xb0, 0x97, 0x66, 0x1a, 0x05, 0x58, 0x4b, 0xd4, 0xa6, 0x6b, 0x8d, 0x7d, 0x3f, 0xe3, 0x47, 0xc1, 0x46, 0xca, 0x83, 0xd4, 0xa8, 0x4d, 0xbb, 0x0d, 0xdb, 0xc2, 0x81, 0xa1, 0xca, 0xbe, 0x68,
- /* (2^122)P */ 0xa5, 0x9a, 0x98, 0x0b, 0xe9, 0x80, 0x89, 0x8d, 0x9b, 0xc9, 0x93, 0x2c, 0x4a, 0xb1, 0x5e, 0xf9, 0xa2, 0x73, 0x6e, 0x79, 0xc4, 0xc7, 0xc6, 0x51, 0x69, 0xb5, 0xef, 0xb5, 0x63, 0x83, 0x22, 0x6e,
- /* (2^123)P */ 0xc8, 0x24, 0xd6, 0x2d, 0xb0, 0xc0, 0xbb, 0xc6, 0xee, 0x70, 0x81, 0xec, 0x7d, 0xb4, 0x7e, 0x77, 0xa9, 0xaf, 0xcf, 0x04, 0xa0, 0x15, 0xde, 0x3c, 0x9b, 0xbf, 0x60, 0x71, 0x08, 0xbc, 0xc6, 0x1d,
- /* (2^124)P */ 0x02, 0x40, 0xc3, 0xee, 0x43, 0xe0, 0x07, 0x2e, 0x7f, 0xdc, 0x68, 0x7a, 0x67, 0xfc, 0xe9, 0x18, 0x9a, 0x5b, 0xd1, 0x8b, 0x18, 0x03, 0xda, 0xd8, 0x53, 0x82, 0x56, 0x00, 0xbb, 0xc3, 0xfb, 0x48,
- /* (2^125)P */ 0xe1, 0x4c, 0x65, 0xfb, 0x4c, 0x7d, 0x54, 0x57, 0xad, 0xe2, 0x58, 0xa0, 0x82, 0x5b, 0x56, 0xd3, 0x78, 0x44, 0x15, 0xbf, 0x0b, 0xaf, 0x3e, 0xf6, 0x18, 0xbb, 0xdf, 0x14, 0xf1, 0x1e, 0x53, 0x47,
- /* (2^126)P */ 0x87, 0xc5, 0x78, 0x42, 0x0a, 0x63, 0xec, 0xe1, 0xf3, 0x83, 0x8e, 0xca, 0x46, 0xd5, 0x07, 0x55, 0x2b, 0x0c, 0xdc, 0x3a, 0xc6, 0x35, 0xe1, 0x85, 0x4e, 0x84, 0x82, 0x56, 0xa8, 0xef, 0xa7, 0x0a,
- /* (2^127)P */ 0x15, 0xf6, 0xe1, 0xb3, 0xa8, 0x1b, 0x69, 0x72, 0xfa, 0x3f, 0xbe, 0x1f, 0x70, 0xe9, 0xb4, 0x32, 0x68, 0x78, 0xbb, 0x39, 0x2e, 0xd9, 0xb6, 0x97, 0xe8, 0x39, 0x2e, 0xa0, 0xde, 0x53, 0xfe, 0x2c,
- /* (2^128)P */ 0xb0, 0x52, 0xcd, 0x85, 0xcd, 0x92, 0x73, 0x68, 0x31, 0x98, 0xe2, 0x10, 0xc9, 0x66, 0xff, 0x27, 0x06, 0x2d, 0x83, 0xa9, 0x56, 0x45, 0x13, 0x97, 0xa0, 0xf8, 0x84, 0x0a, 0x36, 0xb0, 0x9b, 0x26,
- /* (2^129)P */ 0x5c, 0xf8, 0x43, 0x76, 0x45, 0x55, 0x6e, 0x70, 0x1b, 0x7d, 0x59, 0x9b, 0x8c, 0xa4, 0x34, 0x37, 0x72, 0xa4, 0xef, 0xc6, 0xe8, 0x91, 0xee, 0x7a, 0xe0, 0xd9, 0xa9, 0x98, 0xc1, 0xab, 0xd6, 0x5c,
- /* (2^130)P */ 0x1a, 0xe4, 0x3c, 0xcb, 0x06, 0xde, 0x04, 0x0e, 0x38, 0xe1, 0x02, 0x34, 0x89, 0xeb, 0xc6, 0xd8, 0x72, 0x37, 0x6e, 0x68, 0xbb, 0x59, 0x46, 0x90, 0xc8, 0xa8, 0x6b, 0x74, 0x71, 0xc3, 0x15, 0x72,
- /* (2^131)P */ 0xd9, 0xa2, 0xe4, 0xea, 0x7e, 0xa9, 0x12, 0xfd, 0xc5, 0xf2, 0x94, 0x63, 0x51, 0xb7, 0x14, 0x95, 0x94, 0xf2, 0x08, 0x92, 0x80, 0xd5, 0x6f, 0x26, 0xb9, 0x26, 0x9a, 0x61, 0x85, 0x70, 0x84, 0x5c,
- /* (2^132)P */ 0xea, 0x94, 0xd6, 0xfe, 0x10, 0x54, 0x98, 0x52, 0x54, 0xd2, 0x2e, 0x4a, 0x93, 0x5b, 0x90, 0x3c, 0x67, 0xe4, 0x3b, 0x2d, 0x69, 0x47, 0xbb, 0x10, 0xe1, 0xe9, 0xe5, 0x69, 0x2d, 0x3d, 0x3b, 0x06,
- /* (2^133)P */ 0xeb, 0x7d, 0xa5, 0xdd, 0xee, 0x26, 0x27, 0x47, 0x91, 0x18, 0xf4, 0x10, 0xae, 0xc4, 0xb6, 0xef, 0x14, 0x76, 0x30, 0x7b, 0x91, 0x41, 0x16, 0x2b, 0x7c, 0x5b, 0xf4, 0xc4, 0x4f, 0x55, 0x7c, 0x11,
- /* (2^134)P */ 0x12, 0x88, 0x9d, 0x8f, 0x11, 0xf3, 0x7c, 0xc0, 0x39, 0x79, 0x01, 0x50, 0x20, 0xd8, 0xdb, 0x01, 0x27, 0x28, 0x1b, 0x17, 0xf4, 0x03, 0xe8, 0xd7, 0xea, 0x25, 0xd2, 0x87, 0x74, 0xe8, 0x15, 0x10,
- /* (2^135)P */ 0x4d, 0xcc, 0x3a, 0xd2, 0xfe, 0xe3, 0x8d, 0xc5, 0x2d, 0xbe, 0xa7, 0x94, 0xc2, 0x91, 0xdb, 0x50, 0x57, 0xf4, 0x9c, 0x1c, 0x3d, 0xd4, 0x94, 0x0b, 0x4a, 0x52, 0x37, 0x6e, 0xfa, 0x40, 0x16, 0x6b,
- /* (2^136)P */ 0x09, 0x0d, 0xda, 0x5f, 0x6c, 0x34, 0x2f, 0x69, 0x51, 0x31, 0x4d, 0xfa, 0x59, 0x1c, 0x0b, 0x20, 0x96, 0xa2, 0x77, 0x07, 0x76, 0x6f, 0xc4, 0xb8, 0xcf, 0xfb, 0xfd, 0x3f, 0x5f, 0x39, 0x38, 0x4b,
- /* (2^137)P */ 0x71, 0xd6, 0x54, 0xbe, 0x00, 0x5e, 0xd2, 0x18, 0xa6, 0xab, 0xc8, 0xbe, 0x82, 0x05, 0xd5, 0x60, 0x82, 0xb9, 0x78, 0x3b, 0x26, 0x8f, 0xad, 0x87, 0x32, 0x04, 0xda, 0x9c, 0x4e, 0xf6, 0xfd, 0x50,
- /* (2^138)P */ 0xf0, 0xdc, 0x78, 0xc5, 0xaa, 0x67, 0xf5, 0x90, 0x3b, 0x13, 0xa3, 0xf2, 0x0e, 0x9b, 0x1e, 0xef, 0x71, 0xde, 0xd9, 0x42, 0x92, 0xba, 0xeb, 0x0e, 0xc7, 0x01, 0x31, 0xf0, 0x9b, 0x3c, 0x47, 0x15,
- /* (2^139)P */ 0x95, 0x80, 0xb7, 0x56, 0xae, 0xe8, 0x77, 0x7c, 0x8e, 0x07, 0x6f, 0x6e, 0x66, 0xe7, 0x78, 0xb6, 0x1f, 0xba, 0x48, 0x53, 0x61, 0xb9, 0xa0, 0x2d, 0x0b, 0x3f, 0x73, 0xff, 0xc1, 0x31, 0xf9, 0x7c,
- /* (2^140)P */ 0x6c, 0x36, 0x0a, 0x0a, 0xf5, 0x57, 0xb3, 0x26, 0x32, 0xd7, 0x87, 0x2b, 0xf4, 0x8c, 0x70, 0xe9, 0xc0, 0xb2, 0x1c, 0xf9, 0xa5, 0xee, 0x3a, 0xc1, 0x4c, 0xbb, 0x43, 0x11, 0x99, 0x0c, 0xd9, 0x35,
- /* (2^141)P */ 0xdc, 0xd9, 0xa0, 0xa9, 0x04, 0xc4, 0xc1, 0x47, 0x51, 0xd2, 0x72, 0x19, 0x45, 0x58, 0x9e, 0x65, 0x31, 0x8c, 0xb3, 0x73, 0xc4, 0xa8, 0x75, 0x38, 0x24, 0x1f, 0x56, 0x79, 0xd3, 0x9e, 0xbd, 0x1f,
- /* (2^142)P */ 0x8d, 0xc2, 0x1e, 0xd4, 0x6f, 0xbc, 0xfa, 0x11, 0xca, 0x2d, 0x2a, 0xcd, 0xe3, 0xdf, 0xf8, 0x7e, 0x95, 0x45, 0x40, 0x8c, 0x5d, 0x3b, 0xe7, 0x72, 0x27, 0x2f, 0xb7, 0x54, 0x49, 0xfa, 0x35, 0x61,
- /* (2^143)P */ 0x9c, 0xb6, 0x24, 0xde, 0xa2, 0x32, 0xfc, 0xcc, 0x88, 0x5d, 0x09, 0x1f, 0x8c, 0x69, 0x55, 0x3f, 0x29, 0xf9, 0xc3, 0x5a, 0xed, 0x50, 0x33, 0xbe, 0xeb, 0x7e, 0x47, 0xca, 0x06, 0xf8, 0x9b, 0x5e,
- /* (2^144)P */ 0x68, 0x9f, 0x30, 0x3c, 0xb6, 0x8f, 0xce, 0xe9, 0xf4, 0xf9, 0xe1, 0x65, 0x35, 0xf6, 0x76, 0x53, 0xf1, 0x93, 0x63, 0x5a, 0xb3, 0xcf, 0xaf, 0xd1, 0x06, 0x35, 0x62, 0xe5, 0xed, 0xa1, 0x32, 0x66,
- /* (2^145)P */ 0x4c, 0xed, 0x2d, 0x0c, 0x39, 0x6c, 0x7d, 0x0b, 0x1f, 0xcb, 0x04, 0xdf, 0x81, 0x32, 0xcb, 0x56, 0xc7, 0xc3, 0xec, 0x49, 0x12, 0x5a, 0x30, 0x66, 0x2a, 0xa7, 0x8c, 0xa3, 0x60, 0x8b, 0x58, 0x5d,
- /* (2^146)P */ 0x2d, 0xf4, 0xe5, 0xe8, 0x78, 0xbf, 0xec, 0xa6, 0xec, 0x3e, 0x8a, 0x3c, 0x4b, 0xb4, 0xee, 0x86, 0x04, 0x16, 0xd2, 0xfb, 0x48, 0x9c, 0x21, 0xec, 0x31, 0x67, 0xc3, 0x17, 0xf5, 0x1a, 0xaf, 0x1a,
- /* (2^147)P */ 0xe7, 0xbd, 0x69, 0x67, 0x83, 0xa2, 0x06, 0xc3, 0xdb, 0x2a, 0x1e, 0x2b, 0x62, 0x80, 0x82, 0x20, 0xa6, 0x94, 0xff, 0xfb, 0x1f, 0xf5, 0x27, 0x80, 0x6b, 0xf2, 0x24, 0x11, 0xce, 0xa1, 0xcf, 0x76,
- /* (2^148)P */ 0xb6, 0xab, 0x22, 0x24, 0x56, 0x00, 0xeb, 0x18, 0xc3, 0x29, 0x8c, 0x8f, 0xd5, 0xc4, 0x77, 0xf3, 0x1a, 0x56, 0x31, 0xf5, 0x07, 0xc2, 0xbb, 0x4d, 0x27, 0x8a, 0x12, 0x82, 0xf0, 0xb7, 0x53, 0x02,
- /* (2^149)P */ 0xe0, 0x17, 0x2c, 0xb6, 0x1c, 0x09, 0x1f, 0x3d, 0xa9, 0x28, 0x46, 0xd6, 0xab, 0xe1, 0x60, 0x48, 0x53, 0x42, 0x9d, 0x30, 0x36, 0x74, 0xd1, 0x52, 0x76, 0xe5, 0xfa, 0x3e, 0xe1, 0x97, 0x6f, 0x35,
- /* (2^150)P */ 0x5b, 0x53, 0x50, 0xa1, 0x1a, 0xe1, 0x51, 0xd3, 0xcc, 0x78, 0xd8, 0x1d, 0xbb, 0x45, 0x6b, 0x3e, 0x98, 0x2c, 0xd9, 0xbe, 0x28, 0x61, 0x77, 0x0c, 0xb8, 0x85, 0x28, 0x03, 0x93, 0xae, 0x34, 0x1d,
- /* (2^151)P */ 0xc3, 0xa4, 0x5b, 0xa8, 0x8c, 0x48, 0xa0, 0x4b, 0xce, 0xe6, 0x9c, 0x3c, 0xc3, 0x48, 0x53, 0x98, 0x70, 0xa7, 0xbd, 0x97, 0x6f, 0x4c, 0x12, 0x66, 0x4a, 0x12, 0x54, 0x06, 0x29, 0xa0, 0x81, 0x0f,
- /* (2^152)P */ 0xfd, 0x86, 0x9b, 0x56, 0xa6, 0x9c, 0xd0, 0x9e, 0x2d, 0x9a, 0xaf, 0x18, 0xfd, 0x09, 0x10, 0x81, 0x0a, 0xc2, 0xd8, 0x93, 0x3f, 0xd0, 0x08, 0xff, 0x6b, 0xf2, 0xae, 0x9f, 0x19, 0x48, 0xa1, 0x52,
- /* (2^153)P */ 0x73, 0x1b, 0x8d, 0x2d, 0xdc, 0xf9, 0x03, 0x3e, 0x70, 0x1a, 0x96, 0x73, 0x18, 0x80, 0x05, 0x42, 0x70, 0x59, 0xa3, 0x41, 0xf0, 0x87, 0xd9, 0xc0, 0x49, 0xd5, 0xc0, 0xa1, 0x15, 0x1f, 0xaa, 0x07,
- /* (2^154)P */ 0x24, 0x72, 0xd2, 0x8c, 0xe0, 0x6c, 0xd4, 0xdf, 0x39, 0x42, 0x4e, 0x93, 0x4f, 0x02, 0x0a, 0x6d, 0x59, 0x7b, 0x89, 0x99, 0x63, 0x7a, 0x8a, 0x80, 0xa2, 0x95, 0x3d, 0xe1, 0xe9, 0x56, 0x45, 0x0a,
- /* (2^155)P */ 0x45, 0x30, 0xc1, 0xe9, 0x1f, 0x99, 0x1a, 0xd2, 0xb8, 0x51, 0x77, 0xfe, 0x48, 0x85, 0x0e, 0x9b, 0x35, 0x00, 0xf3, 0x4b, 0xcb, 0x43, 0xa6, 0x5d, 0x21, 0xf7, 0x40, 0x39, 0xd6, 0x28, 0xdb, 0x77,
- /* (2^156)P */ 0x11, 0x90, 0xdc, 0x4a, 0x61, 0xeb, 0x5e, 0xfc, 0xeb, 0x11, 0xc4, 0xe8, 0x9a, 0x41, 0x29, 0x52, 0x74, 0xcf, 0x1d, 0x7d, 0x78, 0xe7, 0xc3, 0x9e, 0xb5, 0x4c, 0x6e, 0x21, 0x3e, 0x05, 0x0d, 0x34,
- /* (2^157)P */ 0xb4, 0xf2, 0x8d, 0xb4, 0x39, 0xaf, 0xc7, 0xca, 0x94, 0x0a, 0xa1, 0x71, 0x28, 0xec, 0xfa, 0xc0, 0xed, 0x75, 0xa5, 0x5c, 0x24, 0x69, 0x0a, 0x14, 0x4c, 0x3a, 0x27, 0x34, 0x71, 0xc3, 0xf1, 0x0c,
- /* (2^158)P */ 0xa5, 0xb8, 0x24, 0xc2, 0x6a, 0x30, 0xee, 0xc8, 0xb0, 0x30, 0x49, 0xcb, 0x7c, 0xee, 0xea, 0x57, 0x4f, 0xe7, 0xcb, 0xaa, 0xbd, 0x06, 0xe8, 0xa1, 0x7d, 0x65, 0xeb, 0x2e, 0x74, 0x62, 0x9a, 0x7d,
- /* (2^159)P */ 0x30, 0x48, 0x6c, 0x54, 0xef, 0xb6, 0xb6, 0x9e, 0x2e, 0x6e, 0xb3, 0xdd, 0x1f, 0xca, 0x5c, 0x88, 0x05, 0x71, 0x0d, 0xef, 0x83, 0xf3, 0xb9, 0xe6, 0x12, 0x04, 0x2e, 0x9d, 0xef, 0x4f, 0x65, 0x58,
- /* (2^160)P */ 0x26, 0x8e, 0x0e, 0xbe, 0xff, 0xc4, 0x05, 0xa9, 0x6e, 0x81, 0x31, 0x9b, 0xdf, 0xe5, 0x2d, 0x94, 0xe1, 0x88, 0x2e, 0x80, 0x3f, 0x72, 0x7d, 0x49, 0x8d, 0x40, 0x2f, 0x60, 0xea, 0x4d, 0x68, 0x30,
- /* (2^161)P */ 0x34, 0xcb, 0xe6, 0xa3, 0x78, 0xa2, 0xe5, 0x21, 0xc4, 0x1d, 0x15, 0x5b, 0x6f, 0x6e, 0xfb, 0xae, 0x15, 0xca, 0x77, 0x9d, 0x04, 0x8e, 0x0b, 0xb3, 0x81, 0x89, 0xb9, 0x53, 0xcf, 0xc9, 0xc3, 0x28,
- /* (2^162)P */ 0x2a, 0xdd, 0x6c, 0x55, 0x21, 0xb7, 0x7f, 0x28, 0x74, 0x22, 0x02, 0x97, 0xa8, 0x7c, 0x31, 0x0d, 0x58, 0x32, 0x54, 0x3a, 0x42, 0xc7, 0x68, 0x74, 0x2f, 0x64, 0xb5, 0x4e, 0x46, 0x11, 0x7f, 0x4a,
- /* (2^163)P */ 0xa6, 0x3a, 0x19, 0x4d, 0x77, 0xa4, 0x37, 0xa2, 0xa1, 0x29, 0x21, 0xa9, 0x6e, 0x98, 0x65, 0xd8, 0x88, 0x1a, 0x7c, 0xf8, 0xec, 0x15, 0xc5, 0x24, 0xeb, 0xf5, 0x39, 0x5f, 0x57, 0x03, 0x40, 0x60,
- /* (2^164)P */ 0x27, 0x9b, 0x0a, 0x57, 0x89, 0xf1, 0xb9, 0x47, 0x78, 0x4b, 0x5e, 0x46, 0xde, 0xce, 0x98, 0x2b, 0x20, 0x5c, 0xb8, 0xdb, 0x51, 0xf5, 0x6d, 0x02, 0x01, 0x19, 0xe2, 0x47, 0x10, 0xd9, 0xfc, 0x74,
- /* (2^165)P */ 0xa3, 0xbf, 0xc1, 0x23, 0x0a, 0xa9, 0xe2, 0x13, 0xf6, 0x19, 0x85, 0x47, 0x4e, 0x07, 0xb0, 0x0c, 0x44, 0xcf, 0xf6, 0x3a, 0xbe, 0xcb, 0xf1, 0x5f, 0xbe, 0x2d, 0x81, 0xbe, 0x38, 0x54, 0xfe, 0x67,
- /* (2^166)P */ 0xb0, 0x05, 0x0f, 0xa4, 0x4f, 0xf6, 0x3c, 0xd1, 0x87, 0x37, 0x28, 0x32, 0x2f, 0xfb, 0x4d, 0x05, 0xea, 0x2a, 0x0d, 0x7f, 0x5b, 0x91, 0x73, 0x41, 0x4e, 0x0d, 0x61, 0x1f, 0x4f, 0x14, 0x2f, 0x48,
- /* (2^167)P */ 0x34, 0x82, 0x7f, 0xb4, 0x01, 0x02, 0x21, 0xf6, 0x90, 0xb9, 0x70, 0x9e, 0x92, 0xe1, 0x0a, 0x5d, 0x7c, 0x56, 0x49, 0xb0, 0x55, 0xf4, 0xd7, 0xdc, 0x01, 0x6f, 0x91, 0xf0, 0xf1, 0xd0, 0x93, 0x7e,
- /* (2^168)P */ 0xfa, 0xb4, 0x7d, 0x8a, 0xf1, 0xcb, 0x79, 0xdd, 0x2f, 0xc6, 0x74, 0x6f, 0xbf, 0x91, 0x83, 0xbe, 0xbd, 0x91, 0x82, 0x4b, 0xd1, 0x45, 0x71, 0x02, 0x05, 0x17, 0xbf, 0x2c, 0xea, 0x73, 0x5a, 0x58,
- /* (2^169)P */ 0xb2, 0x0d, 0x8a, 0x92, 0x3e, 0xa0, 0x5c, 0x48, 0xe7, 0x57, 0x28, 0x74, 0xa5, 0x01, 0xfc, 0x10, 0xa7, 0x51, 0xd5, 0xd6, 0xdb, 0x2e, 0x48, 0x2f, 0x8a, 0xdb, 0x8f, 0x04, 0xb5, 0x33, 0x04, 0x0f,
- /* (2^170)P */ 0x47, 0x62, 0xdc, 0xd7, 0x8d, 0x2e, 0xda, 0x60, 0x9a, 0x81, 0xd4, 0x8c, 0xd3, 0xc9, 0xb4, 0x88, 0x97, 0x66, 0xf6, 0x01, 0xc0, 0x3a, 0x03, 0x13, 0x75, 0x7d, 0x36, 0x3b, 0xfe, 0x24, 0x3b, 0x27,
- /* (2^171)P */ 0xd4, 0xb9, 0xb3, 0x31, 0x6a, 0xf6, 0xe8, 0xc6, 0xd5, 0x49, 0xdf, 0x94, 0xa4, 0x14, 0x15, 0x28, 0xa7, 0x3d, 0xb2, 0xc8, 0xdf, 0x6f, 0x72, 0xd1, 0x48, 0xe5, 0xde, 0x03, 0xd1, 0xe7, 0x3a, 0x4b,
- /* (2^172)P */ 0x7e, 0x9d, 0x4b, 0xce, 0x19, 0x6e, 0x25, 0xc6, 0x1c, 0xc6, 0xe3, 0x86, 0xf1, 0x5c, 0x5c, 0xff, 0x45, 0xc1, 0x8e, 0x4b, 0xa3, 0x3c, 0xc6, 0xac, 0x74, 0x65, 0xe6, 0xfe, 0x88, 0x18, 0x62, 0x74,
- /* (2^173)P */ 0x1e, 0x0a, 0x29, 0x45, 0x96, 0x40, 0x6f, 0x95, 0x2e, 0x96, 0x3a, 0x26, 0xe3, 0xf8, 0x0b, 0xef, 0x7b, 0x64, 0xc2, 0x5e, 0xeb, 0x50, 0x6a, 0xed, 0x02, 0x75, 0xca, 0x9d, 0x3a, 0x28, 0x94, 0x06,
- /* (2^174)P */ 0xd1, 0xdc, 0xa2, 0x43, 0x36, 0x96, 0x9b, 0x76, 0x53, 0x53, 0xfc, 0x09, 0xea, 0xc8, 0xb7, 0x42, 0xab, 0x7e, 0x39, 0x13, 0xee, 0x2a, 0x00, 0x4f, 0x3a, 0xd6, 0xb7, 0x19, 0x2c, 0x5e, 0x00, 0x63,
- /* (2^175)P */ 0xea, 0x3b, 0x02, 0x63, 0xda, 0x36, 0x67, 0xca, 0xb7, 0x99, 0x2a, 0xb1, 0x6d, 0x7f, 0x6c, 0x96, 0xe1, 0xc5, 0x37, 0xc5, 0x90, 0x93, 0xe0, 0xac, 0xee, 0x89, 0xaa, 0xa1, 0x63, 0x60, 0x69, 0x0b,
- /* (2^176)P */ 0xe5, 0x56, 0x8c, 0x28, 0x97, 0x3e, 0xb0, 0xeb, 0xe8, 0x8b, 0x8c, 0x93, 0x9f, 0x9f, 0x2a, 0x43, 0x71, 0x7f, 0x71, 0x5b, 0x3d, 0xa9, 0xa5, 0xa6, 0x97, 0x9d, 0x8f, 0xe1, 0xc3, 0xb4, 0x5f, 0x1a,
- /* (2^177)P */ 0xce, 0xcd, 0x60, 0x1c, 0xad, 0xe7, 0x94, 0x1c, 0xa0, 0xc4, 0x02, 0xfc, 0x43, 0x2a, 0x20, 0xee, 0x20, 0x6a, 0xc4, 0x67, 0xd8, 0xe4, 0xaf, 0x8d, 0x58, 0x7b, 0xc2, 0x8a, 0x3c, 0x26, 0x10, 0x0a,
- /* (2^178)P */ 0x4a, 0x2a, 0x43, 0xe4, 0xdf, 0xa9, 0xde, 0xd0, 0xc5, 0x77, 0x92, 0xbe, 0x7b, 0xf8, 0x6a, 0x85, 0x1a, 0xc7, 0x12, 0xc2, 0xac, 0x72, 0x84, 0xce, 0x91, 0x1e, 0xbb, 0x9b, 0x6d, 0x1b, 0x15, 0x6f,
- /* (2^179)P */ 0x6a, 0xd5, 0xee, 0x7c, 0x52, 0x6c, 0x77, 0x26, 0xec, 0xfa, 0xf8, 0xfb, 0xb7, 0x1c, 0x21, 0x7d, 0xcc, 0x09, 0x46, 0xfd, 0xa6, 0x66, 0xae, 0x37, 0x42, 0x0c, 0x77, 0xd2, 0x02, 0xb7, 0x81, 0x1f,
- /* (2^180)P */ 0x92, 0x83, 0xc5, 0xea, 0x57, 0xb0, 0xb0, 0x2f, 0x9d, 0x4e, 0x74, 0x29, 0xfe, 0x89, 0xdd, 0xe1, 0xf8, 0xb4, 0xbe, 0x17, 0xeb, 0xf8, 0x64, 0xc9, 0x1e, 0xd4, 0xa2, 0xc9, 0x73, 0x10, 0x57, 0x29,
- /* (2^181)P */ 0x54, 0xe2, 0xc0, 0x81, 0x89, 0xa1, 0x48, 0xa9, 0x30, 0x28, 0xb2, 0x65, 0x9b, 0x36, 0xf6, 0x2d, 0xc6, 0xd3, 0xcf, 0x5f, 0xd7, 0xb2, 0x3e, 0xa3, 0x1f, 0xa0, 0x99, 0x41, 0xec, 0xd6, 0x8c, 0x07,
- /* (2^182)P */ 0x2f, 0x0d, 0x90, 0xad, 0x41, 0x4a, 0x58, 0x4a, 0x52, 0x4c, 0xc7, 0xe2, 0x78, 0x2b, 0x14, 0x32, 0x78, 0xc9, 0x31, 0x84, 0x33, 0xe8, 0xc4, 0x68, 0xc2, 0x9f, 0x68, 0x08, 0x90, 0xea, 0x69, 0x7f,
- /* (2^183)P */ 0x65, 0x82, 0xa3, 0x46, 0x1e, 0xc8, 0xf2, 0x52, 0xfd, 0x32, 0xa8, 0x04, 0x2d, 0x07, 0x78, 0xfd, 0x94, 0x9e, 0x35, 0x25, 0xfa, 0xd5, 0xd7, 0x8c, 0xd2, 0x29, 0xcc, 0x54, 0x74, 0x1b, 0xe7, 0x4d,
- /* (2^184)P */ 0xc9, 0x6a, 0xda, 0x1e, 0xad, 0x60, 0xeb, 0x42, 0x3a, 0x9c, 0xc0, 0xdb, 0xdf, 0x37, 0xad, 0x0a, 0x91, 0xc1, 0x3c, 0xe3, 0x71, 0x4b, 0x00, 0x81, 0x3c, 0x80, 0x22, 0x51, 0x34, 0xbe, 0xe6, 0x44,
- /* (2^185)P */ 0xdb, 0x20, 0x19, 0xba, 0x88, 0x83, 0xfe, 0x03, 0x08, 0xb0, 0x0d, 0x15, 0x32, 0x7c, 0xd5, 0xf5, 0x29, 0x0c, 0xf6, 0x1a, 0x28, 0xc4, 0xc8, 0x49, 0xee, 0x1a, 0x70, 0xde, 0x18, 0xb5, 0xed, 0x21,
- /* (2^186)P */ 0x99, 0xdc, 0x06, 0x8f, 0x41, 0x3e, 0xb6, 0x7f, 0xb8, 0xd7, 0x66, 0xc1, 0x99, 0x0d, 0x46, 0xa4, 0x83, 0x0a, 0x52, 0xce, 0x48, 0x52, 0xdd, 0x24, 0x58, 0x83, 0x92, 0x2b, 0x71, 0xad, 0xc3, 0x5e,
- /* (2^187)P */ 0x0f, 0x93, 0x17, 0xbd, 0x5f, 0x2a, 0x02, 0x15, 0xe3, 0x70, 0x25, 0xd8, 0x77, 0x4a, 0xf6, 0xa4, 0x12, 0x37, 0x78, 0x15, 0x69, 0x8d, 0xbc, 0x12, 0xbb, 0x0a, 0x62, 0xfc, 0xc0, 0x94, 0x81, 0x49,
- /* (2^188)P */ 0x82, 0x6c, 0x68, 0x55, 0xd2, 0xd9, 0xa2, 0x38, 0xf0, 0x21, 0x3e, 0x19, 0xd9, 0x6b, 0x5c, 0x78, 0x84, 0x54, 0x4a, 0xb2, 0x1a, 0xc8, 0xd5, 0xe4, 0x89, 0x09, 0xe2, 0xb2, 0x60, 0x78, 0x30, 0x56,
- /* (2^189)P */ 0xc4, 0x74, 0x4d, 0x8b, 0xf7, 0x55, 0x9d, 0x42, 0x31, 0x01, 0x35, 0x43, 0x46, 0x83, 0xf1, 0x22, 0xff, 0x1f, 0xc7, 0x98, 0x45, 0xc2, 0x60, 0x1e, 0xef, 0x83, 0x99, 0x97, 0x14, 0xf0, 0xf2, 0x59,
- /* (2^190)P */ 0x44, 0x4a, 0x49, 0xeb, 0x56, 0x7d, 0xa4, 0x46, 0x8e, 0xa1, 0x36, 0xd6, 0x54, 0xa8, 0x22, 0x3e, 0x3b, 0x1c, 0x49, 0x74, 0x52, 0xe1, 0x46, 0xb3, 0xe7, 0xcd, 0x90, 0x53, 0x4e, 0xfd, 0xea, 0x2c,
- /* (2^191)P */ 0x75, 0x66, 0x0d, 0xbe, 0x38, 0x85, 0x8a, 0xba, 0x23, 0x8e, 0x81, 0x50, 0xbb, 0x74, 0x90, 0x4b, 0xc3, 0x04, 0xd3, 0x85, 0x90, 0xb8, 0xda, 0xcb, 0xc4, 0x92, 0x61, 0xe5, 0xe0, 0x4f, 0xa2, 0x61,
- /* (2^192)P */ 0xcb, 0x5b, 0x52, 0xdb, 0xe6, 0x15, 0x76, 0xcb, 0xca, 0xe4, 0x67, 0xa5, 0x35, 0x8c, 0x7d, 0xdd, 0x69, 0xdd, 0xfc, 0xca, 0x3a, 0x15, 0xb4, 0xe6, 0x66, 0x97, 0x3c, 0x7f, 0x09, 0x8e, 0x66, 0x2d,
- /* (2^193)P */ 0xf0, 0x5e, 0xe5, 0x5c, 0x26, 0x7e, 0x7e, 0xa5, 0x67, 0xb9, 0xd4, 0x7c, 0x52, 0x4e, 0x9f, 0x5d, 0xe5, 0xd1, 0x2f, 0x49, 0x06, 0x36, 0xc8, 0xfb, 0xae, 0xf7, 0xc3, 0xb7, 0xbe, 0x52, 0x0d, 0x09,
- /* (2^194)P */ 0x7c, 0x4d, 0x7b, 0x1e, 0x5a, 0x51, 0xb9, 0x09, 0xc0, 0x44, 0xda, 0x99, 0x25, 0x6a, 0x26, 0x1f, 0x04, 0x55, 0xc5, 0xe2, 0x48, 0x95, 0xc4, 0xa1, 0xcc, 0x15, 0x6f, 0x12, 0x87, 0x42, 0xf0, 0x7e,
- /* (2^195)P */ 0x15, 0xef, 0x30, 0xbd, 0x9d, 0x65, 0xd1, 0xfe, 0x7b, 0x27, 0xe0, 0xc4, 0xee, 0xb9, 0x4a, 0x8b, 0x91, 0x32, 0xdf, 0xa5, 0x36, 0x62, 0x4d, 0x88, 0x88, 0xf7, 0x5c, 0xbf, 0xa6, 0x6e, 0xd9, 0x1f,
- /* (2^196)P */ 0x9a, 0x0d, 0x19, 0x1f, 0x98, 0x61, 0xa1, 0x42, 0xc1, 0x52, 0x60, 0x7e, 0x50, 0x49, 0xd8, 0x61, 0xd5, 0x2c, 0x5a, 0x28, 0xbf, 0x13, 0xe1, 0x9f, 0xd8, 0x85, 0xad, 0xdb, 0x76, 0xd6, 0x22, 0x7c,
- /* (2^197)P */ 0x7d, 0xd2, 0xfb, 0x2b, 0xed, 0x70, 0xe7, 0x82, 0xa5, 0xf5, 0x96, 0xe9, 0xec, 0xb2, 0x05, 0x4c, 0x50, 0x01, 0x90, 0xb0, 0xc2, 0xa9, 0x40, 0xcd, 0x64, 0xbf, 0xd9, 0x13, 0x92, 0x31, 0x95, 0x58,
- /* (2^198)P */ 0x08, 0x2e, 0xea, 0x3f, 0x70, 0x5d, 0xcc, 0xe7, 0x8c, 0x18, 0xe2, 0x58, 0x12, 0x49, 0x0c, 0xb5, 0xf0, 0x5b, 0x20, 0x48, 0xaa, 0x0b, 0xe3, 0xcc, 0x62, 0x2d, 0xa3, 0xcf, 0x9c, 0x65, 0x7c, 0x53,
- /* (2^199)P */ 0x88, 0xc0, 0xcf, 0x98, 0x3a, 0x62, 0xb6, 0x37, 0xa4, 0xac, 0xd6, 0xa4, 0x1f, 0xed, 0x9b, 0xfe, 0xb0, 0xd1, 0xa8, 0x56, 0x8e, 0x9b, 0xd2, 0x04, 0x75, 0x95, 0x51, 0x0b, 0xc4, 0x71, 0x5f, 0x72,
- /* (2^200)P */ 0xe6, 0x9c, 0x33, 0xd0, 0x9c, 0xf8, 0xc7, 0x28, 0x8b, 0xc1, 0xdd, 0x69, 0x44, 0xb1, 0x67, 0x83, 0x2c, 0x65, 0xa1, 0xa6, 0x83, 0xda, 0x3a, 0x88, 0x17, 0x6c, 0x4d, 0x03, 0x74, 0x19, 0x5f, 0x58,
- /* (2^201)P */ 0x88, 0x91, 0xb1, 0xf1, 0x66, 0xb2, 0xcf, 0x89, 0x17, 0x52, 0xc3, 0xe7, 0x63, 0x48, 0x3b, 0xe6, 0x6a, 0x52, 0xc0, 0xb4, 0xa6, 0x9d, 0x8c, 0xd8, 0x35, 0x46, 0x95, 0xf0, 0x9d, 0x5c, 0x03, 0x3e,
- /* (2^202)P */ 0x9d, 0xde, 0x45, 0xfb, 0x12, 0x54, 0x9d, 0xdd, 0x0d, 0xf4, 0xcf, 0xe4, 0x32, 0x45, 0x68, 0xdd, 0x1c, 0x67, 0x1d, 0x15, 0x9b, 0x99, 0x5c, 0x4b, 0x90, 0xf6, 0xe7, 0x11, 0xc8, 0x2c, 0x8c, 0x2d,
- /* (2^203)P */ 0x40, 0x5d, 0x05, 0x90, 0x1d, 0xbe, 0x54, 0x7f, 0x40, 0xaf, 0x4a, 0x46, 0xdf, 0xc5, 0x64, 0xa4, 0xbe, 0x17, 0xe9, 0xf0, 0x24, 0x96, 0x97, 0x33, 0x30, 0x6b, 0x35, 0x27, 0xc5, 0x8d, 0x01, 0x2c,
- /* (2^204)P */ 0xd4, 0xb3, 0x30, 0xe3, 0x24, 0x50, 0x41, 0xa5, 0xd3, 0x52, 0x16, 0x69, 0x96, 0x3d, 0xff, 0x73, 0xf1, 0x59, 0x9b, 0xef, 0xc4, 0x42, 0xec, 0x94, 0x5a, 0x8e, 0xd0, 0x18, 0x16, 0x20, 0x47, 0x07,
- /* (2^205)P */ 0x53, 0x1c, 0x41, 0xca, 0x8a, 0xa4, 0x6c, 0x4d, 0x19, 0x61, 0xa6, 0xcf, 0x2f, 0x5f, 0x41, 0x66, 0xff, 0x27, 0xe2, 0x51, 0x00, 0xd4, 0x4d, 0x9c, 0xeb, 0xf7, 0x02, 0x9a, 0xc0, 0x0b, 0x81, 0x59,
- /* (2^206)P */ 0x1d, 0x10, 0xdc, 0xb3, 0x71, 0xb1, 0x7e, 0x2a, 0x8e, 0xf6, 0xfe, 0x9f, 0xb9, 0x5a, 0x1c, 0x44, 0xea, 0x59, 0xb3, 0x93, 0x9b, 0x5c, 0x02, 0x32, 0x2f, 0x11, 0x9d, 0x1e, 0xa7, 0xe0, 0x8c, 0x5e,
- /* (2^207)P */ 0xfd, 0x03, 0x95, 0x42, 0x92, 0xcb, 0xcc, 0xbf, 0x55, 0x5d, 0x09, 0x2f, 0x75, 0xba, 0x71, 0xd2, 0x1e, 0x09, 0x2d, 0x97, 0x5e, 0xad, 0x5e, 0x34, 0xba, 0x03, 0x31, 0xa8, 0x11, 0xdf, 0xc8, 0x18,
- /* (2^208)P */ 0x4c, 0x0f, 0xed, 0x9a, 0x9a, 0x94, 0xcd, 0x90, 0x7e, 0xe3, 0x60, 0x66, 0xcb, 0xf4, 0xd1, 0xc5, 0x0b, 0x2e, 0xc5, 0x56, 0x2d, 0xc5, 0xca, 0xb8, 0x0d, 0x8e, 0x80, 0xc5, 0x00, 0xe4, 0x42, 0x6e,
- /* (2^209)P */ 0x23, 0xfd, 0xae, 0xee, 0x66, 0x69, 0xb4, 0xa3, 0xca, 0xcd, 0x9e, 0xe3, 0x0b, 0x1f, 0x4f, 0x0c, 0x1d, 0xa5, 0x83, 0xd6, 0xc9, 0xc8, 0x9d, 0x18, 0x1b, 0x35, 0x09, 0x4c, 0x05, 0x7f, 0xf2, 0x51,
- /* (2^210)P */ 0x82, 0x06, 0x32, 0x2a, 0xcd, 0x7c, 0x48, 0x4c, 0x96, 0x1c, 0xdf, 0xb3, 0x5b, 0xa9, 0x7e, 0x58, 0xe8, 0xb8, 0x5c, 0x55, 0x9e, 0xf7, 0xcc, 0xc8, 0x3d, 0xd7, 0x06, 0xa2, 0x29, 0xc8, 0x7d, 0x54,
- /* (2^211)P */ 0x06, 0x9b, 0xc3, 0x80, 0xcd, 0xa6, 0x22, 0xb8, 0xc6, 0xd4, 0x00, 0x20, 0x73, 0x54, 0x6d, 0xe9, 0x4d, 0x3b, 0x46, 0x91, 0x6f, 0x5b, 0x53, 0x28, 0x1d, 0x6e, 0x48, 0xe2, 0x60, 0x46, 0x8f, 0x22,
- /* (2^212)P */ 0xbf, 0x3a, 0x8d, 0xde, 0x38, 0x95, 0x79, 0x98, 0x6e, 0xca, 0xeb, 0x45, 0x00, 0x33, 0xd8, 0x8c, 0x38, 0xe7, 0x21, 0x82, 0x00, 0x2a, 0x95, 0x79, 0xbb, 0xd2, 0x5c, 0x53, 0xa7, 0xe1, 0x22, 0x43,
- /* (2^213)P */ 0x1c, 0x80, 0xd1, 0x19, 0x18, 0xc1, 0x14, 0xb1, 0xc7, 0x5e, 0x3f, 0x4f, 0xd8, 0xe4, 0x16, 0x20, 0x4c, 0x0f, 0x26, 0x09, 0xf4, 0x2d, 0x0e, 0xdd, 0x66, 0x72, 0x5f, 0xae, 0xc0, 0x62, 0xc3, 0x5e,
- /* (2^214)P */ 0xee, 0xb4, 0xb2, 0xb8, 0x18, 0x2b, 0x46, 0xc0, 0xfb, 0x1a, 0x4d, 0x27, 0x50, 0xd9, 0xc8, 0x7c, 0xd2, 0x02, 0x6b, 0x43, 0x05, 0x71, 0x5f, 0xf2, 0xd3, 0xcc, 0xf9, 0xbf, 0xdc, 0xf8, 0xbb, 0x43,
- /* (2^215)P */ 0xdf, 0xe9, 0x39, 0xa0, 0x67, 0x17, 0xad, 0xb6, 0x83, 0x35, 0x9d, 0xf6, 0xa8, 0x4d, 0x71, 0xb0, 0xf5, 0x31, 0x29, 0xb4, 0x18, 0xfa, 0x55, 0x5e, 0x61, 0x09, 0xc6, 0x33, 0x8f, 0x55, 0xd5, 0x4e,
- /* (2^216)P */ 0xdd, 0xa5, 0x47, 0xc6, 0x01, 0x79, 0xe3, 0x1f, 0x57, 0xd3, 0x81, 0x80, 0x1f, 0xdf, 0x3d, 0x59, 0xa6, 0xd7, 0x3f, 0x81, 0xfd, 0xa4, 0x49, 0x02, 0x61, 0xaf, 0x9c, 0x4e, 0x27, 0xca, 0xac, 0x69,
- /* (2^217)P */ 0xc9, 0x21, 0x07, 0x33, 0xea, 0xa3, 0x7b, 0x04, 0xa0, 0x1e, 0x7e, 0x0e, 0xc2, 0x3f, 0x42, 0x83, 0x60, 0x4a, 0x31, 0x01, 0xaf, 0xc0, 0xf4, 0x1d, 0x27, 0x95, 0x28, 0x89, 0xab, 0x2d, 0xa6, 0x09,
- /* (2^218)P */ 0x00, 0xcb, 0xc6, 0x9c, 0xa4, 0x25, 0xb3, 0xa5, 0xb6, 0x6c, 0xb5, 0x54, 0xc6, 0x5d, 0x4b, 0xe9, 0xa0, 0x94, 0xc9, 0xad, 0x79, 0x87, 0xe2, 0x3b, 0xad, 0x4a, 0x3a, 0xba, 0xf8, 0xe8, 0x96, 0x42,
- /* (2^219)P */ 0xab, 0x1e, 0x45, 0x1e, 0x76, 0x89, 0x86, 0x32, 0x4a, 0x59, 0x59, 0xff, 0x8b, 0x59, 0x4d, 0x2e, 0x4a, 0x08, 0xa7, 0xd7, 0x53, 0x68, 0xb9, 0x49, 0xa8, 0x20, 0x14, 0x60, 0x19, 0xa3, 0x80, 0x49,
- /* (2^220)P */ 0x42, 0x2c, 0x55, 0x2f, 0xe1, 0xb9, 0x65, 0x95, 0x96, 0xfe, 0x00, 0x71, 0xdb, 0x18, 0x53, 0x8a, 0xd7, 0xd0, 0xad, 0x43, 0x4d, 0x0b, 0xc9, 0x05, 0xda, 0x4e, 0x5d, 0x6a, 0xd6, 0x4c, 0x8b, 0x53,
- /* (2^221)P */ 0x9f, 0x03, 0x9f, 0xe8, 0xc3, 0x4f, 0xe9, 0xf4, 0x45, 0x80, 0x61, 0x6f, 0xf2, 0x9a, 0x2c, 0x59, 0x50, 0x95, 0x4b, 0xfd, 0xb5, 0x6e, 0xa3, 0x08, 0x19, 0x14, 0xed, 0xc2, 0xf6, 0xfa, 0xff, 0x25,
- /* (2^222)P */ 0x54, 0xd3, 0x79, 0xcc, 0x59, 0x44, 0x43, 0x34, 0x6b, 0x47, 0xd5, 0xb1, 0xb4, 0xbf, 0xec, 0xee, 0x99, 0x5d, 0x61, 0x61, 0xa0, 0x34, 0xeb, 0xdd, 0x73, 0xb7, 0x64, 0xeb, 0xcc, 0xce, 0x29, 0x51,
- /* (2^223)P */ 0x20, 0x35, 0x99, 0x94, 0x58, 0x21, 0x43, 0xee, 0x3b, 0x0b, 0x4c, 0xf1, 0x7c, 0x9c, 0x2f, 0x77, 0xd5, 0xda, 0xbe, 0x06, 0xe3, 0xfc, 0xe2, 0xd2, 0x97, 0x6a, 0xf0, 0x46, 0xb5, 0x42, 0x5f, 0x71,
- /* (2^224)P */ 0x1a, 0x5f, 0x5b, 0xda, 0xce, 0xcd, 0x4e, 0x43, 0xa9, 0x41, 0x97, 0xa4, 0x15, 0x71, 0xa1, 0x0d, 0x2e, 0xad, 0xed, 0x73, 0x7c, 0xd7, 0x0b, 0x68, 0x41, 0x90, 0xdd, 0x4e, 0x35, 0x02, 0x7c, 0x48,
- /* (2^225)P */ 0xc4, 0xd9, 0x0e, 0xa7, 0xf3, 0xef, 0xef, 0xb8, 0x02, 0xe3, 0x57, 0xe8, 0xa3, 0x2a, 0xa3, 0x56, 0xa0, 0xa5, 0xa2, 0x48, 0xbd, 0x68, 0x3a, 0xdf, 0x44, 0xc4, 0x76, 0x31, 0xb7, 0x50, 0xf6, 0x07,
- /* (2^226)P */ 0xb1, 0xcc, 0xe0, 0x26, 0x16, 0x9b, 0x8b, 0xe3, 0x36, 0xfb, 0x09, 0x8b, 0xc1, 0x53, 0xe0, 0x79, 0x64, 0x49, 0xf9, 0xc9, 0x19, 0x03, 0xd9, 0x56, 0xc4, 0xf5, 0x9f, 0xac, 0xe7, 0x41, 0xa9, 0x1c,
- /* (2^227)P */ 0xbb, 0xa0, 0x2f, 0x16, 0x29, 0xdf, 0xc4, 0x49, 0x05, 0x33, 0xb3, 0x82, 0x32, 0xcf, 0x88, 0x84, 0x7d, 0x43, 0xbb, 0xca, 0x14, 0xda, 0xdf, 0x95, 0x86, 0xad, 0xd5, 0x64, 0x82, 0xf7, 0x91, 0x33,
- /* (2^228)P */ 0x5d, 0x09, 0xb5, 0xe2, 0x6a, 0xe0, 0x9a, 0x72, 0x46, 0xa9, 0x59, 0x32, 0xd7, 0x58, 0x8a, 0xd5, 0xed, 0x21, 0x39, 0xd1, 0x62, 0x42, 0x83, 0xe9, 0x92, 0xb5, 0x4b, 0xa5, 0xfa, 0xda, 0xfe, 0x27,
- /* (2^229)P */ 0xbb, 0x48, 0xad, 0x29, 0xb8, 0xc5, 0x9d, 0xa9, 0x60, 0xe2, 0x9e, 0x49, 0x42, 0x57, 0x02, 0x5f, 0xfd, 0x13, 0x75, 0x5d, 0xcd, 0x8e, 0x2c, 0x80, 0x38, 0xd9, 0x6d, 0x3f, 0xef, 0xb3, 0xce, 0x78,
- /* (2^230)P */ 0x94, 0x5d, 0x13, 0x8a, 0x4f, 0xf4, 0x42, 0xc3, 0xa3, 0xdd, 0x8c, 0x82, 0x44, 0xdb, 0x9e, 0x7b, 0xe7, 0xcf, 0x37, 0x05, 0x1a, 0xd1, 0x36, 0x94, 0xc8, 0xb4, 0x1a, 0xec, 0x64, 0xb1, 0x64, 0x50,
- /* (2^231)P */ 0xfc, 0xb2, 0x7e, 0xd3, 0xcf, 0xec, 0x20, 0x70, 0xfc, 0x25, 0x0d, 0xd9, 0x3e, 0xea, 0x31, 0x1f, 0x34, 0xbb, 0xa1, 0xdf, 0x7b, 0x0d, 0x93, 0x1b, 0x44, 0x30, 0x11, 0x48, 0x7a, 0x46, 0x44, 0x53,
- /* (2^232)P */ 0xfb, 0x6d, 0x5e, 0xf2, 0x70, 0x31, 0x07, 0x70, 0xc8, 0x4c, 0x11, 0x50, 0x1a, 0xdc, 0x85, 0xe3, 0x00, 0x4f, 0xfc, 0xc8, 0x8a, 0x69, 0x48, 0x23, 0xd8, 0x40, 0xdd, 0x84, 0x52, 0xa5, 0x77, 0x2a,
- /* (2^233)P */ 0xe4, 0x6c, 0x8c, 0xc9, 0xe0, 0xaf, 0x06, 0xfe, 0xe4, 0xd6, 0xdf, 0xdd, 0x96, 0xdf, 0x35, 0xc2, 0xd3, 0x1e, 0xbf, 0x33, 0x1e, 0xd0, 0x28, 0x14, 0xaf, 0xbd, 0x00, 0x93, 0xec, 0x68, 0x57, 0x78,
- /* (2^234)P */ 0x3b, 0xb6, 0xde, 0x91, 0x7a, 0xe5, 0x02, 0x97, 0x80, 0x8b, 0xce, 0xe5, 0xbf, 0xb8, 0xbd, 0x61, 0xac, 0x58, 0x1d, 0x3d, 0x6f, 0x42, 0x5b, 0x64, 0xbc, 0x57, 0xa5, 0x27, 0x22, 0xa8, 0x04, 0x48,
- /* (2^235)P */ 0x01, 0x26, 0x4d, 0xb4, 0x8a, 0x04, 0x57, 0x8e, 0x35, 0x69, 0x3a, 0x4b, 0x1a, 0x50, 0xd6, 0x68, 0x93, 0xc2, 0xe1, 0xf9, 0xc3, 0x9e, 0x9c, 0xc3, 0xe2, 0x63, 0xde, 0xd4, 0x57, 0xf2, 0x72, 0x41,
- /* (2^236)P */ 0x01, 0x64, 0x0c, 0x33, 0x50, 0xb4, 0x68, 0xd3, 0x91, 0x23, 0x8f, 0x41, 0x17, 0x30, 0x0d, 0x04, 0x0d, 0xd9, 0xb7, 0x90, 0x60, 0xbb, 0x34, 0x2c, 0x1f, 0xd5, 0xdf, 0x8f, 0x22, 0x49, 0xf6, 0x16,
- /* (2^237)P */ 0xf5, 0x8e, 0x92, 0x2b, 0x8e, 0x81, 0xa6, 0xbe, 0x72, 0x1e, 0xc1, 0xcd, 0x91, 0xcf, 0x8c, 0xe2, 0xcd, 0x36, 0x7a, 0xe7, 0x68, 0xaa, 0x4a, 0x59, 0x0f, 0xfd, 0x7f, 0x6c, 0x80, 0x34, 0x30, 0x31,
- /* (2^238)P */ 0x65, 0xbd, 0x49, 0x22, 0xac, 0x27, 0x9d, 0x8a, 0x12, 0x95, 0x8e, 0x01, 0x64, 0xb4, 0xa3, 0x19, 0xc7, 0x7e, 0xb3, 0x52, 0xf3, 0xcf, 0x6c, 0xc2, 0x21, 0x7b, 0x79, 0x1d, 0x34, 0x68, 0x6f, 0x05,
- /* (2^239)P */ 0x27, 0x23, 0xfd, 0x7e, 0x75, 0xd6, 0x79, 0x5e, 0x15, 0xfe, 0x3a, 0x55, 0xb6, 0xbc, 0xbd, 0xfa, 0x60, 0x5a, 0xaf, 0x6e, 0x2c, 0x22, 0xe7, 0xd3, 0x3b, 0x74, 0xae, 0x4d, 0x6d, 0xc7, 0x46, 0x70,
- /* (2^240)P */ 0x55, 0x4a, 0x8d, 0xb1, 0x72, 0xe8, 0x0b, 0x66, 0x96, 0x14, 0x4e, 0x57, 0x18, 0x25, 0x99, 0x19, 0xbb, 0xdc, 0x2b, 0x30, 0x3a, 0x05, 0x03, 0xc1, 0x8e, 0x8e, 0x21, 0x0b, 0x80, 0xe9, 0xd8, 0x3e,
- /* (2^241)P */ 0x3e, 0xe0, 0x75, 0xfa, 0x39, 0x92, 0x0b, 0x7b, 0x83, 0xc0, 0x33, 0x46, 0x68, 0xfb, 0xe9, 0xef, 0x93, 0x77, 0x1a, 0x39, 0xbe, 0x5f, 0xa3, 0x98, 0x34, 0xfe, 0xd0, 0xe2, 0x0f, 0x51, 0x65, 0x60,
- /* (2^242)P */ 0x0c, 0xad, 0xab, 0x48, 0x85, 0x66, 0xcb, 0x55, 0x27, 0xe5, 0x87, 0xda, 0x48, 0x45, 0x58, 0xb4, 0xdd, 0xc1, 0x07, 0x01, 0xea, 0xec, 0x43, 0x2c, 0x35, 0xde, 0x72, 0x93, 0x80, 0x28, 0x60, 0x52,
- /* (2^243)P */ 0x1f, 0x3b, 0x21, 0xf9, 0x6a, 0xc5, 0x15, 0x34, 0xdb, 0x98, 0x7e, 0x01, 0x4d, 0x1a, 0xee, 0x5b, 0x9b, 0x70, 0xcf, 0xb5, 0x05, 0xb1, 0xf6, 0x13, 0xb6, 0x9a, 0xb2, 0x82, 0x34, 0x0e, 0xf2, 0x5f,
- /* (2^244)P */ 0x90, 0x6c, 0x2e, 0xcc, 0x75, 0x9c, 0xa2, 0x0a, 0x06, 0xe2, 0x70, 0x3a, 0xca, 0x73, 0x7d, 0xfc, 0x15, 0xc5, 0xb5, 0xc4, 0x8f, 0xc3, 0x9f, 0x89, 0x07, 0xc2, 0xff, 0x24, 0xb1, 0x86, 0x03, 0x25,
- /* (2^245)P */ 0x56, 0x2b, 0x3d, 0xae, 0xd5, 0x28, 0xea, 0x54, 0xce, 0x60, 0xde, 0xd6, 0x9d, 0x14, 0x13, 0x99, 0xc1, 0xd6, 0x06, 0x8f, 0xc5, 0x4f, 0x69, 0x16, 0xc7, 0x8f, 0x01, 0xeb, 0x75, 0x39, 0xb2, 0x46,
- /* (2^246)P */ 0xe2, 0xb4, 0xb7, 0xb4, 0x0f, 0x6a, 0x0a, 0x47, 0xde, 0x53, 0x72, 0x8f, 0x5a, 0x47, 0x92, 0x5d, 0xdb, 0x3a, 0xbd, 0x2f, 0xb5, 0xe5, 0xee, 0xab, 0x68, 0x69, 0x80, 0xa0, 0x01, 0x08, 0xa2, 0x7f,
- /* (2^247)P */ 0xd2, 0x14, 0x77, 0x9f, 0xf1, 0xfa, 0xf3, 0x76, 0xc3, 0x60, 0x46, 0x2f, 0xc1, 0x40, 0xe8, 0xb3, 0x4e, 0x74, 0x12, 0xf2, 0x8d, 0xcd, 0xb4, 0x0f, 0xd2, 0x2d, 0x3a, 0x1d, 0x25, 0x5a, 0x06, 0x4b,
- /* (2^248)P */ 0x4a, 0xcd, 0x77, 0x3d, 0x38, 0xde, 0xeb, 0x5c, 0xb1, 0x9c, 0x2c, 0x88, 0xdf, 0x39, 0xdf, 0x6a, 0x59, 0xf7, 0x9a, 0xb0, 0x2e, 0x24, 0xdd, 0xa2, 0x22, 0x64, 0x5f, 0x0e, 0xe5, 0xc0, 0x47, 0x31,
- /* (2^249)P */ 0xdb, 0x50, 0x13, 0x1d, 0x10, 0xa5, 0x4c, 0x16, 0x62, 0xc9, 0x3f, 0xc3, 0x79, 0x34, 0xd1, 0xf8, 0x08, 0xda, 0xe5, 0x13, 0x4d, 0xce, 0x40, 0xe6, 0xba, 0xf8, 0x61, 0x50, 0xc4, 0xe0, 0xde, 0x4b,
- /* (2^250)P */ 0xc9, 0xb1, 0xed, 0xa4, 0xc1, 0x6d, 0xc4, 0xd7, 0x8a, 0xd9, 0x7f, 0x43, 0xb6, 0xd7, 0x14, 0x55, 0x0b, 0xc0, 0xa1, 0xb2, 0x6b, 0x2f, 0x94, 0x58, 0x0e, 0x71, 0x70, 0x1d, 0xab, 0xb2, 0xff, 0x2d,
- /* (2^251)P */ 0x68, 0x6d, 0x8b, 0xc1, 0x2f, 0xcf, 0xdf, 0xcc, 0x67, 0x61, 0x80, 0xb7, 0xa8, 0xcb, 0xeb, 0xa8, 0xe3, 0x37, 0x29, 0x5e, 0xf9, 0x97, 0x06, 0x98, 0x8c, 0x6e, 0x12, 0xd0, 0x1c, 0xba, 0xfb, 0x02,
- /* (2^252)P */ 0x65, 0x45, 0xff, 0xad, 0x60, 0xc3, 0x98, 0xcb, 0x19, 0x15, 0xdb, 0x4b, 0xd2, 0x01, 0x71, 0x44, 0xd5, 0x15, 0xfb, 0x75, 0x74, 0xc8, 0xc4, 0x98, 0x7d, 0xa2, 0x22, 0x6e, 0x6d, 0xc7, 0xf8, 0x05,
- /* (2^253)P */ 0x94, 0xf4, 0xb9, 0xfe, 0xdf, 0xe5, 0x69, 0xab, 0x75, 0x6b, 0x40, 0x18, 0x9d, 0xc7, 0x09, 0xae, 0x1d, 0x2d, 0xa4, 0x94, 0xfb, 0x45, 0x9b, 0x19, 0x84, 0xfa, 0x2a, 0xae, 0xeb, 0x0a, 0x71, 0x79,
- /* (2^254)P */ 0xdf, 0xd2, 0x34, 0xf3, 0xa7, 0xed, 0xad, 0xa6, 0xb4, 0x57, 0x2a, 0xaf, 0x51, 0x9c, 0xde, 0x7b, 0xa8, 0xea, 0xdc, 0x86, 0x4f, 0xc6, 0x8f, 0xa9, 0x7b, 0xd0, 0x0e, 0xc2, 0x35, 0x03, 0xbe, 0x6b,
- /* (2^255)P */ 0x44, 0x43, 0x98, 0x53, 0xbe, 0xdc, 0x7f, 0x66, 0xa8, 0x49, 0x59, 0x00, 0x1c, 0xbc, 0x72, 0x07, 0x8e, 0xd6, 0xbe, 0x4e, 0x9f, 0xa4, 0x07, 0xba, 0xbf, 0x30, 0xdf, 0xba, 0x85, 0xb0, 0xa7, 0x1f,
-}
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve.go b/vendor/github.com/cloudflare/circl/dh/x448/curve.go
deleted file mode 100644
index d59564e4..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/curve.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package x448
-
-import (
- fp "github.com/cloudflare/circl/math/fp448"
-)
-
-// ladderJoye calculates a fixed-point multiplication with the generator point.
-// The algorithm is the right-to-left Joye's ladder as described
-// in "How to precompute a ladder" in SAC'2017.
-func ladderJoye(k *Key) {
- w := [5]fp.Elt{} // [mu,x1,z1,x2,z2] order must be preserved.
- w[1] = fp.Elt{ // x1 = S
- 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- }
- fp.SetOne(&w[2]) // z1 = 1
- w[3] = fp.Elt{ // x2 = G-S
- 0x20, 0x27, 0x9d, 0xc9, 0x7d, 0x19, 0xb1, 0xac,
- 0xf8, 0xba, 0x69, 0x1c, 0xff, 0x33, 0xac, 0x23,
- 0x51, 0x1b, 0xce, 0x3a, 0x64, 0x65, 0xbd, 0xf1,
- 0x23, 0xf8, 0xc1, 0x84, 0x9d, 0x45, 0x54, 0x29,
- 0x67, 0xb9, 0x81, 0x1c, 0x03, 0xd1, 0xcd, 0xda,
- 0x7b, 0xeb, 0xff, 0x1a, 0x88, 0x03, 0xcf, 0x3a,
- 0x42, 0x44, 0x32, 0x01, 0x25, 0xb7, 0xfa, 0xf0,
- }
- fp.SetOne(&w[4]) // z2 = 1
-
- const n = 448
- const h = 2
- swap := uint(1)
- for s := 0; s < n-h; s++ {
- i := (s + h) / 8
- j := (s + h) % 8
- bit := uint((k[i] >> uint(j)) & 1)
- copy(w[0][:], tableGenerator[s*Size:(s+1)*Size])
- diffAdd(&w, swap^bit)
- swap = bit
- }
- for s := 0; s < h; s++ {
- double(&w[1], &w[2])
- }
- toAffine((*[fp.Size]byte)(k), &w[1], &w[2])
-}
-
-// ladderMontgomery calculates a generic scalar point multiplication
-// The algorithm implemented is the left-to-right Montgomery's ladder.
-func ladderMontgomery(k, xP *Key) {
- w := [5]fp.Elt{} // [x1, x2, z2, x3, z3] order must be preserved.
- w[0] = *(*fp.Elt)(xP) // x1 = xP
- fp.SetOne(&w[1]) // x2 = 1
- w[3] = *(*fp.Elt)(xP) // x3 = xP
- fp.SetOne(&w[4]) // z3 = 1
-
- move := uint(0)
- for s := 448 - 1; s >= 0; s-- {
- i := s / 8
- j := s % 8
- bit := uint((k[i] >> uint(j)) & 1)
- ladderStep(&w, move^bit)
- move = bit
- }
- toAffine((*[fp.Size]byte)(k), &w[1], &w[2])
-}
-
-func toAffine(k *[fp.Size]byte, x, z *fp.Elt) {
- fp.Inv(z, z)
- fp.Mul(x, x, z)
- _ = fp.ToBytes(k[:], x)
-}
-
-var lowOrderPoints = [3]fp.Elt{
- { /* (0,_,1) point of order 2 on Curve448 */
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- },
- { /* (1,_,1) a point of order 4 on the twist of Curve448 */
- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- },
- { /* (-1,_,1) point of order 4 on Curve448 */
- 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- },
-}
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go
deleted file mode 100644
index a0622666..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.go
+++ /dev/null
@@ -1,30 +0,0 @@
-//go:build amd64 && !purego
-// +build amd64,!purego
-
-package x448
-
-import (
- fp "github.com/cloudflare/circl/math/fp448"
- "golang.org/x/sys/cpu"
-)
-
-var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX
-
-var _ = hasBmi2Adx
-
-func double(x, z *fp.Elt) { doubleAmd64(x, z) }
-func diffAdd(w *[5]fp.Elt, b uint) { diffAddAmd64(w, b) }
-func ladderStep(w *[5]fp.Elt, b uint) { ladderStepAmd64(w, b) }
-func mulA24(z, x *fp.Elt) { mulA24Amd64(z, x) }
-
-//go:noescape
-func doubleAmd64(x, z *fp.Elt)
-
-//go:noescape
-func diffAddAmd64(w *[5]fp.Elt, b uint)
-
-//go:noescape
-func ladderStepAmd64(w *[5]fp.Elt, b uint)
-
-//go:noescape
-func mulA24Amd64(z, x *fp.Elt)
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h
deleted file mode 100644
index 8c1ae4d0..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#define ladderStepLeg \
- addSub(x2,z2) \
- addSub(x3,z3) \
- integerMulLeg(b0,x2,z3) \
- integerMulLeg(b1,x3,z2) \
- reduceFromDoubleLeg(t0,b0) \
- reduceFromDoubleLeg(t1,b1) \
- addSub(t0,t1) \
- cselect(x2,x3,regMove) \
- cselect(z2,z3,regMove) \
- integerSqrLeg(b0,t0) \
- integerSqrLeg(b1,t1) \
- reduceFromDoubleLeg(x3,b0) \
- reduceFromDoubleLeg(z3,b1) \
- integerMulLeg(b0,x1,z3) \
- reduceFromDoubleLeg(z3,b0) \
- integerSqrLeg(b0,x2) \
- integerSqrLeg(b1,z2) \
- reduceFromDoubleLeg(x2,b0) \
- reduceFromDoubleLeg(z2,b1) \
- subtraction(t0,x2,z2) \
- multiplyA24Leg(t1,t0) \
- additionLeg(t1,t1,z2) \
- integerMulLeg(b0,x2,z2) \
- integerMulLeg(b1,t0,t1) \
- reduceFromDoubleLeg(x2,b0) \
- reduceFromDoubleLeg(z2,b1)
-
-#define ladderStepBmi2Adx \
- addSub(x2,z2) \
- addSub(x3,z3) \
- integerMulAdx(b0,x2,z3) \
- integerMulAdx(b1,x3,z2) \
- reduceFromDoubleAdx(t0,b0) \
- reduceFromDoubleAdx(t1,b1) \
- addSub(t0,t1) \
- cselect(x2,x3,regMove) \
- cselect(z2,z3,regMove) \
- integerSqrAdx(b0,t0) \
- integerSqrAdx(b1,t1) \
- reduceFromDoubleAdx(x3,b0) \
- reduceFromDoubleAdx(z3,b1) \
- integerMulAdx(b0,x1,z3) \
- reduceFromDoubleAdx(z3,b0) \
- integerSqrAdx(b0,x2) \
- integerSqrAdx(b1,z2) \
- reduceFromDoubleAdx(x2,b0) \
- reduceFromDoubleAdx(z2,b1) \
- subtraction(t0,x2,z2) \
- multiplyA24Adx(t1,t0) \
- additionAdx(t1,t1,z2) \
- integerMulAdx(b0,x2,z2) \
- integerMulAdx(b1,t0,t1) \
- reduceFromDoubleAdx(x2,b0) \
- reduceFromDoubleAdx(z2,b1)
-
-#define difAddLeg \
- addSub(x1,z1) \
- integerMulLeg(b0,z1,ui) \
- reduceFromDoubleLeg(z1,b0) \
- addSub(x1,z1) \
- integerSqrLeg(b0,x1) \
- integerSqrLeg(b1,z1) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1) \
- integerMulLeg(b0,x1,z2) \
- integerMulLeg(b1,z1,x2) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1)
-
-#define difAddBmi2Adx \
- addSub(x1,z1) \
- integerMulAdx(b0,z1,ui) \
- reduceFromDoubleAdx(z1,b0) \
- addSub(x1,z1) \
- integerSqrAdx(b0,x1) \
- integerSqrAdx(b1,z1) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1) \
- integerMulAdx(b0,x1,z2) \
- integerMulAdx(b1,z1,x2) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1)
-
-#define doubleLeg \
- addSub(x1,z1) \
- integerSqrLeg(b0,x1) \
- integerSqrLeg(b1,z1) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1) \
- subtraction(t0,x1,z1) \
- multiplyA24Leg(t1,t0) \
- additionLeg(t1,t1,z1) \
- integerMulLeg(b0,x1,z1) \
- integerMulLeg(b1,t0,t1) \
- reduceFromDoubleLeg(x1,b0) \
- reduceFromDoubleLeg(z1,b1)
-
-#define doubleBmi2Adx \
- addSub(x1,z1) \
- integerSqrAdx(b0,x1) \
- integerSqrAdx(b1,z1) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1) \
- subtraction(t0,x1,z1) \
- multiplyA24Adx(t1,t0) \
- additionAdx(t1,t1,z1) \
- integerMulAdx(b0,x1,z1) \
- integerMulAdx(b1,t0,t1) \
- reduceFromDoubleAdx(x1,b0) \
- reduceFromDoubleAdx(z1,b1)
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s b/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s
deleted file mode 100644
index ed33ba3d..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/curve_amd64.s
+++ /dev/null
@@ -1,194 +0,0 @@
-//go:build amd64 && !purego
-// +build amd64,!purego
-
-#include "textflag.h"
-
-// Depends on circl/math/fp448 package
-#include "../../math/fp448/fp_amd64.h"
-#include "curve_amd64.h"
-
-// CTE_A24 is (A+2)/4 from Curve448
-#define CTE_A24 39082
-
-#define Size 56
-
-// multiplyA24Leg multiplies x times CTE_A24 and stores in z
-// Uses: AX, DX, R8-R15, FLAGS
-// Instr: x86_64, cmov, adx
-#define multiplyA24Leg(z,x) \
- MOVQ $CTE_A24, R15; \
- MOVQ 0+x, AX; MULQ R15; MOVQ AX, R8; ;;;;;;;;;;;; MOVQ DX, R9; \
- MOVQ 8+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \
- MOVQ 16+x, AX; MULQ R15; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \
- MOVQ 24+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \
- MOVQ 32+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \
- MOVQ 40+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \
- MOVQ 48+x, AX; MULQ R15; ADDQ AX, R14; ADCQ $0, DX; \
- MOVQ DX, AX; \
- SHLQ $32, AX; \
- ADDQ DX, R8; MOVQ $0, DX; \
- ADCQ $0, R9; \
- ADCQ $0, R10; \
- ADCQ AX, R11; \
- ADCQ $0, R12; \
- ADCQ $0, R13; \
- ADCQ $0, R14; \
- ADCQ $0, DX; \
- MOVQ DX, AX; \
- SHLQ $32, AX; \
- ADDQ DX, R8; \
- ADCQ $0, R9; \
- ADCQ $0, R10; \
- ADCQ AX, R11; \
- ADCQ $0, R12; \
- ADCQ $0, R13; \
- ADCQ $0, R14; \
- MOVQ R8, 0+z; \
- MOVQ R9, 8+z; \
- MOVQ R10, 16+z; \
- MOVQ R11, 24+z; \
- MOVQ R12, 32+z; \
- MOVQ R13, 40+z; \
- MOVQ R14, 48+z;
-
-// multiplyA24Adx multiplies x times CTE_A24 and stores in z
-// Uses: AX, DX, R8-R14, FLAGS
-// Instr: x86_64, bmi2
-#define multiplyA24Adx(z,x) \
- MOVQ $CTE_A24, DX; \
- MULXQ 0+x, R8, R9; \
- MULXQ 8+x, AX, R10; ADDQ AX, R9; \
- MULXQ 16+x, AX, R11; ADCQ AX, R10; \
- MULXQ 24+x, AX, R12; ADCQ AX, R11; \
- MULXQ 32+x, AX, R13; ADCQ AX, R12; \
- MULXQ 40+x, AX, R14; ADCQ AX, R13; \
- MULXQ 48+x, AX, DX; ADCQ AX, R14; \
- ;;;;;;;;;;;;;;;;;;;; ADCQ $0, DX; \
- MOVQ DX, AX; \
- SHLQ $32, AX; \
- ADDQ DX, R8; MOVQ $0, DX; \
- ADCQ $0, R9; \
- ADCQ $0, R10; \
- ADCQ AX, R11; \
- ADCQ $0, R12; \
- ADCQ $0, R13; \
- ADCQ $0, R14; \
- ADCQ $0, DX; \
- MOVQ DX, AX; \
- SHLQ $32, AX; \
- ADDQ DX, R8; \
- ADCQ $0, R9; \
- ADCQ $0, R10; \
- ADCQ AX, R11; \
- ADCQ $0, R12; \
- ADCQ $0, R13; \
- ADCQ $0, R14; \
- MOVQ R8, 0+z; \
- MOVQ R9, 8+z; \
- MOVQ R10, 16+z; \
- MOVQ R11, 24+z; \
- MOVQ R12, 32+z; \
- MOVQ R13, 40+z; \
- MOVQ R14, 48+z;
-
-#define mulA24Legacy \
- multiplyA24Leg(0(DI),0(SI))
-#define mulA24Bmi2Adx \
- multiplyA24Adx(0(DI),0(SI))
-
-// func mulA24Amd64(z, x *fp448.Elt)
-TEXT ·mulA24Amd64(SB),NOSPLIT,$0-16
- MOVQ z+0(FP), DI
- MOVQ x+8(FP), SI
- CHECK_BMI2ADX(LMA24, mulA24Legacy, mulA24Bmi2Adx)
-
-// func ladderStepAmd64(w *[5]fp448.Elt, b uint)
-// ladderStepAmd64 calculates a point addition and doubling as follows:
-// (x2,z2) = 2*(x2,z2) and (x3,z3) = (x2,z2)+(x3,z3) using as a difference (x1,-).
-// w = {x1,x2,z2,x3,z4} are five fp255.Elt of 56 bytes.
-// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and
-// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
-TEXT ·ladderStepAmd64(SB),NOSPLIT,$336-16
- // Parameters
- #define regWork DI
- #define regMove SI
- #define x1 0*Size(regWork)
- #define x2 1*Size(regWork)
- #define z2 2*Size(regWork)
- #define x3 3*Size(regWork)
- #define z3 4*Size(regWork)
- // Local variables
- #define t0 0*Size(SP)
- #define t1 1*Size(SP)
- #define b0 2*Size(SP)
- #define b1 4*Size(SP)
- MOVQ w+0(FP), regWork
- MOVQ b+8(FP), regMove
- CHECK_BMI2ADX(LLADSTEP, ladderStepLeg, ladderStepBmi2Adx)
- #undef regWork
- #undef regMove
- #undef x1
- #undef x2
- #undef z2
- #undef x3
- #undef z3
- #undef t0
- #undef t1
- #undef b0
- #undef b1
-
-// func diffAddAmd64(work *[5]fp.Elt, swap uint)
-// diffAddAmd64 calculates a differential point addition using a precomputed point.
-// (x1,z1) = (x1,z1)+(mu) using a difference point (x2,z2)
-// work = {mu,x1,z1,x2,z2} are five fp448.Elt of 56 bytes, and
-// stack = (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
-// This is Equation 7 at https://eprint.iacr.org/2017/264.
-TEXT ·diffAddAmd64(SB),NOSPLIT,$224-16
- // Parameters
- #define regWork DI
- #define regSwap SI
- #define ui 0*Size(regWork)
- #define x1 1*Size(regWork)
- #define z1 2*Size(regWork)
- #define x2 3*Size(regWork)
- #define z2 4*Size(regWork)
- // Local variables
- #define b0 0*Size(SP)
- #define b1 2*Size(SP)
- MOVQ w+0(FP), regWork
- MOVQ b+8(FP), regSwap
- cswap(x1,x2,regSwap)
- cswap(z1,z2,regSwap)
- CHECK_BMI2ADX(LDIFADD, difAddLeg, difAddBmi2Adx)
- #undef regWork
- #undef regSwap
- #undef ui
- #undef x1
- #undef z1
- #undef x2
- #undef z2
- #undef b0
- #undef b1
-
-// func doubleAmd64(x, z *fp448.Elt)
-// doubleAmd64 calculates a point doubling (x1,z1) = 2*(x1,z1).
-// stack = (t0,t1) are two fp.Elt of fp.Size bytes, and
-// (b0,b1) are two-double precision fp.Elt of 2*fp.Size bytes.
-TEXT ·doubleAmd64(SB),NOSPLIT,$336-16
- // Parameters
- #define x1 0(DI)
- #define z1 0(SI)
- // Local variables
- #define t0 0*Size(SP)
- #define t1 1*Size(SP)
- #define b0 2*Size(SP)
- #define b1 4*Size(SP)
- MOVQ x+0(FP), DI
- MOVQ z+8(FP), SI
- CHECK_BMI2ADX(LDOUB,doubleLeg,doubleBmi2Adx)
- #undef x1
- #undef z1
- #undef t0
- #undef t1
- #undef b0
- #undef b1
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go b/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go
deleted file mode 100644
index b0b65ccf..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/curve_generic.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package x448
-
-import (
- "encoding/binary"
- "math/bits"
-
- "github.com/cloudflare/circl/math/fp448"
-)
-
-func doubleGeneric(x, z *fp448.Elt) {
- t0, t1 := &fp448.Elt{}, &fp448.Elt{}
- fp448.AddSub(x, z)
- fp448.Sqr(x, x)
- fp448.Sqr(z, z)
- fp448.Sub(t0, x, z)
- mulA24Generic(t1, t0)
- fp448.Add(t1, t1, z)
- fp448.Mul(x, x, z)
- fp448.Mul(z, t0, t1)
-}
-
-func diffAddGeneric(w *[5]fp448.Elt, b uint) {
- mu, x1, z1, x2, z2 := &w[0], &w[1], &w[2], &w[3], &w[4]
- fp448.Cswap(x1, x2, b)
- fp448.Cswap(z1, z2, b)
- fp448.AddSub(x1, z1)
- fp448.Mul(z1, z1, mu)
- fp448.AddSub(x1, z1)
- fp448.Sqr(x1, x1)
- fp448.Sqr(z1, z1)
- fp448.Mul(x1, x1, z2)
- fp448.Mul(z1, z1, x2)
-}
-
-func ladderStepGeneric(w *[5]fp448.Elt, b uint) {
- x1, x2, z2, x3, z3 := &w[0], &w[1], &w[2], &w[3], &w[4]
- t0 := &fp448.Elt{}
- t1 := &fp448.Elt{}
- fp448.AddSub(x2, z2)
- fp448.AddSub(x3, z3)
- fp448.Mul(t0, x2, z3)
- fp448.Mul(t1, x3, z2)
- fp448.AddSub(t0, t1)
- fp448.Cmov(x2, x3, b)
- fp448.Cmov(z2, z3, b)
- fp448.Sqr(x3, t0)
- fp448.Sqr(z3, t1)
- fp448.Mul(z3, x1, z3)
- fp448.Sqr(x2, x2)
- fp448.Sqr(z2, z2)
- fp448.Sub(t0, x2, z2)
- mulA24Generic(t1, t0)
- fp448.Add(t1, t1, z2)
- fp448.Mul(x2, x2, z2)
- fp448.Mul(z2, t0, t1)
-}
-
-func mulA24Generic(z, x *fp448.Elt) {
- const A24 = 39082
- const n = 8
- var xx [7]uint64
- for i := range xx {
- xx[i] = binary.LittleEndian.Uint64(x[i*n : (i+1)*n])
- }
- h0, l0 := bits.Mul64(xx[0], A24)
- h1, l1 := bits.Mul64(xx[1], A24)
- h2, l2 := bits.Mul64(xx[2], A24)
- h3, l3 := bits.Mul64(xx[3], A24)
- h4, l4 := bits.Mul64(xx[4], A24)
- h5, l5 := bits.Mul64(xx[5], A24)
- h6, l6 := bits.Mul64(xx[6], A24)
-
- l1, c0 := bits.Add64(h0, l1, 0)
- l2, c1 := bits.Add64(h1, l2, c0)
- l3, c2 := bits.Add64(h2, l3, c1)
- l4, c3 := bits.Add64(h3, l4, c2)
- l5, c4 := bits.Add64(h4, l5, c3)
- l6, c5 := bits.Add64(h5, l6, c4)
- l7, _ := bits.Add64(h6, 0, c5)
-
- l0, c0 = bits.Add64(l0, l7, 0)
- l1, c1 = bits.Add64(l1, 0, c0)
- l2, c2 = bits.Add64(l2, 0, c1)
- l3, c3 = bits.Add64(l3, l7<<32, c2)
- l4, c4 = bits.Add64(l4, 0, c3)
- l5, c5 = bits.Add64(l5, 0, c4)
- l6, l7 = bits.Add64(l6, 0, c5)
-
- xx[0], c0 = bits.Add64(l0, l7, 0)
- xx[1], c1 = bits.Add64(l1, 0, c0)
- xx[2], c2 = bits.Add64(l2, 0, c1)
- xx[3], c3 = bits.Add64(l3, l7<<32, c2)
- xx[4], c4 = bits.Add64(l4, 0, c3)
- xx[5], c5 = bits.Add64(l5, 0, c4)
- xx[6], _ = bits.Add64(l6, 0, c5)
-
- for i := range xx {
- binary.LittleEndian.PutUint64(z[i*n:(i+1)*n], xx[i])
- }
-}
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go b/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go
deleted file mode 100644
index 3755b7c8..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/curve_noasm.go
+++ /dev/null
@@ -1,11 +0,0 @@
-//go:build !amd64 || purego
-// +build !amd64 purego
-
-package x448
-
-import fp "github.com/cloudflare/circl/math/fp448"
-
-func double(x, z *fp.Elt) { doubleGeneric(x, z) }
-func diffAdd(w *[5]fp.Elt, b uint) { diffAddGeneric(w, b) }
-func ladderStep(w *[5]fp.Elt, b uint) { ladderStepGeneric(w, b) }
-func mulA24(z, x *fp.Elt) { mulA24Generic(z, x) }
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/doc.go b/vendor/github.com/cloudflare/circl/dh/x448/doc.go
deleted file mode 100644
index c02904fe..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/doc.go
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
-Package x448 provides Diffie-Hellman functions as specified in RFC-7748.
-
-Validation of public keys.
-
-The Diffie-Hellman function, as described in RFC-7748 [1], works for any
-public key. However, if a different protocol requires contributory
-behaviour [2,3], then the public keys must be validated against low-order
-points [3,4]. To do that, the Shared function performs this validation
-internally and returns false when the public key is invalid (i.e., it
-is a low-order point).
-
-References:
- - [1] RFC7748 by Langley, Hamburg, Turner (https://rfc-editor.org/rfc/rfc7748.txt)
- - [2] Curve25519 by Bernstein (https://cr.yp.to/ecdh.html)
- - [3] Bernstein (https://cr.yp.to/ecdh.html#validate)
- - [4] Cremers&Jackson (https://eprint.iacr.org/2019/526)
-*/
-package x448
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/key.go b/vendor/github.com/cloudflare/circl/dh/x448/key.go
deleted file mode 100644
index 2fdde511..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/key.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package x448
-
-import (
- "crypto/subtle"
-
- fp "github.com/cloudflare/circl/math/fp448"
-)
-
-// Size is the length in bytes of a X448 key.
-const Size = 56
-
-// Key represents a X448 key.
-type Key [Size]byte
-
-func (k *Key) clamp(in *Key) *Key {
- *k = *in
- k[0] &= 252
- k[55] |= 128
- return k
-}
-
-// isValidPubKey verifies if the public key is not a low-order point.
-func (k *Key) isValidPubKey() bool {
- fp.Modp((*fp.Elt)(k))
- var isLowOrder int
- for _, P := range lowOrderPoints {
- isLowOrder |= subtle.ConstantTimeCompare(P[:], k[:])
- }
- return isLowOrder == 0
-}
-
-// KeyGen obtains a public key given a secret key.
-func KeyGen(public, secret *Key) {
- ladderJoye(public.clamp(secret))
-}
-
-// Shared calculates Alice's shared key from Alice's secret key and Bob's
-// public key returning true on success. A failure case happens when the public
-// key is a low-order point, thus the shared key is all-zeros and the function
-// returns false.
-func Shared(shared, secret, public *Key) bool {
- validPk := *public
- ok := validPk.isValidPubKey()
- ladderMontgomery(shared.clamp(secret), &validPk)
- return ok
-}
diff --git a/vendor/github.com/cloudflare/circl/dh/x448/table.go b/vendor/github.com/cloudflare/circl/dh/x448/table.go
deleted file mode 100644
index eef53c30..00000000
--- a/vendor/github.com/cloudflare/circl/dh/x448/table.go
+++ /dev/null
@@ -1,460 +0,0 @@
-package x448
-
-import fp "github.com/cloudflare/circl/math/fp448"
-
-// tableGenerator contains the set of points:
-//
-// t[i] = (xi+1)/(xi-1),
-//
-// where (xi,yi) = 2^iG and G is the generator point
-// Size = (448)*(448/8) = 25088 bytes.
-var tableGenerator = [448 * fp.Size]byte{
- /* (2^ 0)P */ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
- /* (2^ 1)P */ 0x37, 0xfa, 0xaa, 0x0d, 0x86, 0xa6, 0x24, 0xe9, 0x6c, 0x95, 0x08, 0x34, 0xba, 0x1a, 0x81, 0x3a, 0xae, 0x01, 0xa5, 0xa7, 0x05, 0x85, 0x96, 0x00, 0x06, 0x5a, 0xd7, 0xff, 0xee, 0x8e, 0x8f, 0x94, 0xd2, 0xdc, 0xd7, 0xfc, 0xe7, 0xe5, 0x99, 0x1d, 0x05, 0x46, 0x43, 0xe8, 0xbc, 0x12, 0xb7, 0xeb, 0x30, 0x5e, 0x7a, 0x85, 0x68, 0xed, 0x9d, 0x28,
- /* (2^ 2)P */ 0xf1, 0x7d, 0x08, 0x2b, 0x32, 0x4a, 0x62, 0x80, 0x36, 0xe7, 0xa4, 0x76, 0x5a, 0x2a, 0x1e, 0xf7, 0x9e, 0x3c, 0x40, 0x46, 0x9a, 0x1b, 0x61, 0xc1, 0xbf, 0x1a, 0x1b, 0xae, 0x91, 0x80, 0xa3, 0x76, 0x6c, 0xd4, 0x8f, 0xa4, 0xee, 0x26, 0x39, 0x23, 0xa4, 0x80, 0xf4, 0x66, 0x92, 0xe4, 0xe1, 0x18, 0x76, 0xc5, 0xe2, 0x19, 0x87, 0xd5, 0xc3, 0xe8,
- /* (2^ 3)P */ 0xfb, 0xc9, 0xf0, 0x07, 0xf2, 0x93, 0xd8, 0x50, 0x36, 0xed, 0xfb, 0xbd, 0xb2, 0xd3, 0xfc, 0xdf, 0xd5, 0x2a, 0x6e, 0x26, 0x09, 0xce, 0xd4, 0x07, 0x64, 0x9f, 0x40, 0x74, 0xad, 0x98, 0x2f, 0x1c, 0xb6, 0xdc, 0x2d, 0x42, 0xff, 0xbf, 0x97, 0xd8, 0xdb, 0xef, 0x99, 0xca, 0x73, 0x99, 0x1a, 0x04, 0x3b, 0x56, 0x2c, 0x1f, 0x87, 0x9d, 0x9f, 0x03,
- /* (2^ 4)P */ 0x4c, 0x35, 0x97, 0xf7, 0x81, 0x2c, 0x84, 0xa6, 0xe0, 0xcb, 0xce, 0x37, 0x4c, 0x21, 0x1c, 0x67, 0xfa, 0xab, 0x18, 0x4d, 0xef, 0xd0, 0xf0, 0x44, 0xa9, 0xfb, 0xc0, 0x8e, 0xda, 0x57, 0xa1, 0xd8, 0xeb, 0x87, 0xf4, 0x17, 0xea, 0x66, 0x0f, 0x16, 0xea, 0xcd, 0x5f, 0x3e, 0x88, 0xea, 0x09, 0x68, 0x40, 0xdf, 0x43, 0xcc, 0x54, 0x61, 0x58, 0xaa,
- /* (2^ 5)P */ 0x8d, 0xe7, 0x59, 0xd7, 0x5e, 0x63, 0x37, 0xa7, 0x3f, 0xd1, 0x49, 0x85, 0x01, 0xdd, 0x5e, 0xb3, 0xe6, 0x29, 0xcb, 0x25, 0x93, 0xdd, 0x08, 0x96, 0x83, 0x52, 0x76, 0x85, 0xf5, 0x5d, 0x02, 0xbf, 0xe9, 0x6d, 0x15, 0x27, 0xc1, 0x09, 0xd1, 0x14, 0x4d, 0x6e, 0xe8, 0xaf, 0x59, 0x58, 0x34, 0x9d, 0x2a, 0x99, 0x85, 0x26, 0xbe, 0x4b, 0x1e, 0xb9,
- /* (2^ 6)P */ 0x8d, 0xce, 0x94, 0xe2, 0x18, 0x56, 0x0d, 0x82, 0x8e, 0xdf, 0x85, 0x01, 0x8f, 0x93, 0x3c, 0xc6, 0xbd, 0x61, 0xfb, 0xf4, 0x22, 0xc5, 0x16, 0x87, 0xd1, 0xb1, 0x9e, 0x09, 0xc5, 0x83, 0x2e, 0x4a, 0x07, 0x88, 0xee, 0xe0, 0x29, 0x8d, 0x2e, 0x1f, 0x88, 0xad, 0xfd, 0x18, 0x93, 0xb7, 0xed, 0x42, 0x86, 0x78, 0xf0, 0xb8, 0x70, 0xbe, 0x01, 0x67,
- /* (2^ 7)P */ 0xdf, 0x62, 0x2d, 0x94, 0xc7, 0x35, 0x23, 0xda, 0x27, 0xbb, 0x2b, 0xdb, 0x30, 0x80, 0x68, 0x16, 0xa3, 0xae, 0xd7, 0xd2, 0xa7, 0x7c, 0xbf, 0x6a, 0x1d, 0x83, 0xde, 0x96, 0x0a, 0x43, 0xb6, 0x30, 0x37, 0xd6, 0xee, 0x63, 0x59, 0x9a, 0xbf, 0xa3, 0x30, 0x6c, 0xaf, 0x0c, 0xee, 0x3d, 0xcb, 0x35, 0x4b, 0x55, 0x5f, 0x84, 0x85, 0xcb, 0x4f, 0x1e,
- /* (2^ 8)P */ 0x9d, 0x04, 0x68, 0x89, 0xa4, 0xa9, 0x0d, 0x87, 0xc1, 0x70, 0xf1, 0xeb, 0xfb, 0x47, 0x0a, 0xf0, 0xde, 0x67, 0xb7, 0x94, 0xcd, 0x36, 0x43, 0xa5, 0x49, 0x43, 0x67, 0xc3, 0xee, 0x3c, 0x6b, 0xec, 0xd0, 0x1a, 0xf4, 0xad, 0xef, 0x06, 0x4a, 0xe8, 0x46, 0x24, 0xd7, 0x93, 0xbf, 0xf0, 0xe3, 0x81, 0x61, 0xec, 0xea, 0x64, 0xfe, 0x67, 0xeb, 0xc7,
- /* (2^ 9)P */ 0x95, 0x45, 0x79, 0xcf, 0x2c, 0xfd, 0x9b, 0xfe, 0x84, 0x46, 0x4b, 0x8f, 0xa1, 0xcf, 0xc3, 0x04, 0x94, 0x78, 0xdb, 0xc9, 0xa6, 0x01, 0x75, 0xa4, 0xb4, 0x93, 0x72, 0x43, 0xa7, 0x7d, 0xda, 0x31, 0x38, 0x54, 0xab, 0x4e, 0x3f, 0x89, 0xa6, 0xab, 0x57, 0xc0, 0x16, 0x65, 0xdb, 0x92, 0x96, 0xe4, 0xc8, 0xae, 0xe7, 0x4c, 0x7a, 0xeb, 0xbb, 0x5a,
- /* (2^ 10)P */ 0xbe, 0xfe, 0x86, 0xc3, 0x97, 0xe0, 0x6a, 0x18, 0x20, 0x21, 0xca, 0x22, 0x55, 0xa1, 0xeb, 0xf5, 0x74, 0xe5, 0xc9, 0x59, 0xa7, 0x92, 0x65, 0x15, 0x08, 0x71, 0xd1, 0x09, 0x7e, 0x83, 0xfc, 0xbc, 0x5a, 0x93, 0x38, 0x0d, 0x43, 0x42, 0xfd, 0x76, 0x30, 0xe8, 0x63, 0x60, 0x09, 0x8d, 0x6c, 0xd3, 0xf8, 0x56, 0x3d, 0x68, 0x47, 0xab, 0xa0, 0x1d,
- /* (2^ 11)P */ 0x38, 0x50, 0x1c, 0xb1, 0xac, 0x88, 0x8f, 0x38, 0xe3, 0x69, 0xe6, 0xfc, 0x4f, 0x8f, 0xe1, 0x9b, 0xb1, 0x1a, 0x09, 0x39, 0x19, 0xdf, 0xcd, 0x98, 0x7b, 0x64, 0x42, 0xf6, 0x11, 0xea, 0xc7, 0xe8, 0x92, 0x65, 0x00, 0x2c, 0x75, 0xb5, 0x94, 0x1e, 0x5b, 0xa6, 0x66, 0x81, 0x77, 0xf3, 0x39, 0x94, 0xac, 0xbd, 0xe4, 0x2a, 0x66, 0x84, 0x9c, 0x60,
- /* (2^ 12)P */ 0xb5, 0xb6, 0xd9, 0x03, 0x67, 0xa4, 0xa8, 0x0a, 0x4a, 0x2b, 0x9d, 0xfa, 0x13, 0xe1, 0x99, 0x25, 0x4a, 0x5c, 0x67, 0xb9, 0xb2, 0xb7, 0xdd, 0x1e, 0xaf, 0xeb, 0x63, 0x41, 0xb6, 0xb9, 0xa0, 0x87, 0x0a, 0xe0, 0x06, 0x07, 0xaa, 0x97, 0xf8, 0xf9, 0x38, 0x4f, 0xdf, 0x0c, 0x40, 0x7c, 0xc3, 0x98, 0xa9, 0x74, 0xf1, 0x5d, 0xda, 0xd1, 0xc0, 0x0a,
- /* (2^ 13)P */ 0xf2, 0x0a, 0xab, 0xab, 0x94, 0x50, 0xf0, 0xa3, 0x6f, 0xc6, 0x66, 0xba, 0xa6, 0xdc, 0x44, 0xdd, 0xd6, 0x08, 0xf4, 0xd3, 0xed, 0xb1, 0x40, 0x93, 0xee, 0xf6, 0xb8, 0x8e, 0xb4, 0x7c, 0xb9, 0x82, 0xc9, 0x9d, 0x45, 0x3b, 0x8e, 0x10, 0xcb, 0x70, 0x1e, 0xba, 0x3c, 0x62, 0x50, 0xda, 0xa9, 0x93, 0xb5, 0xd7, 0xd0, 0x6f, 0x29, 0x52, 0x95, 0xae,
- /* (2^ 14)P */ 0x14, 0x68, 0x69, 0x23, 0xa8, 0x44, 0x87, 0x9e, 0x22, 0x91, 0xe8, 0x92, 0xdf, 0xf7, 0xae, 0xba, 0x1c, 0x96, 0xe1, 0xc3, 0x94, 0xed, 0x6c, 0x95, 0xae, 0x96, 0xa7, 0x15, 0x9f, 0xf1, 0x17, 0x11, 0x92, 0x42, 0xd5, 0xcd, 0x18, 0xe7, 0xa9, 0xb5, 0x2f, 0xcd, 0xde, 0x6c, 0xc9, 0x7d, 0xfc, 0x7e, 0xbd, 0x7f, 0x10, 0x3d, 0x01, 0x00, 0x8d, 0x95,
- /* (2^ 15)P */ 0x3b, 0x76, 0x72, 0xae, 0xaf, 0x84, 0xf2, 0xf7, 0xd1, 0x6d, 0x13, 0x9c, 0x47, 0xe1, 0xb7, 0xa3, 0x19, 0x16, 0xee, 0x75, 0x45, 0xf6, 0x1a, 0x7b, 0x78, 0x49, 0x79, 0x05, 0x86, 0xf0, 0x7f, 0x9f, 0xfc, 0xc4, 0xbd, 0x86, 0xf3, 0x41, 0xa7, 0xfe, 0x01, 0xd5, 0x67, 0x16, 0x10, 0x5b, 0xa5, 0x16, 0xf3, 0x7f, 0x60, 0xce, 0xd2, 0x0c, 0x8e, 0x4b,
- /* (2^ 16)P */ 0x4a, 0x07, 0x99, 0x4a, 0x0f, 0x74, 0x91, 0x14, 0x68, 0xb9, 0x48, 0xb7, 0x44, 0x77, 0x9b, 0x4a, 0xe0, 0x68, 0x0e, 0x43, 0x4d, 0x98, 0x98, 0xbf, 0xa8, 0x3a, 0xb7, 0x6d, 0x2a, 0x9a, 0x77, 0x5f, 0x62, 0xf5, 0x6b, 0x4a, 0xb7, 0x7d, 0xe5, 0x09, 0x6b, 0xc0, 0x8b, 0x9c, 0x88, 0x37, 0x33, 0xf2, 0x41, 0xac, 0x22, 0x1f, 0xcf, 0x3b, 0x82, 0x34,
- /* (2^ 17)P */ 0x00, 0xc3, 0x78, 0x42, 0x32, 0x2e, 0xdc, 0xda, 0xb1, 0x96, 0x21, 0xa4, 0xe4, 0xbb, 0xe9, 0x9d, 0xbb, 0x0f, 0x93, 0xed, 0x26, 0x3d, 0xb5, 0xdb, 0x94, 0x31, 0x37, 0x07, 0xa2, 0xb2, 0xd5, 0x99, 0x0d, 0x93, 0xe1, 0xce, 0x3f, 0x0b, 0x96, 0x82, 0x47, 0xfe, 0x60, 0x6f, 0x8f, 0x61, 0x88, 0xd7, 0x05, 0x95, 0x0b, 0x46, 0x06, 0xb7, 0x32, 0x06,
- /* (2^ 18)P */ 0x44, 0xf5, 0x34, 0xdf, 0x2f, 0x9c, 0x5d, 0x9f, 0x53, 0x5c, 0x42, 0x8f, 0xc9, 0xdc, 0xd8, 0x40, 0xa2, 0xe7, 0x6a, 0x4a, 0x05, 0xf7, 0x86, 0x77, 0x2b, 0xae, 0x37, 0xed, 0x48, 0xfb, 0xf7, 0x62, 0x7c, 0x17, 0x59, 0x92, 0x41, 0x61, 0x93, 0x38, 0x30, 0xd1, 0xef, 0x54, 0x54, 0x03, 0x17, 0x57, 0x91, 0x15, 0x11, 0x33, 0xb5, 0xfa, 0xfb, 0x17,
- /* (2^ 19)P */ 0x29, 0xbb, 0xd4, 0xb4, 0x9c, 0xf1, 0x72, 0x94, 0xce, 0x6a, 0x29, 0xa8, 0x89, 0x18, 0x19, 0xf7, 0xb7, 0xcc, 0xee, 0x9a, 0x02, 0xe3, 0xc0, 0xb1, 0xe0, 0xee, 0x83, 0x78, 0xb4, 0x9e, 0x07, 0x87, 0xdf, 0xb0, 0x82, 0x26, 0x4e, 0xa4, 0x0c, 0x33, 0xaf, 0x40, 0x59, 0xb6, 0xdd, 0x52, 0x45, 0xf0, 0xb4, 0xf6, 0xe8, 0x4e, 0x4e, 0x79, 0x1a, 0x5d,
- /* (2^ 20)P */ 0x27, 0x33, 0x4d, 0x4c, 0x6b, 0x4f, 0x75, 0xb1, 0xbc, 0x1f, 0xab, 0x5b, 0x2b, 0xf0, 0x1c, 0x57, 0x86, 0xdd, 0xfd, 0x60, 0xb0, 0x8c, 0xe7, 0x9a, 0xe5, 0x5c, 0xeb, 0x11, 0x3a, 0xda, 0x22, 0x25, 0x99, 0x06, 0x8d, 0xf4, 0xaf, 0x29, 0x7a, 0xc9, 0xe5, 0xd2, 0x16, 0x9e, 0xd4, 0x63, 0x1d, 0x64, 0xa6, 0x47, 0x96, 0x37, 0x6f, 0x93, 0x2c, 0xcc,
- /* (2^ 21)P */ 0xc1, 0x94, 0x74, 0x86, 0x75, 0xf2, 0x91, 0x58, 0x23, 0x85, 0x63, 0x76, 0x54, 0xc7, 0xb4, 0x8c, 0xbc, 0x4e, 0xc4, 0xa7, 0xba, 0xa0, 0x55, 0x26, 0x71, 0xd5, 0x33, 0x72, 0xc9, 0xad, 0x1e, 0xf9, 0x5d, 0x78, 0x70, 0x93, 0x4e, 0x85, 0xfc, 0x39, 0x06, 0x73, 0x76, 0xff, 0xe8, 0x64, 0x69, 0x42, 0x45, 0xb2, 0x69, 0xb5, 0x32, 0xe7, 0x2c, 0xde,
- /* (2^ 22)P */ 0xde, 0x16, 0xd8, 0x33, 0x49, 0x32, 0xe9, 0x0e, 0x3a, 0x60, 0xee, 0x2e, 0x24, 0x75, 0xe3, 0x9c, 0x92, 0x07, 0xdb, 0xad, 0x92, 0xf5, 0x11, 0xdf, 0xdb, 0xb0, 0x17, 0x5c, 0xd6, 0x1a, 0x70, 0x00, 0xb7, 0xe2, 0x18, 0xec, 0xdc, 0xc2, 0x02, 0x93, 0xb3, 0xc8, 0x3f, 0x4f, 0x1b, 0x96, 0xe6, 0x33, 0x8c, 0xfb, 0xcc, 0xa5, 0x4e, 0xe8, 0xe7, 0x11,
- /* (2^ 23)P */ 0x05, 0x7a, 0x74, 0x52, 0xf8, 0xdf, 0x0d, 0x7c, 0x6a, 0x1a, 0x4e, 0x9a, 0x02, 0x1d, 0xae, 0x77, 0xf8, 0x8e, 0xf9, 0xa2, 0x38, 0x54, 0x50, 0xb2, 0x2c, 0x08, 0x9d, 0x9b, 0x9f, 0xfb, 0x2b, 0x06, 0xde, 0x9d, 0xc2, 0x03, 0x0b, 0x22, 0x2b, 0x10, 0x5b, 0x3a, 0x73, 0x29, 0x8e, 0x3e, 0x37, 0x08, 0x2c, 0x3b, 0xf8, 0x80, 0xc1, 0x66, 0x1e, 0x98,
- /* (2^ 24)P */ 0xd8, 0xd6, 0x3e, 0xcd, 0x63, 0x8c, 0x2b, 0x41, 0x81, 0xc0, 0x0c, 0x06, 0x87, 0xd6, 0xe7, 0x92, 0xfe, 0xf1, 0x0c, 0x4a, 0x84, 0x5b, 0xaf, 0x40, 0x53, 0x6f, 0x60, 0xd6, 0x6b, 0x76, 0x4b, 0xc2, 0xad, 0xc9, 0xb6, 0xb6, 0x6a, 0xa2, 0xb3, 0xf5, 0xf5, 0xc2, 0x55, 0x83, 0xb2, 0xd3, 0xe9, 0x41, 0x6c, 0x63, 0x51, 0xb8, 0x81, 0x74, 0xc8, 0x2c,
- /* (2^ 25)P */ 0xb2, 0xaf, 0x1c, 0xee, 0x07, 0xb0, 0x58, 0xa8, 0x2c, 0x6a, 0xc9, 0x2d, 0x62, 0x28, 0x75, 0x0c, 0x40, 0xb6, 0x11, 0x33, 0x96, 0x80, 0x28, 0x6d, 0xd5, 0x9e, 0x87, 0x90, 0x01, 0x66, 0x1d, 0x1c, 0xf8, 0xb4, 0x92, 0xac, 0x38, 0x18, 0x05, 0xc2, 0x4c, 0x4b, 0x54, 0x7d, 0x80, 0x46, 0x87, 0x2d, 0x99, 0x8e, 0x70, 0x80, 0x69, 0x71, 0x8b, 0xed,
- /* (2^ 26)P */ 0x37, 0xa7, 0x6b, 0x71, 0x36, 0x75, 0x8e, 0xff, 0x0f, 0x42, 0xda, 0x5a, 0x46, 0xa6, 0x97, 0x79, 0x7e, 0x30, 0xb3, 0x8f, 0xc7, 0x3a, 0xa0, 0xcb, 0x1d, 0x9c, 0x78, 0x77, 0x36, 0xc2, 0xe7, 0xf4, 0x2f, 0x29, 0x07, 0xb1, 0x07, 0xfd, 0xed, 0x1b, 0x39, 0x77, 0x06, 0x38, 0x77, 0x0f, 0x50, 0x31, 0x12, 0xbf, 0x92, 0xbf, 0x72, 0x79, 0x54, 0xa9,
- /* (2^ 27)P */ 0xbd, 0x4d, 0x46, 0x6b, 0x1a, 0x80, 0x46, 0x2d, 0xed, 0xfd, 0x64, 0x6d, 0x94, 0xbc, 0x4a, 0x6e, 0x0c, 0x12, 0xf6, 0x12, 0xab, 0x54, 0x88, 0xd3, 0x85, 0xac, 0x51, 0xae, 0x6f, 0xca, 0xc4, 0xb7, 0xec, 0x22, 0x54, 0x6d, 0x80, 0xb2, 0x1c, 0x63, 0x33, 0x76, 0x6b, 0x8e, 0x6d, 0x59, 0xcd, 0x73, 0x92, 0x5f, 0xff, 0xad, 0x10, 0x35, 0x70, 0x5f,
- /* (2^ 28)P */ 0xb3, 0x84, 0xde, 0xc8, 0x04, 0x43, 0x63, 0xfa, 0x29, 0xd9, 0xf0, 0x69, 0x65, 0x5a, 0x0c, 0xe8, 0x2e, 0x0b, 0xfe, 0xb0, 0x7a, 0x42, 0xb3, 0xc3, 0xfc, 0xe6, 0xb8, 0x92, 0x29, 0xae, 0xed, 0xec, 0xd5, 0xe8, 0x4a, 0xa1, 0xbd, 0x3b, 0xd3, 0xc0, 0x07, 0xab, 0x65, 0x65, 0x35, 0x9a, 0xa6, 0x5e, 0x78, 0x18, 0x76, 0x1c, 0x15, 0x49, 0xe6, 0x75,
- /* (2^ 29)P */ 0x45, 0xb3, 0x92, 0xa9, 0xc3, 0xb8, 0x11, 0x68, 0x64, 0x3a, 0x83, 0x5d, 0xa8, 0x94, 0x6a, 0x9d, 0xaa, 0x27, 0x9f, 0x98, 0x5d, 0xc0, 0x29, 0xf0, 0xc0, 0x4b, 0x14, 0x3c, 0x05, 0xe7, 0xf8, 0xbd, 0x38, 0x22, 0x96, 0x75, 0x65, 0x5e, 0x0d, 0x3f, 0xbb, 0x6f, 0xe8, 0x3f, 0x96, 0x76, 0x9f, 0xba, 0xd9, 0x44, 0x92, 0x96, 0x22, 0xe7, 0x52, 0xe7,
- /* (2^ 30)P */ 0xf4, 0xa3, 0x95, 0x90, 0x47, 0xdf, 0x7d, 0xdc, 0xf4, 0x13, 0x87, 0x67, 0x7d, 0x4f, 0x9d, 0xa0, 0x00, 0x46, 0x72, 0x08, 0xc3, 0xa2, 0x7a, 0x3e, 0xe7, 0x6d, 0x52, 0x7c, 0x11, 0x36, 0x50, 0x83, 0x89, 0x64, 0xcb, 0x1f, 0x08, 0x83, 0x46, 0xcb, 0xac, 0xa6, 0xd8, 0x9c, 0x1b, 0xe8, 0x05, 0x47, 0xc7, 0x26, 0x06, 0x83, 0x39, 0xe9, 0xb1, 0x1c,
- /* (2^ 31)P */ 0x11, 0xe8, 0xc8, 0x42, 0xbf, 0x30, 0x9c, 0xa3, 0xf1, 0x85, 0x96, 0x95, 0x4f, 0x4f, 0x52, 0xa2, 0xf5, 0x8b, 0x68, 0x24, 0x16, 0xac, 0x9b, 0xa9, 0x27, 0x28, 0x0e, 0x84, 0x03, 0x46, 0x22, 0x5f, 0xf7, 0x0d, 0xa6, 0x85, 0x88, 0xc1, 0x45, 0x4b, 0x85, 0x1a, 0x10, 0x7f, 0xc9, 0x94, 0x20, 0xb0, 0x04, 0x28, 0x12, 0x30, 0xb9, 0xe6, 0x40, 0x6b,
- /* (2^ 32)P */ 0xac, 0x1b, 0x57, 0xb6, 0x42, 0xdb, 0x81, 0x8d, 0x76, 0xfd, 0x9b, 0x1c, 0x29, 0x30, 0xd5, 0x3a, 0xcc, 0x53, 0xd9, 0x26, 0x7a, 0x0f, 0x9c, 0x2e, 0x79, 0xf5, 0x62, 0xeb, 0x61, 0x9d, 0x9b, 0x80, 0x39, 0xcd, 0x60, 0x2e, 0x1f, 0x08, 0x22, 0xbc, 0x19, 0xb3, 0x2a, 0x43, 0x44, 0xf2, 0x4e, 0x66, 0xf4, 0x36, 0xa6, 0xa7, 0xbc, 0xa4, 0x15, 0x7e,
- /* (2^ 33)P */ 0xc1, 0x90, 0x8a, 0xde, 0xff, 0x78, 0xc3, 0x73, 0x16, 0xee, 0x76, 0xa0, 0x84, 0x60, 0x8d, 0xe6, 0x82, 0x0f, 0xde, 0x4e, 0xc5, 0x99, 0x34, 0x06, 0x90, 0x44, 0x55, 0xf8, 0x91, 0xd8, 0xe1, 0xe4, 0x2c, 0x8a, 0xde, 0x94, 0x1e, 0x78, 0x25, 0x3d, 0xfd, 0xd8, 0x59, 0x7d, 0xaf, 0x6e, 0xbe, 0x96, 0xbe, 0x3c, 0x16, 0x23, 0x0f, 0x4c, 0xa4, 0x28,
- /* (2^ 34)P */ 0xba, 0x11, 0x35, 0x57, 0x03, 0xb6, 0xf4, 0x24, 0x89, 0xb8, 0x5a, 0x0d, 0x50, 0x9c, 0xaa, 0x51, 0x7f, 0xa4, 0x0e, 0xfc, 0x71, 0xb3, 0x3b, 0xf1, 0x96, 0x50, 0x23, 0x15, 0xf5, 0xf5, 0xd4, 0x23, 0xdc, 0x8b, 0x26, 0x9e, 0xae, 0xb7, 0x50, 0xcd, 0xc4, 0x25, 0xf6, 0x75, 0x40, 0x9c, 0x37, 0x79, 0x33, 0x60, 0xd4, 0x4b, 0x13, 0x32, 0xee, 0xe2,
- /* (2^ 35)P */ 0x43, 0xb8, 0x56, 0x59, 0xf0, 0x68, 0x23, 0xb3, 0xea, 0x70, 0x58, 0x4c, 0x1e, 0x5a, 0x16, 0x54, 0x03, 0xb2, 0xf4, 0x73, 0xb6, 0xd9, 0x5c, 0x9c, 0x6f, 0xcf, 0x82, 0x2e, 0x54, 0x15, 0x46, 0x2c, 0xa3, 0xda, 0x4e, 0x87, 0xf5, 0x2b, 0xba, 0x91, 0xa3, 0xa0, 0x89, 0xba, 0x48, 0x2b, 0xfa, 0x64, 0x02, 0x7f, 0x78, 0x03, 0xd1, 0xe8, 0x3b, 0xe9,
- /* (2^ 36)P */ 0x15, 0xa4, 0x71, 0xd4, 0x0c, 0x24, 0xe9, 0x07, 0xa1, 0x43, 0xf4, 0x7f, 0xbb, 0xa2, 0xa6, 0x6b, 0xfa, 0xb7, 0xea, 0x58, 0xd1, 0x96, 0xb0, 0x24, 0x5c, 0xc7, 0x37, 0x4e, 0x60, 0x0f, 0x40, 0xf2, 0x2f, 0x44, 0x70, 0xea, 0x80, 0x63, 0xfe, 0xfc, 0x46, 0x59, 0x12, 0x27, 0xb5, 0x27, 0xfd, 0xb7, 0x73, 0x0b, 0xca, 0x8b, 0xc2, 0xd3, 0x71, 0x08,
- /* (2^ 37)P */ 0x26, 0x0e, 0xd7, 0x52, 0x6f, 0xf1, 0xf2, 0x9d, 0xb8, 0x3d, 0xbd, 0xd4, 0x75, 0x97, 0xd8, 0xbf, 0xa8, 0x86, 0x96, 0xa5, 0x80, 0xa0, 0x45, 0x75, 0xf6, 0x77, 0x71, 0xdb, 0x77, 0x96, 0x55, 0x99, 0x31, 0xd0, 0x4f, 0x34, 0xf4, 0x35, 0x39, 0x41, 0xd3, 0x7d, 0xf7, 0xe2, 0x74, 0xde, 0xbe, 0x5b, 0x1f, 0x39, 0x10, 0x21, 0xa3, 0x4d, 0x3b, 0xc8,
- /* (2^ 38)P */ 0x04, 0x00, 0x2a, 0x45, 0xb2, 0xaf, 0x9b, 0x18, 0x6a, 0xeb, 0x96, 0x28, 0xa4, 0x77, 0xd0, 0x13, 0xcf, 0x17, 0x65, 0xe8, 0xc5, 0x81, 0x28, 0xad, 0x39, 0x7a, 0x0b, 0xaa, 0x55, 0x2b, 0xf3, 0xfc, 0x86, 0x40, 0xad, 0x0d, 0x1e, 0x28, 0xa2, 0x2d, 0xc5, 0xd6, 0x04, 0x15, 0xa2, 0x30, 0x3d, 0x12, 0x8e, 0xd6, 0xb5, 0xf7, 0x69, 0xbb, 0x84, 0x20,
- /* (2^ 39)P */ 0xd7, 0x7a, 0x77, 0x2c, 0xfb, 0x81, 0x80, 0xe9, 0x1e, 0xc6, 0x36, 0x31, 0x79, 0xc3, 0x7c, 0xa9, 0x57, 0x6b, 0xb5, 0x70, 0xfb, 0xe4, 0xa1, 0xff, 0xfd, 0x21, 0xa5, 0x7c, 0xfa, 0x44, 0xba, 0x0d, 0x96, 0x3d, 0xc4, 0x5c, 0x39, 0x52, 0x87, 0xd7, 0x22, 0x0f, 0x52, 0x88, 0x91, 0x87, 0x96, 0xac, 0xfa, 0x3b, 0xdf, 0xdc, 0x83, 0x8c, 0x99, 0x29,
- /* (2^ 40)P */ 0x98, 0x6b, 0x3a, 0x8d, 0x83, 0x17, 0xe1, 0x62, 0xd8, 0x80, 0x4c, 0x97, 0xce, 0x6b, 0xaa, 0x10, 0xa7, 0xc4, 0xe9, 0xeb, 0xa5, 0xfb, 0xc9, 0xdd, 0x2d, 0xeb, 0xfc, 0x9a, 0x71, 0xcd, 0x68, 0x6e, 0xc0, 0x35, 0x64, 0x62, 0x1b, 0x95, 0x12, 0xe8, 0x53, 0xec, 0xf0, 0xf4, 0x86, 0x86, 0x78, 0x18, 0xc4, 0xc6, 0xbc, 0x5a, 0x59, 0x8f, 0x7c, 0x7e,
- /* (2^ 41)P */ 0x7f, 0xd7, 0x1e, 0xc5, 0x83, 0xdc, 0x1f, 0xbe, 0x0b, 0xcf, 0x2e, 0x01, 0x01, 0xed, 0xac, 0x17, 0x3b, 0xed, 0xa4, 0x30, 0x96, 0x0e, 0x14, 0x7e, 0x19, 0x2b, 0xa5, 0x67, 0x1e, 0xb3, 0x34, 0x03, 0xa8, 0xbb, 0x0a, 0x7d, 0x08, 0x2d, 0xd5, 0x53, 0x19, 0x6f, 0x13, 0xd5, 0xc0, 0x90, 0x8a, 0xcc, 0xc9, 0x5c, 0xab, 0x24, 0xd7, 0x03, 0xf6, 0x57,
- /* (2^ 42)P */ 0x49, 0xcb, 0xb4, 0x96, 0x5f, 0xa6, 0xf8, 0x71, 0x6f, 0x59, 0xad, 0x05, 0x24, 0x2d, 0xaf, 0x67, 0xa8, 0xbe, 0x95, 0xdf, 0x0d, 0x28, 0x5a, 0x7f, 0x6e, 0x87, 0x8c, 0x6e, 0x67, 0x0c, 0xf4, 0xe0, 0x1c, 0x30, 0xc2, 0x66, 0xae, 0x20, 0xa1, 0x34, 0xec, 0x9c, 0xbc, 0xae, 0x3d, 0xa1, 0x28, 0x28, 0x95, 0x1d, 0xc9, 0x3a, 0xa8, 0xfd, 0xfc, 0xa1,
- /* (2^ 43)P */ 0xe2, 0x2b, 0x9d, 0xed, 0x02, 0x99, 0x67, 0xbb, 0x2e, 0x16, 0x62, 0x05, 0x70, 0xc7, 0x27, 0xb9, 0x1c, 0x3f, 0xf2, 0x11, 0x01, 0xd8, 0x51, 0xa4, 0x18, 0x92, 0xa9, 0x5d, 0xfb, 0xa9, 0xe4, 0x42, 0xba, 0x38, 0x34, 0x1a, 0x4a, 0xc5, 0x6a, 0x37, 0xde, 0xa7, 0x0c, 0xb4, 0x7e, 0x7f, 0xde, 0xa6, 0xee, 0xcd, 0x55, 0x57, 0x05, 0x06, 0xfd, 0x5d,
- /* (2^ 44)P */ 0x2f, 0x32, 0xcf, 0x2e, 0x2c, 0x7b, 0xbe, 0x9a, 0x0c, 0x57, 0x35, 0xf8, 0x87, 0xda, 0x9c, 0xec, 0x48, 0xf2, 0xbb, 0xe2, 0xda, 0x10, 0x58, 0x20, 0xc6, 0xd3, 0x87, 0xe9, 0xc7, 0x26, 0xd1, 0x9a, 0x46, 0x87, 0x90, 0xda, 0xdc, 0xde, 0xc3, 0xb3, 0xf2, 0xe8, 0x6f, 0x4a, 0xe6, 0xe8, 0x9d, 0x98, 0x36, 0x20, 0x03, 0x47, 0x15, 0x3f, 0x64, 0x59,
- /* (2^ 45)P */ 0xd4, 0x71, 0x49, 0x0a, 0x67, 0x97, 0xaa, 0x3f, 0xf4, 0x1b, 0x3a, 0x6e, 0x5e, 0x17, 0xcc, 0x0a, 0x8f, 0x81, 0x6a, 0x41, 0x38, 0x77, 0x40, 0x8a, 0x11, 0x42, 0x62, 0xd2, 0x50, 0x32, 0x79, 0x78, 0x28, 0xc2, 0x2e, 0x10, 0x01, 0x94, 0x30, 0x4f, 0x7f, 0x18, 0x17, 0x56, 0x85, 0x4e, 0xad, 0xf7, 0xcb, 0x87, 0x3c, 0x3f, 0x50, 0x2c, 0xc0, 0xba,
- /* (2^ 46)P */ 0xbc, 0x30, 0x8e, 0x65, 0x8e, 0x57, 0x5b, 0x38, 0x7a, 0xd4, 0x95, 0x52, 0x7a, 0x32, 0x59, 0x69, 0xcd, 0x9d, 0x47, 0x34, 0x5b, 0x55, 0xa5, 0x24, 0x60, 0xdd, 0xc0, 0xc1, 0x62, 0x73, 0x44, 0xae, 0x4c, 0x9c, 0x65, 0x55, 0x1b, 0x9d, 0x8a, 0x29, 0xb0, 0x1a, 0x52, 0xa8, 0xf1, 0xe6, 0x9a, 0xb3, 0xf6, 0xa3, 0xc9, 0x0a, 0x70, 0x7d, 0x0f, 0xee,
- /* (2^ 47)P */ 0x77, 0xd3, 0xe5, 0x8e, 0xfa, 0x00, 0xeb, 0x1b, 0x7f, 0xdc, 0x68, 0x3f, 0x92, 0xbd, 0xb7, 0x0b, 0xb7, 0xb5, 0x24, 0xdf, 0xc5, 0x67, 0x53, 0xd4, 0x36, 0x79, 0xc4, 0x7b, 0x57, 0xbc, 0x99, 0x97, 0x60, 0xef, 0xe4, 0x01, 0xa1, 0xa7, 0xaa, 0x12, 0x36, 0x29, 0xb1, 0x03, 0xc2, 0x83, 0x1c, 0x2b, 0x83, 0xef, 0x2e, 0x2c, 0x23, 0x92, 0xfd, 0xd1,
- /* (2^ 48)P */ 0x94, 0xef, 0x03, 0x59, 0xfa, 0x8a, 0x18, 0x76, 0xee, 0x58, 0x08, 0x4d, 0x44, 0xce, 0xf1, 0x52, 0x33, 0x49, 0xf6, 0x69, 0x71, 0xe3, 0xa9, 0xbc, 0x86, 0xe3, 0x43, 0xde, 0x33, 0x7b, 0x90, 0x8b, 0x3e, 0x7d, 0xd5, 0x4a, 0xf0, 0x23, 0x99, 0xa6, 0xea, 0x5f, 0x08, 0xe5, 0xb9, 0x49, 0x8b, 0x0d, 0x6a, 0x21, 0xab, 0x07, 0x62, 0xcd, 0xc4, 0xbe,
- /* (2^ 49)P */ 0x61, 0xbf, 0x70, 0x14, 0xfa, 0x4e, 0x9e, 0x7c, 0x0c, 0xf8, 0xb2, 0x48, 0x71, 0x62, 0x83, 0xd6, 0xd1, 0xdc, 0x9c, 0x29, 0x66, 0xb1, 0x34, 0x9c, 0x8d, 0xe6, 0x88, 0xaf, 0xbe, 0xdc, 0x4d, 0xeb, 0xb0, 0xe7, 0x28, 0xae, 0xb2, 0x05, 0x56, 0xc6, 0x0e, 0x10, 0x26, 0xab, 0x2c, 0x59, 0x72, 0x03, 0x66, 0xfe, 0x8f, 0x2c, 0x51, 0x2d, 0xdc, 0xae,
- /* (2^ 50)P */ 0xdc, 0x63, 0xf1, 0x8b, 0x5c, 0x65, 0x0b, 0xf1, 0xa6, 0x22, 0xe2, 0xd9, 0xdb, 0x49, 0xb1, 0x3c, 0x47, 0xc2, 0xfe, 0xac, 0x86, 0x07, 0x52, 0xec, 0xb0, 0x08, 0x69, 0xfb, 0xd1, 0x06, 0xdc, 0x48, 0x5c, 0x3d, 0xb2, 0x4d, 0xb8, 0x1a, 0x4e, 0xda, 0xb9, 0xc1, 0x2b, 0xab, 0x4b, 0x62, 0x81, 0x21, 0x9a, 0xfc, 0x3d, 0x39, 0x83, 0x11, 0x36, 0xeb,
- /* (2^ 51)P */ 0x94, 0xf3, 0x17, 0xef, 0xf9, 0x60, 0x54, 0xc3, 0xd7, 0x27, 0x35, 0xc5, 0x98, 0x5e, 0xf6, 0x63, 0x6c, 0xa0, 0x4a, 0xd3, 0xa3, 0x98, 0xd9, 0x42, 0xe3, 0xf1, 0xf8, 0x81, 0x96, 0xa9, 0xea, 0x6d, 0x4b, 0x8e, 0x33, 0xca, 0x94, 0x0d, 0xa0, 0xf7, 0xbb, 0x64, 0xa3, 0x36, 0x6f, 0xdc, 0x5a, 0x94, 0x42, 0xca, 0x06, 0xb2, 0x2b, 0x9a, 0x9f, 0x71,
- /* (2^ 52)P */ 0xec, 0xdb, 0xa6, 0x1f, 0xdf, 0x15, 0x36, 0xa3, 0xda, 0x8a, 0x7a, 0xb6, 0xa7, 0xe3, 0xaf, 0x52, 0xe0, 0x8d, 0xe8, 0xf2, 0x44, 0x20, 0xeb, 0xa1, 0x20, 0xc4, 0x65, 0x3c, 0x7c, 0x6c, 0x49, 0xed, 0x2f, 0x66, 0x23, 0x68, 0x61, 0x91, 0x40, 0x9f, 0x50, 0x19, 0xd1, 0x84, 0xa7, 0xe2, 0xed, 0x34, 0x37, 0xe3, 0xe4, 0x11, 0x7f, 0x87, 0x55, 0x0f,
- /* (2^ 53)P */ 0xb3, 0xa1, 0x0f, 0xb0, 0x48, 0xc0, 0x4d, 0x96, 0xa7, 0xcf, 0x5a, 0x81, 0xb8, 0x4a, 0x46, 0xef, 0x0a, 0xd3, 0x40, 0x7e, 0x02, 0xe3, 0x63, 0xaa, 0x50, 0xd1, 0x2a, 0x37, 0x22, 0x4a, 0x7f, 0x4f, 0xb6, 0xf9, 0x01, 0x82, 0x78, 0x3d, 0x93, 0x14, 0x11, 0x8a, 0x90, 0x60, 0xcd, 0x45, 0x4e, 0x7b, 0x42, 0xb9, 0x3e, 0x6e, 0x68, 0x1f, 0x36, 0x41,
- /* (2^ 54)P */ 0x13, 0x73, 0x0e, 0x4f, 0x79, 0x93, 0x9e, 0x29, 0x70, 0x7b, 0x4a, 0x59, 0x1a, 0x9a, 0xf4, 0x55, 0x08, 0xf0, 0xdb, 0x17, 0x58, 0xec, 0x64, 0xad, 0x7f, 0x29, 0xeb, 0x3f, 0x85, 0x4e, 0x60, 0x28, 0x98, 0x1f, 0x73, 0x4e, 0xe6, 0xa8, 0xab, 0xd5, 0xd6, 0xfc, 0xa1, 0x36, 0x6d, 0x15, 0xc6, 0x13, 0x83, 0xa0, 0xc2, 0x6e, 0xd9, 0xdb, 0xc9, 0xcc,
- /* (2^ 55)P */ 0xff, 0xd8, 0x52, 0xa3, 0xdc, 0x99, 0xcf, 0x3e, 0x19, 0xb3, 0x68, 0xd0, 0xb5, 0x0d, 0xb8, 0xee, 0x3f, 0xef, 0x6e, 0xc0, 0x38, 0x28, 0x44, 0x92, 0x78, 0x91, 0x1a, 0x08, 0x78, 0x6c, 0x65, 0x24, 0xf3, 0xa2, 0x3d, 0xf2, 0xe5, 0x79, 0x62, 0x69, 0x29, 0xf4, 0x22, 0xc5, 0xdb, 0x6a, 0xae, 0xf4, 0x44, 0xa3, 0x6f, 0xc7, 0x86, 0xab, 0xef, 0xef,
- /* (2^ 56)P */ 0xbf, 0x54, 0x9a, 0x09, 0x5d, 0x17, 0xd0, 0xde, 0xfb, 0xf5, 0xca, 0xff, 0x13, 0x20, 0x88, 0x82, 0x3a, 0xe2, 0xd0, 0x3b, 0xfb, 0x05, 0x76, 0xd1, 0xc0, 0x02, 0x71, 0x3b, 0x94, 0xe8, 0xc9, 0x84, 0xcf, 0xa4, 0xe9, 0x28, 0x7b, 0xf5, 0x09, 0xc3, 0x2b, 0x22, 0x40, 0xf1, 0x68, 0x24, 0x24, 0x7d, 0x9f, 0x6e, 0xcd, 0xfe, 0xb0, 0x19, 0x61, 0xf5,
- /* (2^ 57)P */ 0xe8, 0x63, 0x51, 0xb3, 0x95, 0x6b, 0x7b, 0x74, 0x92, 0x52, 0x45, 0xa4, 0xed, 0xea, 0x0e, 0x0d, 0x2b, 0x01, 0x1e, 0x2c, 0xbc, 0x91, 0x06, 0x69, 0xdb, 0x1f, 0xb5, 0x77, 0x1d, 0x56, 0xf5, 0xb4, 0x02, 0x80, 0x49, 0x56, 0x12, 0xce, 0x86, 0x05, 0xc9, 0xd9, 0xae, 0xf3, 0x6d, 0xe6, 0x3f, 0x40, 0x52, 0xe9, 0x49, 0x2b, 0x31, 0x06, 0x86, 0x14,
- /* (2^ 58)P */ 0xf5, 0x09, 0x3b, 0xd2, 0xff, 0xdf, 0x11, 0xa5, 0x1c, 0x99, 0xe8, 0x1b, 0xa4, 0x2c, 0x7d, 0x8e, 0xc8, 0xf7, 0x03, 0x46, 0xfa, 0xb6, 0xde, 0x73, 0x91, 0x7e, 0x5a, 0x7a, 0xd7, 0x9a, 0x5b, 0x80, 0x24, 0x62, 0x5e, 0x92, 0xf1, 0xa3, 0x45, 0xa3, 0x43, 0x92, 0x8a, 0x2a, 0x5b, 0x0c, 0xb4, 0xc8, 0xad, 0x1c, 0xb6, 0x6c, 0x5e, 0x81, 0x18, 0x91,
- /* (2^ 59)P */ 0x96, 0xb3, 0xca, 0x2b, 0xe3, 0x7a, 0x59, 0x72, 0x17, 0x74, 0x29, 0x21, 0xe7, 0x78, 0x07, 0xad, 0xda, 0xb6, 0xcd, 0xf9, 0x27, 0x4d, 0xc8, 0xf2, 0x98, 0x22, 0xca, 0xf2, 0x33, 0x74, 0x7a, 0xdd, 0x1e, 0x71, 0xec, 0xe3, 0x3f, 0xe2, 0xa2, 0xd2, 0x38, 0x75, 0xb0, 0xd0, 0x0a, 0xcf, 0x7d, 0x36, 0xdc, 0x49, 0x38, 0x25, 0x34, 0x4f, 0x20, 0x9a,
- /* (2^ 60)P */ 0x2b, 0x6e, 0x04, 0x0d, 0x4f, 0x3d, 0x3b, 0x24, 0xf6, 0x4e, 0x5e, 0x0a, 0xbd, 0x48, 0x96, 0xba, 0x81, 0x8f, 0x39, 0x82, 0x13, 0xe6, 0x72, 0xf3, 0x0f, 0xb6, 0x94, 0xf4, 0xc5, 0x90, 0x74, 0x91, 0xa8, 0xf2, 0xc9, 0xca, 0x9a, 0x4d, 0x98, 0xf2, 0xdf, 0x52, 0x4e, 0x97, 0x2f, 0xeb, 0x84, 0xd3, 0xaf, 0xc2, 0xcc, 0xfb, 0x4c, 0x26, 0x4b, 0xe4,
- /* (2^ 61)P */ 0x12, 0x9e, 0xfb, 0x9d, 0x78, 0x79, 0x99, 0xdd, 0xb3, 0x0b, 0x2e, 0x56, 0x41, 0x8e, 0x3f, 0x39, 0xb8, 0x97, 0x89, 0x53, 0x9b, 0x8a, 0x3c, 0x40, 0x9d, 0xa4, 0x6c, 0x2e, 0x31, 0x71, 0xc6, 0x0a, 0x41, 0xd4, 0x95, 0x06, 0x5e, 0xc1, 0xab, 0xc2, 0x14, 0xc4, 0xc7, 0x15, 0x08, 0x3a, 0xad, 0x7a, 0xb4, 0x62, 0xa3, 0x0c, 0x90, 0xf4, 0x47, 0x08,
- /* (2^ 62)P */ 0x7f, 0xec, 0x09, 0x82, 0xf5, 0x94, 0x09, 0x93, 0x32, 0xd3, 0xdc, 0x56, 0x80, 0x7b, 0x5b, 0x22, 0x80, 0x6a, 0x96, 0x72, 0xb1, 0xc2, 0xd9, 0xa1, 0x8b, 0x66, 0x42, 0x16, 0xe2, 0x07, 0xb3, 0x2d, 0xf1, 0x75, 0x35, 0x72, 0xc7, 0x98, 0xbe, 0x63, 0x3b, 0x20, 0x75, 0x05, 0xc1, 0x3e, 0x31, 0x5a, 0xf7, 0xaa, 0xae, 0x4b, 0xdb, 0x1d, 0xd0, 0x74,
- /* (2^ 63)P */ 0x36, 0x5c, 0x74, 0xe6, 0x5d, 0x59, 0x3f, 0x15, 0x4b, 0x4d, 0x4e, 0x67, 0x41, 0xfe, 0x98, 0x1f, 0x49, 0x76, 0x91, 0x0f, 0x9b, 0xf4, 0xaf, 0x86, 0xaf, 0x66, 0x19, 0xed, 0x46, 0xf1, 0x05, 0x9a, 0xcc, 0xd1, 0x14, 0x1f, 0x82, 0x12, 0x8e, 0xe6, 0xf4, 0xc3, 0x42, 0x5c, 0x4e, 0x33, 0x93, 0xbe, 0x30, 0xe7, 0x64, 0xa9, 0x35, 0x00, 0x4d, 0xf9,
- /* (2^ 64)P */ 0x1f, 0xc1, 0x1e, 0xb7, 0xe3, 0x7c, 0xfa, 0xa3, 0x6b, 0x76, 0xaf, 0x9c, 0x05, 0x85, 0x4a, 0xa9, 0xfb, 0xe3, 0x7e, 0xf2, 0x49, 0x56, 0xdc, 0x2f, 0x57, 0x10, 0xba, 0x37, 0xb2, 0x62, 0xf5, 0x6b, 0xe5, 0x8f, 0x0a, 0x87, 0xd1, 0x6a, 0xcb, 0x9d, 0x07, 0xd0, 0xf6, 0x38, 0x99, 0x2c, 0x61, 0x4a, 0x4e, 0xd8, 0xd2, 0x88, 0x29, 0x99, 0x11, 0x95,
- /* (2^ 65)P */ 0x6f, 0xdc, 0xd5, 0xd6, 0xd6, 0xa7, 0x4c, 0x46, 0x93, 0x65, 0x62, 0x23, 0x95, 0x32, 0x9c, 0xde, 0x40, 0x41, 0x68, 0x2c, 0x18, 0x4e, 0x5a, 0x8c, 0xc0, 0xc5, 0xc5, 0xea, 0x5c, 0x45, 0x0f, 0x60, 0x78, 0x39, 0xb6, 0x36, 0x23, 0x12, 0xbc, 0x21, 0x9a, 0xf8, 0x91, 0xac, 0xc4, 0x70, 0xdf, 0x85, 0x8e, 0x3c, 0xec, 0x22, 0x04, 0x98, 0xa8, 0xaa,
- /* (2^ 66)P */ 0xcc, 0x52, 0x10, 0x5b, 0x4b, 0x6c, 0xc5, 0xfa, 0x3e, 0xd4, 0xf8, 0x1c, 0x04, 0x14, 0x48, 0x33, 0xd9, 0xfc, 0x5f, 0xb0, 0xa5, 0x48, 0x8c, 0x45, 0x8a, 0xee, 0x3e, 0xa7, 0xc1, 0x2e, 0x34, 0xca, 0xf6, 0xc9, 0xeb, 0x10, 0xbb, 0xe1, 0x59, 0x84, 0x25, 0xe8, 0x81, 0x70, 0xc0, 0x09, 0x42, 0xa7, 0x3b, 0x0d, 0x33, 0x00, 0xb5, 0x77, 0xbe, 0x25,
- /* (2^ 67)P */ 0xcd, 0x1f, 0xbc, 0x7d, 0xef, 0xe5, 0xca, 0x91, 0xaf, 0xa9, 0x59, 0x6a, 0x09, 0xca, 0xd6, 0x1b, 0x3d, 0x55, 0xde, 0xa2, 0x6a, 0x80, 0xd6, 0x95, 0x47, 0xe4, 0x5f, 0x68, 0x54, 0x08, 0xdf, 0x29, 0xba, 0x2a, 0x02, 0x84, 0xe8, 0xe9, 0x00, 0x77, 0x99, 0x36, 0x03, 0xf6, 0x4a, 0x3e, 0x21, 0x81, 0x7d, 0xb8, 0xa4, 0x8a, 0xa2, 0x05, 0xef, 0xbc,
- /* (2^ 68)P */ 0x7c, 0x59, 0x5f, 0x66, 0xd9, 0xb7, 0x83, 0x43, 0x8a, 0xa1, 0x8d, 0x51, 0x70, 0xba, 0xf2, 0x9b, 0x95, 0xc0, 0x4b, 0x4c, 0xa0, 0x14, 0xd3, 0xa4, 0x5d, 0x4a, 0x37, 0x36, 0x97, 0x31, 0x1e, 0x12, 0xe7, 0xbb, 0x08, 0x67, 0xa5, 0x23, 0xd7, 0xfb, 0x97, 0xd8, 0x6a, 0x03, 0xb1, 0xf8, 0x7f, 0xda, 0x58, 0xd9, 0x3f, 0x73, 0x4a, 0x53, 0xe1, 0x7b,
- /* (2^ 69)P */ 0x55, 0x83, 0x98, 0x78, 0x6c, 0x56, 0x5e, 0xed, 0xf7, 0x23, 0x3e, 0x4c, 0x7d, 0x09, 0x2d, 0x09, 0x9c, 0x58, 0x8b, 0x32, 0xca, 0xfe, 0xbf, 0x47, 0x03, 0xeb, 0x4d, 0xe7, 0xeb, 0x9c, 0x83, 0x05, 0x68, 0xaa, 0x80, 0x89, 0x44, 0xf9, 0xd4, 0xdc, 0xdb, 0xb1, 0xdb, 0x77, 0xac, 0xf9, 0x2a, 0xae, 0x35, 0xac, 0x74, 0xb5, 0x95, 0x62, 0x18, 0x85,
- /* (2^ 70)P */ 0xab, 0x82, 0x7e, 0x10, 0xd7, 0xe6, 0x57, 0xd1, 0x66, 0x12, 0x31, 0x9c, 0x9c, 0xa6, 0x27, 0x59, 0x71, 0x2e, 0xeb, 0xa0, 0x68, 0xc5, 0x87, 0x51, 0xf4, 0xca, 0x3f, 0x98, 0x56, 0xb0, 0x89, 0xb1, 0xc7, 0x7b, 0x46, 0xb3, 0xae, 0x36, 0xf2, 0xee, 0x15, 0x1a, 0x60, 0xf4, 0x50, 0x76, 0x4f, 0xc4, 0x53, 0x0d, 0x36, 0x4d, 0x31, 0xb1, 0x20, 0x51,
- /* (2^ 71)P */ 0xf7, 0x1d, 0x8c, 0x1b, 0x5e, 0xe5, 0x02, 0x6f, 0xc5, 0xa5, 0xe0, 0x5f, 0xc6, 0xb6, 0x63, 0x43, 0xaf, 0x3c, 0x19, 0x6c, 0xf4, 0xaf, 0xa4, 0x33, 0xb1, 0x0a, 0x37, 0x3d, 0xd9, 0x4d, 0xe2, 0x29, 0x24, 0x26, 0x94, 0x7c, 0x02, 0xe4, 0xe2, 0xf2, 0xbe, 0xbd, 0xac, 0x1b, 0x48, 0xb8, 0xdd, 0xe9, 0x0d, 0x9a, 0x50, 0x1a, 0x98, 0x71, 0x6e, 0xdc,
- /* (2^ 72)P */ 0x9f, 0x40, 0xb1, 0xb3, 0x66, 0x28, 0x6c, 0xfe, 0xa6, 0x7d, 0xf8, 0x3e, 0xb8, 0xf3, 0xde, 0x52, 0x76, 0x52, 0xa3, 0x92, 0x98, 0x23, 0xab, 0x4f, 0x88, 0x97, 0xfc, 0x22, 0xe1, 0x6b, 0x67, 0xcd, 0x13, 0x95, 0xda, 0x65, 0xdd, 0x3b, 0x67, 0x3f, 0x5f, 0x4c, 0xf2, 0x8a, 0xad, 0x98, 0xa7, 0x94, 0x24, 0x45, 0x87, 0x11, 0x7c, 0x75, 0x79, 0x85,
- /* (2^ 73)P */ 0x70, 0xbf, 0xf9, 0x3b, 0xa9, 0x44, 0x57, 0x72, 0x96, 0xc9, 0xa4, 0x98, 0x65, 0xbf, 0x87, 0xb3, 0x3a, 0x39, 0x12, 0xde, 0xe5, 0x39, 0x01, 0x4f, 0xf7, 0xc0, 0x71, 0x52, 0x36, 0x85, 0xb3, 0x18, 0xf8, 0x14, 0xc0, 0x6d, 0xae, 0x9e, 0x4f, 0xb0, 0x72, 0x87, 0xac, 0x5c, 0xd1, 0x6c, 0x41, 0x6c, 0x90, 0x9d, 0x22, 0x81, 0xe4, 0x2b, 0xea, 0xe5,
- /* (2^ 74)P */ 0xfc, 0xea, 0x1a, 0x65, 0xd9, 0x49, 0x6a, 0x39, 0xb5, 0x96, 0x72, 0x7b, 0x32, 0xf1, 0xd0, 0xe9, 0x45, 0xd9, 0x31, 0x55, 0xc7, 0x34, 0xe9, 0x5a, 0xec, 0x73, 0x0b, 0x03, 0xc4, 0xb3, 0xe6, 0xc9, 0x5e, 0x0a, 0x17, 0xfe, 0x53, 0x66, 0x7f, 0x21, 0x18, 0x74, 0x54, 0x1b, 0xc9, 0x49, 0x16, 0xd2, 0x48, 0xaf, 0x5b, 0x47, 0x7b, 0xeb, 0xaa, 0xc9,
- /* (2^ 75)P */ 0x47, 0x04, 0xf5, 0x5a, 0x87, 0x77, 0x9e, 0x21, 0x34, 0x4e, 0x83, 0x88, 0xaf, 0x02, 0x1d, 0xb0, 0x5a, 0x1d, 0x1d, 0x7d, 0x8d, 0x2c, 0xd3, 0x8d, 0x63, 0xa9, 0x45, 0xfb, 0x15, 0x6d, 0x86, 0x45, 0xcd, 0x38, 0x0e, 0xf7, 0x37, 0x79, 0xed, 0x6d, 0x5a, 0xbc, 0x32, 0xcc, 0x66, 0xf1, 0x3a, 0xb2, 0x87, 0x6f, 0x70, 0x71, 0xd9, 0xf2, 0xfa, 0x7b,
- /* (2^ 76)P */ 0x68, 0x07, 0xdc, 0x61, 0x40, 0xe4, 0xec, 0x32, 0xc8, 0xbe, 0x66, 0x30, 0x54, 0x80, 0xfd, 0x13, 0x7a, 0xef, 0xae, 0xed, 0x2e, 0x00, 0x6d, 0x3f, 0xbd, 0xfc, 0x91, 0x24, 0x53, 0x7f, 0x63, 0x9d, 0x2e, 0xe3, 0x76, 0xe0, 0xf3, 0xe1, 0x8f, 0x7a, 0xc4, 0x77, 0x0c, 0x91, 0xc0, 0xc2, 0x18, 0x6b, 0x04, 0xad, 0xb6, 0x70, 0x9a, 0x64, 0xc5, 0x82,
- /* (2^ 77)P */ 0x7f, 0xea, 0x13, 0xd8, 0x9e, 0xfc, 0x5b, 0x06, 0xb5, 0x4f, 0xda, 0x38, 0xe0, 0x9c, 0xd2, 0x3a, 0xc1, 0x1c, 0x62, 0x70, 0x7f, 0xc6, 0x24, 0x0a, 0x47, 0x04, 0x01, 0xc4, 0x55, 0x09, 0xd1, 0x7a, 0x07, 0xba, 0xa3, 0x80, 0x4f, 0xc1, 0x65, 0x36, 0x6d, 0xc0, 0x10, 0xcf, 0x94, 0xa9, 0xa2, 0x01, 0x44, 0xd1, 0xf9, 0x1c, 0x4c, 0xfb, 0xf8, 0x99,
- /* (2^ 78)P */ 0x6c, 0xb9, 0x6b, 0xee, 0x43, 0x5b, 0xb9, 0xbb, 0xee, 0x2e, 0x52, 0xc1, 0xc6, 0xb9, 0x61, 0xd2, 0x93, 0xa5, 0xaf, 0x52, 0xf4, 0xa4, 0x1a, 0x51, 0x61, 0xa7, 0xcb, 0x9e, 0xbb, 0x56, 0x65, 0xe2, 0xbf, 0x75, 0xb9, 0x9c, 0x50, 0x96, 0x60, 0x81, 0x74, 0x47, 0xc0, 0x04, 0x88, 0x71, 0x76, 0x39, 0x9a, 0xa7, 0xb1, 0x4e, 0x43, 0x15, 0xe0, 0xbb,
- /* (2^ 79)P */ 0xbb, 0xce, 0xe2, 0xbb, 0xf9, 0x17, 0x0f, 0x82, 0x40, 0xad, 0x73, 0xe3, 0xeb, 0x3b, 0x06, 0x1a, 0xcf, 0x8e, 0x6e, 0x28, 0xb8, 0x26, 0xd9, 0x5b, 0xb7, 0xb3, 0xcf, 0xb4, 0x6a, 0x1c, 0xbf, 0x7f, 0xb8, 0xb5, 0x79, 0xcf, 0x45, 0x68, 0x7d, 0xc5, 0xeb, 0xf3, 0xbe, 0x39, 0x40, 0xfc, 0x07, 0x90, 0x7a, 0x62, 0xad, 0x86, 0x08, 0x71, 0x25, 0xe1,
- /* (2^ 80)P */ 0x9b, 0x46, 0xac, 0xef, 0xc1, 0x4e, 0xa1, 0x97, 0x95, 0x76, 0xf9, 0x1b, 0xc2, 0xb2, 0x6a, 0x41, 0xea, 0x80, 0x3d, 0xe9, 0x08, 0x52, 0x5a, 0xe3, 0xf2, 0x08, 0xc5, 0xea, 0x39, 0x3f, 0x44, 0x71, 0x4d, 0xea, 0x0d, 0x05, 0x23, 0xe4, 0x2e, 0x3c, 0x89, 0xfe, 0x12, 0x8a, 0x95, 0x42, 0x0a, 0x68, 0xea, 0x5a, 0x28, 0x06, 0x9e, 0xe3, 0x5f, 0xe0,
- /* (2^ 81)P */ 0x00, 0x61, 0x6c, 0x98, 0x9b, 0xe7, 0xb9, 0x06, 0x1c, 0xc5, 0x1b, 0xed, 0xbe, 0xc8, 0xb3, 0xea, 0x87, 0xf0, 0xc4, 0x24, 0x7d, 0xbb, 0x5d, 0xa4, 0x1d, 0x7a, 0x16, 0x00, 0x55, 0x94, 0x67, 0x78, 0xbd, 0x58, 0x02, 0x82, 0x90, 0x53, 0x76, 0xd4, 0x72, 0x99, 0x51, 0x6f, 0x7b, 0xcf, 0x80, 0x30, 0x31, 0x3b, 0x01, 0xc7, 0xc1, 0xef, 0xe6, 0x42,
- /* (2^ 82)P */ 0xe2, 0x35, 0xaf, 0x4b, 0x79, 0xc6, 0x12, 0x24, 0x99, 0xc0, 0x68, 0xb0, 0x43, 0x3e, 0xe5, 0xef, 0xe2, 0x29, 0xea, 0xb8, 0xb3, 0xbc, 0x6a, 0x53, 0x2c, 0x69, 0x18, 0x5a, 0xf9, 0x15, 0xae, 0x66, 0x58, 0x18, 0xd3, 0x2d, 0x4b, 0x00, 0xfd, 0x84, 0xab, 0x4f, 0xae, 0x70, 0x6b, 0x9e, 0x9a, 0xdf, 0x83, 0xfd, 0x2e, 0x3c, 0xcf, 0xf8, 0x88, 0x5b,
- /* (2^ 83)P */ 0xa4, 0x90, 0x31, 0x85, 0x13, 0xcd, 0xdf, 0x64, 0xc9, 0xa1, 0x0b, 0xe7, 0xb6, 0x73, 0x8a, 0x1b, 0x22, 0x78, 0x4c, 0xd4, 0xae, 0x48, 0x18, 0x00, 0x00, 0xa8, 0x9f, 0x06, 0xf9, 0xfb, 0x2d, 0xc3, 0xb1, 0x2a, 0xbc, 0x13, 0x99, 0x57, 0xaf, 0xf0, 0x8d, 0x61, 0x54, 0x29, 0xd5, 0xf2, 0x72, 0x00, 0x96, 0xd1, 0x85, 0x12, 0x8a, 0xf0, 0x23, 0xfb,
- /* (2^ 84)P */ 0x69, 0xc7, 0xdb, 0xd9, 0x92, 0x75, 0x08, 0x9b, 0xeb, 0xa5, 0x93, 0xd1, 0x1a, 0xf4, 0xf5, 0xaf, 0xe6, 0xc4, 0x4a, 0x0d, 0x35, 0x26, 0x39, 0x9d, 0xd3, 0x17, 0x3e, 0xae, 0x2d, 0xbf, 0x73, 0x9f, 0xb7, 0x74, 0x91, 0xd1, 0xd8, 0x5c, 0x14, 0xf9, 0x75, 0xdf, 0xeb, 0xc2, 0x22, 0xd8, 0x14, 0x8d, 0x86, 0x23, 0x4d, 0xd1, 0x2d, 0xdb, 0x6b, 0x42,
- /* (2^ 85)P */ 0x8c, 0xda, 0xc6, 0xf8, 0x71, 0xba, 0x2b, 0x06, 0x78, 0xae, 0xcc, 0x3a, 0xe3, 0xe3, 0xa1, 0x8b, 0xe2, 0x34, 0x6d, 0x28, 0x9e, 0x46, 0x13, 0x4d, 0x9e, 0xa6, 0x73, 0x49, 0x65, 0x79, 0x88, 0xb9, 0x3a, 0xd1, 0x6d, 0x2f, 0x48, 0x2b, 0x0a, 0x7f, 0x58, 0x20, 0x37, 0xf4, 0x0e, 0xbb, 0x4a, 0x95, 0x58, 0x0c, 0x88, 0x30, 0xc4, 0x74, 0xdd, 0xfd,
- /* (2^ 86)P */ 0x6d, 0x13, 0x4e, 0x89, 0x2d, 0xa9, 0xa3, 0xed, 0x09, 0xe3, 0x0e, 0x71, 0x3e, 0x4a, 0xab, 0x90, 0xde, 0x03, 0xeb, 0x56, 0x46, 0x60, 0x06, 0xf5, 0x71, 0xe5, 0xee, 0x9b, 0xef, 0xff, 0xc4, 0x2c, 0x9f, 0x37, 0x48, 0x45, 0x94, 0x12, 0x41, 0x81, 0x15, 0x70, 0x91, 0x99, 0x5e, 0x56, 0x6b, 0xf4, 0xa6, 0xc9, 0xf5, 0x69, 0x9d, 0x78, 0x37, 0x57,
- /* (2^ 87)P */ 0xf3, 0x51, 0x57, 0x7e, 0x43, 0x6f, 0xc6, 0x67, 0x59, 0x0c, 0xcf, 0x94, 0xe6, 0x3d, 0xb5, 0x07, 0xc9, 0x77, 0x48, 0xc9, 0x68, 0x0d, 0x98, 0x36, 0x62, 0x35, 0x38, 0x1c, 0xf5, 0xc5, 0xec, 0x66, 0x78, 0xfe, 0x47, 0xab, 0x26, 0xd6, 0x44, 0xb6, 0x06, 0x0f, 0x89, 0xe3, 0x19, 0x40, 0x1a, 0xe7, 0xd8, 0x65, 0x55, 0xf7, 0x1a, 0xfc, 0xa3, 0x0e,
- /* (2^ 88)P */ 0x0e, 0x30, 0xa6, 0xb7, 0x58, 0x60, 0x62, 0x2a, 0x6c, 0x13, 0xa8, 0x14, 0x9b, 0xb8, 0xf2, 0x70, 0xd8, 0xb1, 0x71, 0x88, 0x8c, 0x18, 0x31, 0x25, 0x93, 0x90, 0xb4, 0xc7, 0x49, 0xd8, 0xd4, 0xdb, 0x1e, 0x1e, 0x7f, 0xaa, 0xba, 0xc9, 0xf2, 0x5d, 0xa9, 0x3a, 0x43, 0xb4, 0x5c, 0xee, 0x7b, 0xc7, 0x97, 0xb7, 0x66, 0xd7, 0x23, 0xd9, 0x22, 0x59,
- /* (2^ 89)P */ 0x28, 0x19, 0xa6, 0xf9, 0x89, 0x20, 0x78, 0xd4, 0x6d, 0xcb, 0x79, 0x8f, 0x61, 0x6f, 0xb2, 0x5c, 0x4f, 0xa6, 0x54, 0x84, 0x95, 0x24, 0x36, 0x64, 0xcb, 0x39, 0xe7, 0x8f, 0x97, 0x9c, 0x5c, 0x3c, 0xfb, 0x51, 0x11, 0x01, 0x17, 0xdb, 0xc9, 0x9b, 0x51, 0x03, 0x9a, 0xe9, 0xe5, 0x24, 0x1e, 0xf5, 0xda, 0xe0, 0x48, 0x02, 0x23, 0xd0, 0x2c, 0x81,
- /* (2^ 90)P */ 0x42, 0x1b, 0xe4, 0x91, 0x85, 0x2a, 0x0c, 0xd2, 0x28, 0x66, 0x57, 0x9e, 0x33, 0x8d, 0x25, 0x71, 0x10, 0x65, 0x76, 0xa2, 0x8c, 0x21, 0x86, 0x81, 0x15, 0xc2, 0x27, 0xeb, 0x54, 0x2d, 0x4f, 0x6c, 0xe6, 0xd6, 0x24, 0x9c, 0x1a, 0x12, 0xb8, 0x81, 0xe2, 0x0a, 0xf3, 0xd3, 0xf0, 0xd3, 0xe1, 0x74, 0x1f, 0x9b, 0x11, 0x47, 0xd0, 0xcf, 0xb6, 0x54,
- /* (2^ 91)P */ 0x26, 0x45, 0xa2, 0x10, 0xd4, 0x2d, 0xae, 0xc0, 0xb0, 0xe8, 0x86, 0xb3, 0xc7, 0xea, 0x70, 0x87, 0x61, 0xb5, 0xa5, 0x55, 0xbe, 0x88, 0x1d, 0x7a, 0xd9, 0x6f, 0xeb, 0x83, 0xe2, 0x44, 0x7f, 0x98, 0x04, 0xd6, 0x50, 0x9d, 0xa7, 0x86, 0x66, 0x09, 0x63, 0xe1, 0xed, 0x72, 0xb1, 0xe4, 0x1d, 0x3a, 0xfd, 0x47, 0xce, 0x1c, 0xaa, 0x3b, 0x8f, 0x1b,
- /* (2^ 92)P */ 0xf4, 0x3c, 0x4a, 0xb6, 0xc2, 0x9c, 0xe0, 0x2e, 0xb7, 0x38, 0xea, 0x61, 0x35, 0x97, 0x10, 0x90, 0xae, 0x22, 0x48, 0xb3, 0xa9, 0xc6, 0x7a, 0xbb, 0x23, 0xf2, 0xf8, 0x1b, 0xa7, 0xa1, 0x79, 0xcc, 0xc4, 0xf8, 0x08, 0x76, 0x8a, 0x5a, 0x1c, 0x1b, 0xc5, 0x33, 0x91, 0xa9, 0xb8, 0xb9, 0xd3, 0xf8, 0x49, 0xcd, 0xe5, 0x82, 0x43, 0xf7, 0xca, 0x68,
- /* (2^ 93)P */ 0x38, 0xba, 0xae, 0x44, 0xfe, 0x57, 0x64, 0x56, 0x7c, 0x0e, 0x9c, 0xca, 0xff, 0xa9, 0x82, 0xbb, 0x38, 0x4a, 0xa7, 0xf7, 0x47, 0xab, 0xbe, 0x6d, 0x23, 0x0b, 0x8a, 0xed, 0xc2, 0xb9, 0x8f, 0xf1, 0xec, 0x91, 0x44, 0x73, 0x64, 0xba, 0xd5, 0x8f, 0x37, 0x38, 0x0d, 0xd5, 0xf8, 0x73, 0x57, 0xb6, 0xc2, 0x45, 0xdc, 0x25, 0xb2, 0xb6, 0xea, 0xd9,
- /* (2^ 94)P */ 0xbf, 0xe9, 0x1a, 0x40, 0x4d, 0xcc, 0xe6, 0x1d, 0x70, 0x1a, 0x65, 0xcc, 0x34, 0x2c, 0x37, 0x2c, 0x2d, 0x6b, 0x6d, 0xe5, 0x2f, 0x19, 0x9e, 0xe4, 0xe1, 0xaa, 0xd4, 0xab, 0x54, 0xf4, 0xa8, 0xe4, 0x69, 0x2d, 0x8e, 0x4d, 0xd7, 0xac, 0xb0, 0x5b, 0xfe, 0xe3, 0x26, 0x07, 0xc3, 0xf8, 0x1b, 0x43, 0xa8, 0x1d, 0x64, 0xa5, 0x25, 0x88, 0xbb, 0x77,
- /* (2^ 95)P */ 0x92, 0xcd, 0x6e, 0xa0, 0x79, 0x04, 0x18, 0xf4, 0x11, 0x58, 0x48, 0xb5, 0x3c, 0x7b, 0xd1, 0xcc, 0xd3, 0x14, 0x2c, 0xa0, 0xdd, 0x04, 0x44, 0x11, 0xb3, 0x6d, 0x2f, 0x0d, 0xf5, 0x2a, 0x75, 0x5d, 0x1d, 0xda, 0x86, 0x8d, 0x7d, 0x6b, 0x32, 0x68, 0xb6, 0x6c, 0x64, 0x9e, 0xde, 0x80, 0x88, 0xce, 0x08, 0xbf, 0x0b, 0xe5, 0x8e, 0x4f, 0x1d, 0xfb,
- /* (2^ 96)P */ 0xaf, 0xe8, 0x85, 0xbf, 0x7f, 0x37, 0x8d, 0x66, 0x7c, 0xd5, 0xd3, 0x96, 0xa5, 0x81, 0x67, 0x95, 0xff, 0x48, 0xde, 0xde, 0xd7, 0x7a, 0x46, 0x34, 0xb1, 0x13, 0x70, 0x29, 0xed, 0x87, 0x90, 0xb0, 0x40, 0x2c, 0xa6, 0x43, 0x6e, 0xb6, 0xbc, 0x48, 0x8a, 0xc1, 0xae, 0xb8, 0xd4, 0xe2, 0xc0, 0x32, 0xb2, 0xa6, 0x2a, 0x8f, 0xb5, 0x16, 0x9e, 0xc3,
- /* (2^ 97)P */ 0xff, 0x4d, 0xd2, 0xd6, 0x74, 0xef, 0x2c, 0x96, 0xc1, 0x11, 0xa8, 0xb8, 0xfe, 0x94, 0x87, 0x3e, 0xa0, 0xfb, 0x57, 0xa3, 0xfc, 0x7a, 0x7e, 0x6a, 0x59, 0x6c, 0x54, 0xbb, 0xbb, 0xa2, 0x25, 0x38, 0x1b, 0xdf, 0x5d, 0x7b, 0x94, 0x14, 0xde, 0x07, 0x6e, 0xd3, 0xab, 0x02, 0x26, 0x74, 0x16, 0x12, 0xdf, 0x2e, 0x2a, 0xa7, 0xb0, 0xe8, 0x29, 0xc0,
- /* (2^ 98)P */ 0x6a, 0x38, 0x0b, 0xd3, 0xba, 0x45, 0x23, 0xe0, 0x04, 0x3b, 0x83, 0x39, 0xc5, 0x11, 0xe6, 0xcf, 0x39, 0x0a, 0xb3, 0xb0, 0x3b, 0x27, 0x29, 0x63, 0x1c, 0xf3, 0x00, 0xe6, 0xd2, 0x55, 0x21, 0x1f, 0x84, 0x97, 0x9f, 0x01, 0x49, 0x43, 0x30, 0x5f, 0xe0, 0x1d, 0x24, 0xc4, 0x4e, 0xa0, 0x2b, 0x0b, 0x12, 0x55, 0xc3, 0x27, 0xae, 0x08, 0x83, 0x7c,
- /* (2^ 99)P */ 0x5d, 0x1a, 0xb7, 0xa9, 0xf5, 0xfd, 0xec, 0xad, 0xb7, 0x87, 0x02, 0x5f, 0x0d, 0x30, 0x4d, 0xe2, 0x65, 0x87, 0xa4, 0x41, 0x45, 0x1d, 0x67, 0xe0, 0x30, 0x5c, 0x13, 0x87, 0xf6, 0x2e, 0x08, 0xc1, 0xc7, 0x12, 0x45, 0xc8, 0x9b, 0xad, 0xb8, 0xd5, 0x57, 0xbb, 0x5c, 0x48, 0x3a, 0xe1, 0x91, 0x5e, 0xf6, 0x4d, 0x8a, 0x63, 0x75, 0x69, 0x0c, 0x01,
- /* (2^100)P */ 0x8f, 0x53, 0x2d, 0xa0, 0x71, 0x3d, 0xfc, 0x45, 0x10, 0x96, 0xcf, 0x56, 0xf9, 0xbb, 0x40, 0x3c, 0x86, 0x52, 0x76, 0xbe, 0x84, 0xf9, 0xa6, 0x9d, 0x3d, 0x27, 0xbe, 0xb4, 0x00, 0x49, 0x94, 0xf5, 0x5d, 0xe1, 0x62, 0x85, 0x66, 0xe5, 0xb8, 0x20, 0x2c, 0x09, 0x7d, 0x9d, 0x3d, 0x6e, 0x74, 0x39, 0xab, 0xad, 0xa0, 0x90, 0x97, 0x5f, 0xbb, 0xa7,
- /* (2^101)P */ 0xdb, 0x2d, 0x99, 0x08, 0x16, 0x46, 0x83, 0x7a, 0xa8, 0xea, 0x3d, 0x28, 0x5b, 0x49, 0xfc, 0xb9, 0x6d, 0x00, 0x9e, 0x54, 0x4f, 0x47, 0x64, 0x9b, 0x58, 0x4d, 0x07, 0x0c, 0x6f, 0x29, 0x56, 0x0b, 0x00, 0x14, 0x85, 0x96, 0x41, 0x04, 0xb9, 0x5c, 0xa4, 0xf6, 0x16, 0x73, 0x6a, 0xc7, 0x62, 0x0c, 0x65, 0x2f, 0x93, 0xbf, 0xf7, 0xb9, 0xb7, 0xf1,
- /* (2^102)P */ 0xeb, 0x6d, 0xb3, 0x46, 0x32, 0xd2, 0xcb, 0x08, 0x94, 0x14, 0xbf, 0x3f, 0xc5, 0xcb, 0x5f, 0x9f, 0x8a, 0x89, 0x0c, 0x1b, 0x45, 0xad, 0x4c, 0x50, 0xb4, 0xe1, 0xa0, 0x6b, 0x11, 0x92, 0xaf, 0x1f, 0x00, 0xcc, 0xe5, 0x13, 0x7e, 0xe4, 0x2e, 0xa0, 0x57, 0xf3, 0xa7, 0x84, 0x79, 0x7a, 0xc2, 0xb7, 0xb7, 0xfc, 0x5d, 0xa5, 0xa9, 0x64, 0xcc, 0xd8,
- /* (2^103)P */ 0xa9, 0xc4, 0x12, 0x8b, 0x34, 0x78, 0x3e, 0x38, 0xfd, 0x3f, 0x87, 0xfa, 0x88, 0x94, 0xd5, 0xd9, 0x7f, 0xeb, 0x58, 0xff, 0xb9, 0x45, 0xdb, 0xa1, 0xed, 0x22, 0x28, 0x1d, 0x00, 0x6d, 0x79, 0x85, 0x7a, 0x75, 0x5d, 0xf0, 0xb1, 0x9e, 0x47, 0x28, 0x8c, 0x62, 0xdf, 0xfb, 0x4c, 0x7b, 0xc5, 0x1a, 0x42, 0x95, 0xef, 0x9a, 0xb7, 0x27, 0x7e, 0xda,
- /* (2^104)P */ 0xca, 0xd5, 0xc0, 0x17, 0xa1, 0x66, 0x79, 0x9c, 0x2a, 0xb7, 0x0a, 0xfe, 0x62, 0xe4, 0x26, 0x78, 0x90, 0xa7, 0xcb, 0xb0, 0x4f, 0x6d, 0xf9, 0x8f, 0xf7, 0x7d, 0xac, 0xb8, 0x78, 0x1f, 0x41, 0xea, 0x97, 0x1e, 0x62, 0x97, 0x43, 0x80, 0x58, 0x80, 0xb6, 0x69, 0x7d, 0xee, 0x16, 0xd2, 0xa1, 0x81, 0xd7, 0xb1, 0x27, 0x03, 0x48, 0xda, 0xab, 0xec,
- /* (2^105)P */ 0x5b, 0xed, 0x40, 0x8e, 0x8c, 0xc1, 0x66, 0x90, 0x7f, 0x0c, 0xb2, 0xfc, 0xbd, 0x16, 0xac, 0x7d, 0x4c, 0x6a, 0xf9, 0xae, 0xe7, 0x4e, 0x11, 0x12, 0xe9, 0xbe, 0x17, 0x09, 0xc6, 0xc1, 0x5e, 0xb5, 0x7b, 0x50, 0x5c, 0x27, 0xfb, 0x80, 0xab, 0x01, 0xfa, 0x5b, 0x9b, 0x75, 0x16, 0x6e, 0xb2, 0x5c, 0x8c, 0x2f, 0xa5, 0x6a, 0x1a, 0x68, 0xa6, 0x90,
- /* (2^106)P */ 0x75, 0xfe, 0xb6, 0x96, 0x96, 0x87, 0x4c, 0x12, 0xa9, 0xd1, 0xd8, 0x03, 0xa3, 0xc1, 0x15, 0x96, 0xe8, 0xa0, 0x75, 0x82, 0xa0, 0x6d, 0xea, 0x54, 0xdc, 0x5f, 0x0d, 0x7e, 0xf6, 0x70, 0xb5, 0xdc, 0x7a, 0xf6, 0xc4, 0xd4, 0x21, 0x49, 0xf5, 0xd4, 0x14, 0x6d, 0x48, 0x1d, 0x7c, 0x99, 0x42, 0xdf, 0x78, 0x6b, 0x9d, 0xb9, 0x30, 0x3c, 0xd0, 0x29,
- /* (2^107)P */ 0x85, 0xd6, 0xd8, 0xf3, 0x91, 0x74, 0xdd, 0xbd, 0x72, 0x96, 0x10, 0xe4, 0x76, 0x02, 0x5a, 0x72, 0x67, 0xd3, 0x17, 0x72, 0x14, 0x9a, 0x20, 0x5b, 0x0f, 0x8d, 0xed, 0x6d, 0x4e, 0xe3, 0xd9, 0x82, 0xc2, 0x99, 0xee, 0x39, 0x61, 0x69, 0x8a, 0x24, 0x01, 0x92, 0x15, 0xe7, 0xfc, 0xf9, 0x4d, 0xac, 0xf1, 0x30, 0x49, 0x01, 0x0b, 0x6e, 0x0f, 0x20,
- /* (2^108)P */ 0xd8, 0x25, 0x94, 0x5e, 0x43, 0x29, 0xf5, 0xcc, 0xe8, 0xe3, 0x55, 0x41, 0x3c, 0x9f, 0x58, 0x5b, 0x00, 0xeb, 0xc5, 0xdf, 0xcf, 0xfb, 0xfd, 0x6e, 0x92, 0xec, 0x99, 0x30, 0xd6, 0x05, 0xdd, 0x80, 0x7a, 0x5d, 0x6d, 0x16, 0x85, 0xd8, 0x9d, 0x43, 0x65, 0xd8, 0x2c, 0x33, 0x2f, 0x5c, 0x41, 0xea, 0xb7, 0x95, 0x77, 0xf2, 0x9e, 0x59, 0x09, 0xe8,
- /* (2^109)P */ 0x00, 0xa0, 0x03, 0x80, 0xcd, 0x60, 0xe5, 0x17, 0xd4, 0x15, 0x99, 0xdd, 0x4f, 0xbf, 0x66, 0xb8, 0xc0, 0xf5, 0xf9, 0xfc, 0x6d, 0x42, 0x18, 0x34, 0x1c, 0x7d, 0x5b, 0xb5, 0x09, 0xd0, 0x99, 0x57, 0x81, 0x0b, 0x62, 0xb3, 0xa2, 0xf9, 0x0b, 0xae, 0x95, 0xb8, 0xc2, 0x3b, 0x0d, 0x5b, 0x00, 0xf1, 0xed, 0xbc, 0x05, 0x9d, 0x61, 0xbc, 0x73, 0x9d,
- /* (2^110)P */ 0xd4, 0xdb, 0x29, 0xe5, 0x85, 0xe9, 0xc6, 0x89, 0x2a, 0xa8, 0x54, 0xab, 0xb3, 0x7f, 0x88, 0xc0, 0x4d, 0xe0, 0xd1, 0x74, 0x6e, 0xa3, 0xa7, 0x39, 0xd5, 0xcc, 0xa1, 0x8a, 0xcb, 0x5b, 0x34, 0xad, 0x92, 0xb4, 0xd8, 0xd5, 0x17, 0xf6, 0x77, 0x18, 0x9e, 0xaf, 0x45, 0x3b, 0x03, 0xe2, 0xf8, 0x52, 0x60, 0xdc, 0x15, 0x20, 0x9e, 0xdf, 0xd8, 0x5d,
- /* (2^111)P */ 0x02, 0xc1, 0xac, 0x1a, 0x15, 0x8e, 0x6c, 0xf5, 0x1e, 0x1e, 0xba, 0x7e, 0xc2, 0xda, 0x7d, 0x02, 0xda, 0x43, 0xae, 0x04, 0x70, 0x28, 0x54, 0x78, 0x94, 0xf5, 0x4f, 0x07, 0x84, 0x8f, 0xed, 0xaa, 0xc0, 0xb8, 0xcd, 0x7f, 0x7e, 0x33, 0xa3, 0xbe, 0x21, 0x29, 0xc8, 0x56, 0x34, 0xc0, 0x76, 0x87, 0x8f, 0xc7, 0x73, 0x58, 0x90, 0x16, 0xfc, 0xd6,
- /* (2^112)P */ 0xb8, 0x3f, 0xe1, 0xdf, 0x3a, 0x91, 0x25, 0x0c, 0xf6, 0x47, 0xa8, 0x89, 0xc4, 0xc6, 0x61, 0xec, 0x86, 0x2c, 0xfd, 0xbe, 0xa4, 0x6f, 0xc2, 0xd4, 0x46, 0x19, 0x70, 0x5d, 0x09, 0x02, 0x86, 0xd3, 0x4b, 0xe9, 0x16, 0x7b, 0xf0, 0x0d, 0x6c, 0xff, 0x91, 0x05, 0xbf, 0x55, 0xb4, 0x00, 0x8d, 0xe5, 0x6d, 0x68, 0x20, 0x90, 0x12, 0xb5, 0x5c, 0x32,
- /* (2^113)P */ 0x80, 0x45, 0xc8, 0x51, 0x87, 0xba, 0x1c, 0x5c, 0xcf, 0x5f, 0x4b, 0x3c, 0x9e, 0x3b, 0x36, 0xd2, 0x26, 0xa2, 0x7f, 0xab, 0xb7, 0xbf, 0xda, 0x68, 0x23, 0x8f, 0xc3, 0xa0, 0xfd, 0xad, 0xf1, 0x56, 0x3b, 0xd0, 0x75, 0x2b, 0x44, 0x61, 0xd8, 0xf4, 0xf1, 0x05, 0x49, 0x53, 0x07, 0xee, 0x47, 0xef, 0xc0, 0x7c, 0x9d, 0xe4, 0x15, 0x88, 0xc5, 0x47,
- /* (2^114)P */ 0x2d, 0xb5, 0x09, 0x80, 0xb9, 0xd3, 0xd8, 0xfe, 0x4c, 0xd2, 0xa6, 0x6e, 0xd3, 0x75, 0xcf, 0xb0, 0x99, 0xcb, 0x50, 0x8d, 0xe9, 0x67, 0x9b, 0x20, 0xe8, 0x57, 0xd8, 0x14, 0x85, 0x73, 0x6a, 0x74, 0xe0, 0x99, 0xf0, 0x6b, 0x6e, 0x59, 0x30, 0x31, 0x33, 0x96, 0x5f, 0xa1, 0x0c, 0x1b, 0xf4, 0xca, 0x09, 0xe1, 0x9b, 0xb5, 0xcf, 0x6d, 0x0b, 0xeb,
- /* (2^115)P */ 0x1a, 0xde, 0x50, 0xa9, 0xac, 0x3e, 0x10, 0x43, 0x4f, 0x82, 0x4f, 0xc0, 0xfe, 0x3f, 0x33, 0xd2, 0x64, 0x86, 0x50, 0xa9, 0x51, 0x76, 0x5e, 0x50, 0x97, 0x6c, 0x73, 0x8d, 0x77, 0xa3, 0x75, 0x03, 0xbc, 0xc9, 0xfb, 0x50, 0xd9, 0x6d, 0x16, 0xad, 0x5d, 0x32, 0x3d, 0xac, 0x44, 0xdf, 0x51, 0xf7, 0x19, 0xd4, 0x0b, 0x57, 0x78, 0x0b, 0x81, 0x4e,
- /* (2^116)P */ 0x32, 0x24, 0xf1, 0x6c, 0x55, 0x62, 0x1d, 0xb3, 0x1f, 0xda, 0xfa, 0x6a, 0x8f, 0x98, 0x01, 0x16, 0xde, 0x44, 0x50, 0x0d, 0x2e, 0x6c, 0x0b, 0xa2, 0xd3, 0x74, 0x0e, 0xa9, 0xbf, 0x8d, 0xa9, 0xc8, 0xc8, 0x2f, 0x62, 0xc1, 0x35, 0x5e, 0xfd, 0x3a, 0xb3, 0x83, 0x2d, 0xee, 0x4e, 0xfd, 0x5c, 0x5e, 0xad, 0x85, 0xa5, 0x10, 0xb5, 0x4f, 0x34, 0xa7,
- /* (2^117)P */ 0xd1, 0x58, 0x6f, 0xe6, 0x54, 0x2c, 0xc2, 0xcd, 0xcf, 0x83, 0xdc, 0x88, 0x0c, 0xb9, 0xb4, 0x62, 0x18, 0x89, 0x65, 0x28, 0xe9, 0x72, 0x4b, 0x65, 0xcf, 0xd6, 0x90, 0x88, 0xd7, 0x76, 0x17, 0x4f, 0x74, 0x64, 0x1e, 0xcb, 0xd3, 0xf5, 0x4b, 0xaa, 0x2e, 0x4d, 0x2d, 0x7c, 0x13, 0x1f, 0xfd, 0xd9, 0x60, 0x83, 0x7e, 0xda, 0x64, 0x1c, 0xdc, 0x9f,
- /* (2^118)P */ 0xad, 0xef, 0xac, 0x1b, 0xc1, 0x30, 0x5a, 0x15, 0xc9, 0x1f, 0xac, 0xf1, 0xca, 0x44, 0x95, 0x95, 0xea, 0xf2, 0x22, 0xe7, 0x8d, 0x25, 0xf0, 0xff, 0xd8, 0x71, 0xf7, 0xf8, 0x8f, 0x8f, 0xcd, 0xf4, 0x1e, 0xfe, 0x6c, 0x68, 0x04, 0xb8, 0x78, 0xa1, 0x5f, 0xa6, 0x5d, 0x5e, 0xf9, 0x8d, 0xea, 0x80, 0xcb, 0xf3, 0x17, 0xa6, 0x03, 0xc9, 0x38, 0xd5,
- /* (2^119)P */ 0x79, 0x14, 0x31, 0xc3, 0x38, 0xe5, 0xaa, 0xbf, 0x17, 0xa3, 0x04, 0x4e, 0x80, 0x59, 0x9c, 0x9f, 0x19, 0x39, 0xe4, 0x2d, 0x23, 0x54, 0x4a, 0x7f, 0x3e, 0xf3, 0xd9, 0xc7, 0xba, 0x6c, 0x8f, 0x6b, 0xfa, 0x34, 0xb5, 0x23, 0x17, 0x1d, 0xff, 0x1d, 0xea, 0x1f, 0xd7, 0xba, 0x61, 0xb2, 0xe0, 0x38, 0x6a, 0xe9, 0xcf, 0x48, 0x5d, 0x6a, 0x10, 0x9c,
- /* (2^120)P */ 0xc8, 0xbb, 0x13, 0x1c, 0x3f, 0x3c, 0x34, 0xfd, 0xac, 0x37, 0x52, 0x44, 0x25, 0xa8, 0xde, 0x1d, 0x63, 0xf4, 0x81, 0x9a, 0xbe, 0x0b, 0x74, 0x2e, 0xc8, 0x51, 0x16, 0xd3, 0xac, 0x4a, 0xaf, 0xe2, 0x5f, 0x3a, 0x89, 0x32, 0xd1, 0x9b, 0x7c, 0x90, 0x0d, 0xac, 0xdc, 0x8b, 0x73, 0x45, 0x45, 0x97, 0xb1, 0x90, 0x2c, 0x1b, 0x31, 0xca, 0xb1, 0x94,
- /* (2^121)P */ 0x07, 0x28, 0xdd, 0x10, 0x14, 0xa5, 0x95, 0x7e, 0xf3, 0xe4, 0xd4, 0x14, 0xb4, 0x7e, 0x76, 0xdb, 0x42, 0xd6, 0x94, 0x3e, 0xeb, 0x44, 0x64, 0x88, 0x0d, 0xec, 0xc1, 0x21, 0xf0, 0x79, 0xe0, 0x83, 0x67, 0x55, 0x53, 0xc2, 0xf6, 0xc5, 0xc5, 0x89, 0x39, 0xe8, 0x42, 0xd0, 0x17, 0xbd, 0xff, 0x35, 0x59, 0x0e, 0xc3, 0x06, 0x86, 0xd4, 0x64, 0xcf,
- /* (2^122)P */ 0x91, 0xa8, 0xdb, 0x57, 0x9b, 0xe2, 0x96, 0x31, 0x10, 0x6e, 0xd7, 0x9a, 0x97, 0xb3, 0xab, 0xb5, 0x15, 0x66, 0xbe, 0xcc, 0x6d, 0x9a, 0xac, 0x06, 0xb3, 0x0d, 0xaa, 0x4b, 0x9c, 0x96, 0x79, 0x6c, 0x34, 0xee, 0x9e, 0x53, 0x4d, 0x6e, 0xbd, 0x88, 0x02, 0xbf, 0x50, 0x54, 0x12, 0x5d, 0x01, 0x02, 0x46, 0xc6, 0x74, 0x02, 0x8c, 0x24, 0xae, 0xb1,
- /* (2^123)P */ 0xf5, 0x22, 0xea, 0xac, 0x7d, 0x9c, 0x33, 0x8a, 0xa5, 0x36, 0x79, 0x6a, 0x4f, 0xa4, 0xdc, 0xa5, 0x73, 0x64, 0xc4, 0x6f, 0x43, 0x02, 0x3b, 0x94, 0x66, 0xd2, 0x4b, 0x4f, 0xf6, 0x45, 0x33, 0x5d, 0x10, 0x33, 0x18, 0x1e, 0xa3, 0xfc, 0xf7, 0xd2, 0xb8, 0xc8, 0xa7, 0xe0, 0x76, 0x8a, 0xcd, 0xff, 0x4f, 0x99, 0x34, 0x47, 0x84, 0x91, 0x96, 0x9f,
- /* (2^124)P */ 0x8a, 0x48, 0x3b, 0x48, 0x4a, 0xbc, 0xac, 0xe2, 0x80, 0xd6, 0xd2, 0x35, 0xde, 0xd0, 0x56, 0x42, 0x33, 0xb3, 0x56, 0x5a, 0xcd, 0xb8, 0x3d, 0xb5, 0x25, 0xc1, 0xed, 0xff, 0x87, 0x0b, 0x79, 0xff, 0xf2, 0x62, 0xe1, 0x76, 0xc6, 0xa2, 0x0f, 0xa8, 0x9b, 0x0d, 0xcc, 0x3f, 0x3d, 0x35, 0x27, 0x8d, 0x0b, 0x74, 0xb0, 0xc3, 0x78, 0x8c, 0xcc, 0xc8,
- /* (2^125)P */ 0xfc, 0x9a, 0x0c, 0xa8, 0x49, 0x42, 0xb8, 0xdf, 0xcf, 0xb3, 0x19, 0xa6, 0x64, 0x57, 0xfe, 0xe8, 0xf8, 0xa6, 0x4b, 0x86, 0xa1, 0xd5, 0x83, 0x7f, 0x14, 0x99, 0x18, 0x0c, 0x7d, 0x5b, 0xf7, 0x3d, 0xf9, 0x4b, 0x79, 0xb1, 0x86, 0x30, 0xb4, 0x5e, 0x6a, 0xe8, 0x9d, 0xfa, 0x8a, 0x41, 0xc4, 0x30, 0xfc, 0x56, 0x74, 0x14, 0x42, 0xc8, 0x96, 0x0e,
- /* (2^126)P */ 0xdf, 0x66, 0xec, 0xbc, 0x44, 0xdb, 0x19, 0xce, 0xd4, 0xb5, 0x49, 0x40, 0x07, 0x49, 0xe0, 0x3a, 0x61, 0x10, 0xfb, 0x7d, 0xba, 0xb1, 0xe0, 0x28, 0x5b, 0x99, 0x59, 0x96, 0xa2, 0xee, 0xe0, 0x23, 0x37, 0x39, 0x1f, 0xe6, 0x57, 0x9f, 0xf8, 0xf8, 0xdc, 0x74, 0xf6, 0x8f, 0x4f, 0x5e, 0x51, 0xa4, 0x12, 0xac, 0xbe, 0xe4, 0xf3, 0xd1, 0xf0, 0x24,
- /* (2^127)P */ 0x1e, 0x3e, 0x9a, 0x5f, 0xdf, 0x9f, 0xd6, 0x4e, 0x8a, 0x28, 0xc3, 0xcd, 0x96, 0x9d, 0x57, 0xc7, 0x61, 0x81, 0x90, 0xff, 0xae, 0xb1, 0x4f, 0xc2, 0x96, 0x8b, 0x1a, 0x18, 0xf4, 0x50, 0xcb, 0x31, 0xe1, 0x57, 0xf4, 0x90, 0xa8, 0xea, 0xac, 0xe7, 0x61, 0x98, 0xb6, 0x15, 0xc1, 0x7b, 0x29, 0xa4, 0xc3, 0x18, 0xef, 0xb9, 0xd8, 0xdf, 0xf6, 0xac,
- /* (2^128)P */ 0xca, 0xa8, 0x6c, 0xf1, 0xb4, 0xca, 0xfe, 0x31, 0xee, 0x48, 0x38, 0x8b, 0x0e, 0xbb, 0x7a, 0x30, 0xaa, 0xf9, 0xee, 0x27, 0x53, 0x24, 0xdc, 0x2e, 0x15, 0xa6, 0x48, 0x8f, 0xa0, 0x7e, 0xf1, 0xdc, 0x93, 0x87, 0x39, 0xeb, 0x7f, 0x38, 0x92, 0x92, 0x4c, 0x29, 0xe9, 0x57, 0xd8, 0x59, 0xfc, 0xe9, 0x9c, 0x44, 0xc0, 0x65, 0xcf, 0xac, 0x4b, 0xdc,
- /* (2^129)P */ 0xa3, 0xd0, 0x37, 0x8f, 0x86, 0x2f, 0xc6, 0x47, 0x55, 0x46, 0x65, 0x26, 0x4b, 0x91, 0xe2, 0x18, 0x5c, 0x4f, 0x23, 0xc1, 0x37, 0x29, 0xb9, 0xc1, 0x27, 0xc5, 0x3c, 0xbf, 0x7e, 0x23, 0xdb, 0x73, 0x99, 0xbd, 0x1b, 0xb2, 0x31, 0x68, 0x3a, 0xad, 0xb7, 0xb0, 0x10, 0xc5, 0xe5, 0x11, 0x51, 0xba, 0xa7, 0x60, 0x66, 0x54, 0xf0, 0x08, 0xd7, 0x69,
- /* (2^130)P */ 0x89, 0x41, 0x79, 0xcc, 0xeb, 0x0a, 0xf5, 0x4b, 0xa3, 0x4c, 0xce, 0x52, 0xb0, 0xa7, 0xe4, 0x41, 0x75, 0x7d, 0x04, 0xbb, 0x09, 0x4c, 0x50, 0x9f, 0xdf, 0xea, 0x74, 0x61, 0x02, 0xad, 0xb4, 0x9d, 0xb7, 0x05, 0xb9, 0xea, 0xeb, 0x91, 0x35, 0xe7, 0x49, 0xea, 0xd3, 0x4f, 0x3c, 0x60, 0x21, 0x7a, 0xde, 0xc7, 0xe2, 0x5a, 0xee, 0x8e, 0x93, 0xc7,
- /* (2^131)P */ 0x00, 0xe8, 0xed, 0xd0, 0xb3, 0x0d, 0xaf, 0xb2, 0xde, 0x2c, 0xf6, 0x00, 0xe2, 0xea, 0x6d, 0xf8, 0x0e, 0xd9, 0x67, 0x59, 0xa9, 0x50, 0xbb, 0x17, 0x8f, 0xff, 0xb1, 0x9f, 0x17, 0xb6, 0xf2, 0xb5, 0xba, 0x80, 0xf7, 0x0f, 0xba, 0xd5, 0x09, 0x43, 0xaa, 0x4e, 0x3a, 0x67, 0x6a, 0x89, 0x9b, 0x18, 0x65, 0x35, 0xf8, 0x3a, 0x49, 0x91, 0x30, 0x51,
- /* (2^132)P */ 0x8d, 0x25, 0xe9, 0x0e, 0x7d, 0x50, 0x76, 0xe4, 0x58, 0x7e, 0xb9, 0x33, 0xe6, 0x65, 0x90, 0xc2, 0x50, 0x9d, 0x50, 0x2e, 0x11, 0xad, 0xd5, 0x43, 0x52, 0x32, 0x41, 0x4f, 0x7b, 0xb6, 0xa0, 0xec, 0x81, 0x75, 0x36, 0x7c, 0x77, 0x85, 0x59, 0x70, 0xe4, 0xf9, 0xef, 0x66, 0x8d, 0x35, 0xc8, 0x2a, 0x6e, 0x5b, 0xc6, 0x0d, 0x0b, 0x29, 0x60, 0x68,
- /* (2^133)P */ 0xf8, 0xce, 0xb0, 0x3a, 0x56, 0x7d, 0x51, 0x9a, 0x25, 0x73, 0xea, 0xdd, 0xe4, 0xe0, 0x0e, 0xf0, 0x07, 0xc0, 0x31, 0x00, 0x73, 0x35, 0xd0, 0x39, 0xc4, 0x9b, 0xb7, 0x95, 0xe0, 0x62, 0x70, 0x36, 0x0b, 0xcb, 0xa0, 0x42, 0xde, 0x51, 0xcf, 0x41, 0xe0, 0xb8, 0xb4, 0xc0, 0xe5, 0x46, 0x99, 0x9f, 0x02, 0x7f, 0x14, 0x8c, 0xc1, 0x4e, 0xef, 0xe8,
- /* (2^134)P */ 0x10, 0x01, 0x57, 0x0a, 0xbe, 0x8b, 0x18, 0xc8, 0xca, 0x00, 0x28, 0x77, 0x4a, 0x9a, 0xc7, 0x55, 0x2a, 0xcc, 0x0c, 0x7b, 0xb9, 0xe9, 0xc8, 0x97, 0x7c, 0x02, 0xe3, 0x09, 0x2f, 0x62, 0x30, 0xb8, 0x40, 0x09, 0x65, 0xe9, 0x55, 0x63, 0xb5, 0x07, 0xca, 0x9f, 0x00, 0xdf, 0x9d, 0x5c, 0xc7, 0xee, 0x57, 0xa5, 0x90, 0x15, 0x1e, 0x22, 0xa0, 0x12,
- /* (2^135)P */ 0x71, 0x2d, 0xc9, 0xef, 0x27, 0xb9, 0xd8, 0x12, 0x43, 0x6b, 0xa8, 0xce, 0x3b, 0x6d, 0x6e, 0x91, 0x43, 0x23, 0xbc, 0x32, 0xb3, 0xbf, 0xe1, 0xc7, 0x39, 0xcf, 0x7c, 0x42, 0x4c, 0xb1, 0x30, 0xe2, 0xdd, 0x69, 0x06, 0xe5, 0xea, 0xf0, 0x2a, 0x16, 0x50, 0x71, 0xca, 0x92, 0xdf, 0xc1, 0xcc, 0xec, 0xe6, 0x54, 0x07, 0xf3, 0x18, 0x8d, 0xd8, 0x29,
- /* (2^136)P */ 0x98, 0x51, 0x48, 0x8f, 0xfa, 0x2e, 0x5e, 0x67, 0xb0, 0xc6, 0x17, 0x12, 0xb6, 0x7d, 0xc9, 0xad, 0x81, 0x11, 0xad, 0x0c, 0x1c, 0x2d, 0x45, 0xdf, 0xac, 0x66, 0xbd, 0x08, 0x6f, 0x7c, 0xc7, 0x06, 0x6e, 0x19, 0x08, 0x39, 0x64, 0xd7, 0xe4, 0xd1, 0x11, 0x5f, 0x1c, 0xf4, 0x67, 0xc3, 0x88, 0x6a, 0xe6, 0x07, 0xa3, 0x83, 0xd7, 0xfd, 0x2a, 0xf9,
- /* (2^137)P */ 0x87, 0xed, 0xeb, 0xd9, 0xdf, 0xff, 0x43, 0x8b, 0xaa, 0x20, 0x58, 0xb0, 0xb4, 0x6b, 0x14, 0xb8, 0x02, 0xc5, 0x40, 0x20, 0x22, 0xbb, 0xf7, 0xb4, 0xf3, 0x05, 0x1e, 0x4d, 0x94, 0xff, 0xe3, 0xc5, 0x22, 0x82, 0xfe, 0xaf, 0x90, 0x42, 0x98, 0x6b, 0x76, 0x8b, 0x3e, 0x89, 0x3f, 0x42, 0x2a, 0xa7, 0x26, 0x00, 0xda, 0x5c, 0xa2, 0x2b, 0xec, 0xdd,
- /* (2^138)P */ 0x5c, 0x21, 0x16, 0x0d, 0x46, 0xb8, 0xd0, 0xa7, 0x88, 0xe7, 0x25, 0xcb, 0x3e, 0x50, 0x73, 0x61, 0xe7, 0xaf, 0x5a, 0x3f, 0x47, 0x8b, 0x3d, 0x97, 0x79, 0x2c, 0xe6, 0x6d, 0x95, 0x74, 0x65, 0x70, 0x36, 0xfd, 0xd1, 0x9e, 0x13, 0x18, 0x63, 0xb1, 0x2d, 0x0b, 0xb5, 0x36, 0x3e, 0xe7, 0x35, 0x42, 0x3b, 0xe6, 0x1f, 0x4d, 0x9d, 0x59, 0xa2, 0x43,
- /* (2^139)P */ 0x8c, 0x0c, 0x7c, 0x24, 0x9e, 0xe0, 0xf8, 0x05, 0x1c, 0x9e, 0x1f, 0x31, 0xc0, 0x70, 0xb3, 0xfb, 0x4e, 0xf8, 0x0a, 0x57, 0xb7, 0x49, 0xb5, 0x73, 0xa1, 0x5f, 0x9b, 0x6a, 0x07, 0x6c, 0x87, 0x71, 0x87, 0xd4, 0xbe, 0x98, 0x1e, 0x98, 0xee, 0x52, 0xc1, 0x7b, 0x95, 0x0f, 0x28, 0x32, 0x36, 0x28, 0xd0, 0x3a, 0x0f, 0x7d, 0x2a, 0xa9, 0x62, 0xb9,
- /* (2^140)P */ 0x97, 0xe6, 0x18, 0x77, 0xf9, 0x34, 0xac, 0xbc, 0xe0, 0x62, 0x9f, 0x42, 0xde, 0xbd, 0x2f, 0xf7, 0x1f, 0xb7, 0x14, 0x52, 0x8a, 0x79, 0xb2, 0x3f, 0xd2, 0x95, 0x71, 0x01, 0xe8, 0xaf, 0x8c, 0xa4, 0xa4, 0xa7, 0x27, 0xf3, 0x5c, 0xdf, 0x3e, 0x57, 0x7a, 0xf1, 0x76, 0x49, 0xe6, 0x42, 0x3f, 0x8f, 0x1e, 0x63, 0x4a, 0x65, 0xb5, 0x41, 0xf5, 0x02,
- /* (2^141)P */ 0x72, 0x85, 0xc5, 0x0b, 0xe1, 0x47, 0x64, 0x02, 0xc5, 0x4d, 0x81, 0x69, 0xb2, 0xcf, 0x0f, 0x6c, 0xd4, 0x6d, 0xd0, 0xc7, 0xb4, 0x1c, 0xd0, 0x32, 0x59, 0x89, 0xe2, 0xe0, 0x96, 0x8b, 0x12, 0x98, 0xbf, 0x63, 0x7a, 0x4c, 0x76, 0x7e, 0x58, 0x17, 0x8f, 0x5b, 0x0a, 0x59, 0x65, 0x75, 0xbc, 0x61, 0x1f, 0xbe, 0xc5, 0x6e, 0x0a, 0x57, 0x52, 0x70,
- /* (2^142)P */ 0x92, 0x1c, 0x77, 0xbb, 0x62, 0x02, 0x6c, 0x25, 0x9c, 0x66, 0x07, 0x83, 0xab, 0xcc, 0x80, 0x5d, 0xd2, 0x76, 0x0c, 0xa4, 0xc5, 0xb4, 0x8a, 0x68, 0x23, 0x31, 0x32, 0x29, 0x8a, 0x47, 0x92, 0x12, 0x80, 0xb3, 0xfa, 0x18, 0xe4, 0x8d, 0xc0, 0x4d, 0xfe, 0x97, 0x5f, 0x72, 0x41, 0xb5, 0x5c, 0x7a, 0xbd, 0xf0, 0xcf, 0x5e, 0x97, 0xaa, 0x64, 0x32,
- /* (2^143)P */ 0x35, 0x3f, 0x75, 0xc1, 0x7a, 0x75, 0x7e, 0xa9, 0xc6, 0x0b, 0x4e, 0x32, 0x62, 0xec, 0xe3, 0x5c, 0xfb, 0x01, 0x43, 0xb6, 0xd4, 0x5b, 0x75, 0xd2, 0xee, 0x7f, 0x5d, 0x23, 0x2b, 0xb3, 0x54, 0x34, 0x4c, 0xd3, 0xb4, 0x32, 0x84, 0x81, 0xb5, 0x09, 0x76, 0x19, 0xda, 0x58, 0xda, 0x7c, 0xdb, 0x2e, 0xdd, 0x4c, 0x8e, 0xdd, 0x5d, 0x89, 0x10, 0x10,
- /* (2^144)P */ 0x57, 0x25, 0x6a, 0x08, 0x37, 0x92, 0xa8, 0xdf, 0x24, 0xef, 0x8f, 0x33, 0x34, 0x52, 0xa4, 0x4c, 0xf0, 0x77, 0x9f, 0x69, 0x77, 0xd5, 0x8f, 0xd2, 0x9a, 0xb3, 0xb6, 0x1d, 0x2d, 0xa6, 0xf7, 0x1f, 0xda, 0xd7, 0xcb, 0x75, 0x11, 0xc3, 0x6b, 0xc0, 0x38, 0xb1, 0xd5, 0x2d, 0x96, 0x84, 0x16, 0xfa, 0x26, 0xb9, 0xcc, 0x3f, 0x16, 0x47, 0x23, 0x74,
- /* (2^145)P */ 0x9b, 0x61, 0x2a, 0x1c, 0xdd, 0x39, 0xa5, 0xfa, 0x1c, 0x7d, 0x63, 0x50, 0xca, 0xe6, 0x9d, 0xfa, 0xb7, 0xc4, 0x4c, 0x6a, 0x97, 0x5f, 0x36, 0x4e, 0x47, 0xdd, 0x17, 0xf7, 0xf9, 0x19, 0xce, 0x75, 0x17, 0xad, 0xce, 0x2a, 0xf3, 0xfe, 0x27, 0x8f, 0x3e, 0x48, 0xc0, 0x60, 0x87, 0x24, 0x19, 0xae, 0x59, 0xe4, 0x5a, 0x00, 0x2a, 0xba, 0xa2, 0x1f,
- /* (2^146)P */ 0x26, 0x88, 0x42, 0x60, 0x9f, 0x6e, 0x2c, 0x7c, 0x39, 0x0f, 0x47, 0x6a, 0x0e, 0x02, 0xbb, 0x4b, 0x34, 0x29, 0x55, 0x18, 0x36, 0xcf, 0x3b, 0x47, 0xf1, 0x2e, 0xfc, 0x6e, 0x94, 0xff, 0xe8, 0x6b, 0x06, 0xd2, 0xba, 0x77, 0x5e, 0x60, 0xd7, 0x19, 0xef, 0x02, 0x9d, 0x3a, 0xc2, 0xb7, 0xa9, 0xd8, 0x57, 0xee, 0x7e, 0x2b, 0xf2, 0x6d, 0x28, 0xda,
- /* (2^147)P */ 0xdf, 0xd9, 0x92, 0x11, 0x98, 0x23, 0xe2, 0x45, 0x2f, 0x74, 0x70, 0xee, 0x0e, 0x55, 0x65, 0x79, 0x86, 0x38, 0x17, 0x92, 0x85, 0x87, 0x99, 0x50, 0xd9, 0x7c, 0xdb, 0xa1, 0x10, 0xec, 0x30, 0xb7, 0x40, 0xa3, 0x23, 0x9b, 0x0e, 0x27, 0x49, 0x29, 0x03, 0x94, 0xff, 0x53, 0xdc, 0xd7, 0xed, 0x49, 0xa9, 0x5a, 0x3b, 0xee, 0xd7, 0xc7, 0x65, 0xaf,
- /* (2^148)P */ 0xa0, 0xbd, 0xbe, 0x03, 0xee, 0x0c, 0xbe, 0x32, 0x00, 0x7b, 0x52, 0xcb, 0x92, 0x29, 0xbf, 0xa0, 0xc6, 0xd9, 0xd2, 0xd6, 0x15, 0xe8, 0x3a, 0x75, 0x61, 0x65, 0x56, 0xae, 0xad, 0x3c, 0x2a, 0x64, 0x14, 0x3f, 0x8e, 0xc1, 0x2d, 0x0c, 0x8d, 0x20, 0xdb, 0x58, 0x4b, 0xe5, 0x40, 0x15, 0x4b, 0xdc, 0xa8, 0xbd, 0xef, 0x08, 0xa7, 0xd1, 0xf4, 0xb0,
- /* (2^149)P */ 0xa9, 0x0f, 0x05, 0x94, 0x66, 0xac, 0x1f, 0x65, 0x3f, 0xe1, 0xb8, 0xe1, 0x34, 0x5e, 0x1d, 0x8f, 0xe3, 0x93, 0x03, 0x15, 0xff, 0xb6, 0x65, 0xb6, 0x6e, 0xc0, 0x2f, 0xd4, 0x2e, 0xb9, 0x2c, 0x13, 0x3c, 0x99, 0x1c, 0xb5, 0x87, 0xba, 0x79, 0xcb, 0xf0, 0x18, 0x06, 0x86, 0x04, 0x14, 0x25, 0x09, 0xcd, 0x1c, 0x14, 0xda, 0x35, 0xd0, 0x38, 0x3b,
- /* (2^150)P */ 0x1b, 0x04, 0xa3, 0x27, 0xb4, 0xd3, 0x37, 0x48, 0x1e, 0x8f, 0x69, 0xd3, 0x5a, 0x2f, 0x20, 0x02, 0x36, 0xbe, 0x06, 0x7b, 0x6b, 0x6c, 0x12, 0x5b, 0x80, 0x74, 0x44, 0xe6, 0xf8, 0xf5, 0x95, 0x59, 0x29, 0xab, 0x51, 0x47, 0x83, 0x28, 0xe0, 0xad, 0xde, 0xaa, 0xd3, 0xb1, 0x1a, 0xcb, 0xa3, 0xcd, 0x8b, 0x6a, 0xb1, 0xa7, 0x0a, 0xd1, 0xf9, 0xbe,
- /* (2^151)P */ 0xce, 0x2f, 0x85, 0xca, 0x74, 0x6d, 0x49, 0xb8, 0xce, 0x80, 0x44, 0xe0, 0xda, 0x5b, 0xcf, 0x2f, 0x79, 0x74, 0xfe, 0xb4, 0x2c, 0x99, 0x20, 0x6e, 0x09, 0x04, 0xfb, 0x6d, 0x57, 0x5b, 0x95, 0x0c, 0x45, 0xda, 0x4f, 0x7f, 0x63, 0xcc, 0x85, 0x5a, 0x67, 0x50, 0x68, 0x71, 0xb4, 0x67, 0xb1, 0x2e, 0xc1, 0x1c, 0xdc, 0xff, 0x2a, 0x7c, 0x10, 0x5e,
- /* (2^152)P */ 0xa6, 0xde, 0xf3, 0xd4, 0x22, 0x30, 0x24, 0x9e, 0x0b, 0x30, 0x54, 0x59, 0x7e, 0xa2, 0xeb, 0x89, 0x54, 0x65, 0x3e, 0x40, 0xd1, 0xde, 0xe6, 0xee, 0x4d, 0xbf, 0x5e, 0x40, 0x1d, 0xee, 0x4f, 0x68, 0xd9, 0xa7, 0x2f, 0xb3, 0x64, 0xb3, 0xf5, 0xc8, 0xd3, 0xaa, 0x70, 0x70, 0x3d, 0xef, 0xd3, 0x95, 0x54, 0xdb, 0x3e, 0x94, 0x95, 0x92, 0x1f, 0x45,
- /* (2^153)P */ 0x22, 0x80, 0x1d, 0x9d, 0x96, 0xa5, 0x78, 0x6f, 0xe0, 0x1e, 0x1b, 0x66, 0x42, 0xc8, 0xae, 0x9e, 0x46, 0x45, 0x08, 0x41, 0xdf, 0x80, 0xae, 0x6f, 0xdb, 0x15, 0x5a, 0x21, 0x31, 0x7a, 0xd0, 0xf2, 0x54, 0x15, 0x88, 0xd3, 0x0f, 0x7f, 0x14, 0x5a, 0x14, 0x97, 0xab, 0xf4, 0x58, 0x6a, 0x9f, 0xea, 0x74, 0xe5, 0x6b, 0x90, 0x59, 0x2b, 0x48, 0xd9,
- /* (2^154)P */ 0x12, 0x24, 0x04, 0xf5, 0x50, 0xc2, 0x8c, 0xb0, 0x7c, 0x46, 0x98, 0xd5, 0x24, 0xad, 0xf6, 0x72, 0xdc, 0x82, 0x1a, 0x60, 0xc1, 0xeb, 0x48, 0xef, 0x7f, 0x6e, 0xe6, 0xcc, 0xdb, 0x7b, 0xae, 0xbe, 0x5e, 0x1e, 0x5c, 0xe6, 0x0a, 0x70, 0xdf, 0xa4, 0xa3, 0x85, 0x1b, 0x1b, 0x7f, 0x72, 0xb9, 0x96, 0x6f, 0xdc, 0x03, 0x76, 0x66, 0xfb, 0xa0, 0x33,
- /* (2^155)P */ 0x37, 0x40, 0xbb, 0xbc, 0x68, 0x58, 0x86, 0xca, 0xbb, 0xa5, 0x24, 0x76, 0x3d, 0x48, 0xd1, 0xad, 0xb4, 0xa8, 0xcf, 0xc3, 0xb6, 0xa8, 0xba, 0x1a, 0x3a, 0xbe, 0x33, 0x75, 0x04, 0x5c, 0x13, 0x8c, 0x0d, 0x70, 0x8d, 0xa6, 0x4e, 0x2a, 0xeb, 0x17, 0x3c, 0x22, 0xdd, 0x3e, 0x96, 0x40, 0x11, 0x9e, 0x4e, 0xae, 0x3d, 0xf8, 0x91, 0xd7, 0x50, 0xc8,
- /* (2^156)P */ 0xd8, 0xca, 0xde, 0x19, 0xcf, 0x00, 0xe4, 0x73, 0x18, 0x7f, 0x9b, 0x9f, 0xf4, 0x5b, 0x49, 0x49, 0x99, 0xdc, 0xa4, 0x46, 0x21, 0xb5, 0xd7, 0x3e, 0xb7, 0x47, 0x1b, 0xa9, 0x9f, 0x4c, 0x69, 0x7d, 0xec, 0x33, 0xd6, 0x1c, 0x51, 0x7f, 0x47, 0x74, 0x7a, 0x6c, 0xf3, 0xd2, 0x2e, 0xbf, 0xdf, 0x6c, 0x9e, 0x77, 0x3b, 0x34, 0xf6, 0x73, 0x80, 0xed,
- /* (2^157)P */ 0x16, 0xfb, 0x16, 0xc3, 0xc2, 0x83, 0xe4, 0xf4, 0x03, 0x7f, 0x52, 0xb0, 0x67, 0x51, 0x7b, 0x24, 0x5a, 0x51, 0xd3, 0xb6, 0x4e, 0x59, 0x76, 0xcd, 0x08, 0x7b, 0x1d, 0x7a, 0x9c, 0x65, 0xae, 0xce, 0xaa, 0xd2, 0x1c, 0x85, 0x66, 0x68, 0x06, 0x15, 0xa8, 0x06, 0xe6, 0x16, 0x37, 0xf4, 0x49, 0x9e, 0x0f, 0x50, 0x37, 0xb1, 0xb2, 0x93, 0x70, 0x43,
- /* (2^158)P */ 0x18, 0x3a, 0x16, 0xe5, 0x8d, 0xc8, 0x35, 0xd6, 0x7b, 0x09, 0xec, 0x61, 0x5f, 0x5c, 0x2a, 0x19, 0x96, 0x2e, 0xc3, 0xfd, 0xab, 0xe6, 0x23, 0xae, 0xab, 0xc5, 0xcb, 0xb9, 0x7b, 0x2d, 0x34, 0x51, 0xb9, 0x41, 0x9e, 0x7d, 0xca, 0xda, 0x25, 0x45, 0x14, 0xb0, 0xc7, 0x4d, 0x26, 0x2b, 0xfe, 0x43, 0xb0, 0x21, 0x5e, 0xfa, 0xdc, 0x7c, 0xf9, 0x5a,
- /* (2^159)P */ 0x94, 0xad, 0x42, 0x17, 0xf5, 0xcd, 0x1c, 0x0d, 0xf6, 0x41, 0xd2, 0x55, 0xbb, 0x50, 0xf1, 0xc6, 0xbc, 0xa6, 0xc5, 0x3a, 0xfd, 0x9b, 0x75, 0x3e, 0xf6, 0x1a, 0xa7, 0xb2, 0x6e, 0x64, 0x12, 0xdc, 0x3c, 0xe5, 0xf6, 0xfc, 0x3b, 0xfa, 0x43, 0x81, 0xd4, 0xa5, 0xee, 0xf5, 0x9c, 0x47, 0x2f, 0xd0, 0x9c, 0xde, 0xa1, 0x48, 0x91, 0x9a, 0x34, 0xc1,
- /* (2^160)P */ 0x37, 0x1b, 0xb3, 0x88, 0xc9, 0x98, 0x4e, 0xfb, 0x84, 0x4f, 0x2b, 0x0a, 0xb6, 0x8f, 0x35, 0x15, 0xcd, 0x61, 0x7a, 0x5f, 0x5c, 0xa0, 0xca, 0x23, 0xa0, 0x93, 0x1f, 0xcc, 0x3c, 0x39, 0x3a, 0x24, 0xa7, 0x49, 0xad, 0x8d, 0x59, 0xcc, 0x94, 0x5a, 0x16, 0xf5, 0x70, 0xe8, 0x52, 0x1e, 0xee, 0x20, 0x30, 0x17, 0x7e, 0xf0, 0x4c, 0x93, 0x06, 0x5a,
- /* (2^161)P */ 0x81, 0xba, 0x3b, 0xd7, 0x3e, 0xb4, 0x32, 0x3a, 0x22, 0x39, 0x2a, 0xfc, 0x19, 0xd9, 0xd2, 0xf6, 0xc5, 0x79, 0x6c, 0x0e, 0xde, 0xda, 0x01, 0xff, 0x52, 0xfb, 0xb6, 0x95, 0x4e, 0x7a, 0x10, 0xb8, 0x06, 0x86, 0x3c, 0xcd, 0x56, 0xd6, 0x15, 0xbf, 0x6e, 0x3e, 0x4f, 0x35, 0x5e, 0xca, 0xbc, 0xa5, 0x95, 0xa2, 0xdf, 0x2d, 0x1d, 0xaf, 0x59, 0xf9,
- /* (2^162)P */ 0x69, 0xe5, 0xe2, 0xfa, 0xc9, 0x7f, 0xdd, 0x09, 0xf5, 0x6b, 0x4e, 0x2e, 0xbe, 0xb4, 0xbf, 0x3e, 0xb2, 0xf2, 0x81, 0x30, 0xe1, 0x07, 0xa8, 0x0d, 0x2b, 0xd2, 0x5a, 0x55, 0xbe, 0x4b, 0x86, 0x5d, 0xb0, 0x5e, 0x7c, 0x8f, 0xc1, 0x3c, 0x81, 0x4c, 0xf7, 0x6d, 0x7d, 0xe6, 0x4f, 0x8a, 0x85, 0xc2, 0x2f, 0x28, 0xef, 0x8c, 0x69, 0xc2, 0xc2, 0x1a,
- /* (2^163)P */ 0xd9, 0xe4, 0x0e, 0x1e, 0xc2, 0xf7, 0x2f, 0x9f, 0xa1, 0x40, 0xfe, 0x46, 0x16, 0xaf, 0x2e, 0xd1, 0xec, 0x15, 0x9b, 0x61, 0x92, 0xce, 0xfc, 0x10, 0x43, 0x1d, 0x00, 0xf6, 0xbe, 0x20, 0x80, 0x80, 0x6f, 0x3c, 0x16, 0x94, 0x59, 0xba, 0x03, 0x53, 0x6e, 0xb6, 0xdd, 0x25, 0x7b, 0x86, 0xbf, 0x96, 0xf4, 0x2f, 0xa1, 0x96, 0x8d, 0xf9, 0xb3, 0x29,
- /* (2^164)P */ 0x3b, 0x04, 0x60, 0x6e, 0xce, 0xab, 0xd2, 0x63, 0x18, 0x53, 0x88, 0x16, 0x4a, 0x6a, 0xab, 0x72, 0x03, 0x68, 0xa5, 0xd4, 0x0d, 0xb2, 0x82, 0x81, 0x1f, 0x2b, 0x5c, 0x75, 0xe8, 0xd2, 0x1d, 0x7f, 0xe7, 0x1b, 0x35, 0x02, 0xde, 0xec, 0xbd, 0xcb, 0xc7, 0x01, 0xd3, 0x95, 0x61, 0xfe, 0xb2, 0x7a, 0x66, 0x09, 0x4c, 0x6d, 0xfd, 0x39, 0xf7, 0x52,
- /* (2^165)P */ 0x42, 0xc1, 0x5f, 0xf8, 0x35, 0x52, 0xc1, 0xfe, 0xc5, 0x11, 0x80, 0x1c, 0x11, 0x46, 0x31, 0x11, 0xbe, 0xd0, 0xc4, 0xb6, 0x07, 0x13, 0x38, 0xa0, 0x8d, 0x65, 0xf0, 0x56, 0x9e, 0x16, 0xbf, 0x9d, 0xcd, 0x51, 0x34, 0xf9, 0x08, 0x48, 0x7b, 0x76, 0x0c, 0x7b, 0x30, 0x07, 0xa8, 0x76, 0xaf, 0xa3, 0x29, 0x38, 0xb0, 0x58, 0xde, 0x72, 0x4b, 0x45,
- /* (2^166)P */ 0xd4, 0x16, 0xa7, 0xc0, 0xb4, 0x9f, 0xdf, 0x1a, 0x37, 0xc8, 0x35, 0xed, 0xc5, 0x85, 0x74, 0x64, 0x09, 0x22, 0xef, 0xe9, 0x0c, 0xaf, 0x12, 0x4c, 0x9e, 0xf8, 0x47, 0x56, 0xe0, 0x7f, 0x4e, 0x24, 0x6b, 0x0c, 0xe7, 0xad, 0xc6, 0x47, 0x1d, 0xa4, 0x0d, 0x86, 0x89, 0x65, 0xe8, 0x5f, 0x71, 0xc7, 0xe9, 0xcd, 0xec, 0x6c, 0x62, 0xc7, 0xe3, 0xb3,
- /* (2^167)P */ 0xb5, 0xea, 0x86, 0xe3, 0x15, 0x18, 0x3f, 0x6d, 0x7b, 0x05, 0x95, 0x15, 0x53, 0x26, 0x1c, 0xeb, 0xbe, 0x7e, 0x16, 0x42, 0x4b, 0xa2, 0x3d, 0xdd, 0x0e, 0xff, 0xba, 0x67, 0xb5, 0xae, 0x7a, 0x17, 0xde, 0x23, 0xad, 0x14, 0xcc, 0xd7, 0xaf, 0x57, 0x01, 0xe0, 0xdd, 0x48, 0xdd, 0xd7, 0xe3, 0xdf, 0xe9, 0x2d, 0xda, 0x67, 0xa4, 0x9f, 0x29, 0x04,
- /* (2^168)P */ 0x16, 0x53, 0xe6, 0x9c, 0x4e, 0xe5, 0x1e, 0x70, 0x81, 0x25, 0x02, 0x9b, 0x47, 0x6d, 0xd2, 0x08, 0x73, 0xbe, 0x0a, 0xf1, 0x7b, 0xeb, 0x24, 0xeb, 0x38, 0x23, 0x5c, 0xb6, 0x3e, 0xce, 0x1e, 0xe3, 0xbc, 0x82, 0x35, 0x1f, 0xaf, 0x3a, 0x3a, 0xe5, 0x4e, 0xc1, 0xca, 0xbf, 0x47, 0xb4, 0xbb, 0xbc, 0x5f, 0xea, 0xc6, 0xca, 0xf3, 0xa0, 0xa2, 0x73,
- /* (2^169)P */ 0xef, 0xa4, 0x7a, 0x4e, 0xe4, 0xc7, 0xb6, 0x43, 0x2e, 0xa5, 0xe4, 0xa5, 0xba, 0x1e, 0xa5, 0xfe, 0x9e, 0xce, 0xa9, 0x80, 0x04, 0xcb, 0x4f, 0xd8, 0x74, 0x05, 0x48, 0xfa, 0x99, 0x11, 0x5d, 0x97, 0x3b, 0x07, 0x0d, 0xdd, 0xe6, 0xb1, 0x74, 0x87, 0x1a, 0xd3, 0x26, 0xb7, 0x8f, 0xe1, 0x63, 0x3d, 0xec, 0x53, 0x93, 0xb0, 0x81, 0x78, 0x34, 0xa4,
- /* (2^170)P */ 0xe1, 0xe7, 0xd4, 0x58, 0x9d, 0x0e, 0x8b, 0x65, 0x66, 0x37, 0x16, 0x48, 0x6f, 0xaa, 0x42, 0x37, 0x77, 0xad, 0xb1, 0x56, 0x48, 0xdf, 0x65, 0x36, 0x30, 0xb8, 0x00, 0x12, 0xd8, 0x32, 0x28, 0x7f, 0xc1, 0x71, 0xeb, 0x93, 0x0f, 0x48, 0x04, 0xe1, 0x5a, 0x6a, 0x96, 0xc1, 0xca, 0x89, 0x6d, 0x1b, 0x82, 0x4c, 0x18, 0x6d, 0x55, 0x4b, 0xea, 0xfd,
- /* (2^171)P */ 0x62, 0x1a, 0x53, 0xb4, 0xb1, 0xbe, 0x6f, 0x15, 0x18, 0x88, 0xd4, 0x66, 0x61, 0xc7, 0x12, 0x69, 0x02, 0xbd, 0x03, 0x23, 0x2b, 0xef, 0xf9, 0x54, 0xa4, 0x85, 0xa8, 0xe3, 0xb7, 0xbd, 0xa9, 0xa3, 0xf3, 0x2a, 0xdd, 0xf1, 0xd4, 0x03, 0x0f, 0xa9, 0xa1, 0xd8, 0xa3, 0xcd, 0xb2, 0x71, 0x90, 0x4b, 0x35, 0x62, 0xf2, 0x2f, 0xce, 0x67, 0x1f, 0xaa,
- /* (2^172)P */ 0x9e, 0x1e, 0xcd, 0x43, 0x7e, 0x87, 0x37, 0x94, 0x3a, 0x97, 0x4c, 0x7e, 0xee, 0xc9, 0x37, 0x85, 0xf1, 0xd9, 0x4f, 0xbf, 0xf9, 0x6f, 0x39, 0x9a, 0x39, 0x87, 0x2e, 0x25, 0x84, 0x42, 0xc3, 0x80, 0xcb, 0x07, 0x22, 0xae, 0x30, 0xd5, 0x50, 0xa1, 0x23, 0xcc, 0x31, 0x81, 0x9d, 0xf1, 0x30, 0xd9, 0x2b, 0x73, 0x41, 0x16, 0x50, 0xab, 0x2d, 0xa2,
- /* (2^173)P */ 0xa4, 0x69, 0x4f, 0xa1, 0x4e, 0xb9, 0xbf, 0x14, 0xe8, 0x2b, 0x04, 0x93, 0xb7, 0x6e, 0x9f, 0x7d, 0x73, 0x0a, 0xc5, 0x14, 0xb8, 0xde, 0x8c, 0xc1, 0xfe, 0xc0, 0xa7, 0xa4, 0xcc, 0x42, 0x42, 0x81, 0x15, 0x65, 0x8a, 0x80, 0xb9, 0xde, 0x1f, 0x60, 0x33, 0x0e, 0xcb, 0xfc, 0xe0, 0xdb, 0x83, 0xa1, 0xe5, 0xd0, 0x16, 0x86, 0x2c, 0xe2, 0x87, 0xed,
- /* (2^174)P */ 0x7a, 0xc0, 0xeb, 0x6b, 0xf6, 0x0d, 0x4c, 0x6d, 0x1e, 0xdb, 0xab, 0xe7, 0x19, 0x45, 0xc6, 0xe3, 0xb2, 0x06, 0xbb, 0xbc, 0x70, 0x99, 0x83, 0x33, 0xeb, 0x28, 0xc8, 0x77, 0xf6, 0x4d, 0x01, 0xb7, 0x59, 0xa0, 0xd2, 0xb3, 0x2a, 0x72, 0x30, 0xe7, 0x11, 0x39, 0xb6, 0x41, 0x29, 0x65, 0x5a, 0x14, 0xb9, 0x86, 0x08, 0xe0, 0x7d, 0x32, 0x8c, 0xf0,
- /* (2^175)P */ 0x5c, 0x11, 0x30, 0x9e, 0x05, 0x27, 0xf5, 0x45, 0x0f, 0xb3, 0xc9, 0x75, 0xc3, 0xd7, 0xe1, 0x82, 0x3b, 0x8e, 0x87, 0x23, 0x00, 0x15, 0x19, 0x07, 0xd9, 0x21, 0x53, 0xc7, 0xf1, 0xa3, 0xbf, 0x70, 0x64, 0x15, 0x18, 0xca, 0x23, 0x9e, 0xd3, 0x08, 0xc3, 0x2a, 0x8b, 0xe5, 0x83, 0x04, 0x89, 0x14, 0xfd, 0x28, 0x25, 0x1c, 0xe3, 0x26, 0xa7, 0x22,
- /* (2^176)P */ 0xdc, 0xd4, 0x75, 0x60, 0x99, 0x94, 0xea, 0x09, 0x8e, 0x8a, 0x3c, 0x1b, 0xf9, 0xbd, 0x33, 0x0d, 0x51, 0x3d, 0x12, 0x6f, 0x4e, 0x72, 0xe0, 0x17, 0x20, 0xe9, 0x75, 0xe6, 0x3a, 0xb2, 0x13, 0x83, 0x4e, 0x7a, 0x08, 0x9e, 0xd1, 0x04, 0x5f, 0x6b, 0x42, 0x0b, 0x76, 0x2a, 0x2d, 0x77, 0x53, 0x6c, 0x65, 0x6d, 0x8e, 0x25, 0x3c, 0xb6, 0x8b, 0x69,
- /* (2^177)P */ 0xb9, 0x49, 0x28, 0xd0, 0xdc, 0x6c, 0x8f, 0x4c, 0xc9, 0x14, 0x8a, 0x38, 0xa3, 0xcb, 0xc4, 0x9d, 0x53, 0xcf, 0xe9, 0xe3, 0xcf, 0xe0, 0xb1, 0xf2, 0x1b, 0x4c, 0x7f, 0x83, 0x2a, 0x7a, 0xe9, 0x8b, 0x3b, 0x86, 0x61, 0x30, 0xe9, 0x99, 0xbd, 0xba, 0x19, 0x6e, 0x65, 0x2a, 0x12, 0x3e, 0x9c, 0xa8, 0xaf, 0xc3, 0xcf, 0xf8, 0x1f, 0x77, 0x86, 0xea,
- /* (2^178)P */ 0x30, 0xde, 0xe7, 0xff, 0x54, 0xf7, 0xa2, 0x59, 0xf6, 0x0b, 0xfb, 0x7a, 0xf2, 0x39, 0xf0, 0xdb, 0x39, 0xbc, 0xf0, 0xfa, 0x60, 0xeb, 0x6b, 0x4f, 0x47, 0x17, 0xc8, 0x00, 0x65, 0x6d, 0x25, 0x1c, 0xd0, 0x48, 0x56, 0x53, 0x45, 0x11, 0x30, 0x02, 0x49, 0x20, 0x27, 0xac, 0xf2, 0x4c, 0xac, 0x64, 0x3d, 0x52, 0xb8, 0x89, 0xe0, 0x93, 0x16, 0x0f,
- /* (2^179)P */ 0x84, 0x09, 0xba, 0x40, 0xb2, 0x2f, 0xa3, 0xa8, 0xc2, 0xba, 0x46, 0x33, 0x05, 0x9d, 0x62, 0xad, 0xa1, 0x3c, 0x33, 0xef, 0x0d, 0xeb, 0xf0, 0x77, 0x11, 0x5a, 0xb0, 0x21, 0x9c, 0xdf, 0x55, 0x24, 0x25, 0x35, 0x51, 0x61, 0x92, 0xf0, 0xb1, 0xce, 0xf5, 0xd4, 0x7b, 0x6c, 0x21, 0x9d, 0x56, 0x52, 0xf8, 0xa1, 0x4c, 0xe9, 0x27, 0x55, 0xac, 0x91,
- /* (2^180)P */ 0x03, 0x3e, 0x30, 0xd2, 0x0a, 0xfa, 0x7d, 0x82, 0x3d, 0x1f, 0x8b, 0xcb, 0xb6, 0x04, 0x5c, 0xcc, 0x8b, 0xda, 0xe2, 0x68, 0x74, 0x08, 0x8c, 0x44, 0x83, 0x57, 0x6d, 0x6f, 0x80, 0xb0, 0x7e, 0xa9, 0x82, 0x91, 0x7b, 0x4c, 0x37, 0x97, 0xd1, 0x63, 0xd1, 0xbd, 0x45, 0xe6, 0x8a, 0x86, 0xd6, 0x89, 0x54, 0xfd, 0xd2, 0xb1, 0xd7, 0x54, 0xad, 0xaf,
- /* (2^181)P */ 0x8b, 0x33, 0x62, 0x49, 0x9f, 0x63, 0xf9, 0x87, 0x42, 0x58, 0xbf, 0xb3, 0xe6, 0x68, 0x02, 0x60, 0x5c, 0x76, 0x62, 0xf7, 0x61, 0xd7, 0x36, 0x31, 0xf7, 0x9c, 0xb5, 0xe5, 0x13, 0x6c, 0xea, 0x78, 0xae, 0xcf, 0xde, 0xbf, 0xb6, 0xeb, 0x4f, 0xc8, 0x2a, 0xb4, 0x9a, 0x9f, 0xf3, 0xd1, 0x6a, 0xec, 0x0c, 0xbd, 0x85, 0x98, 0x40, 0x06, 0x1c, 0x2a,
- /* (2^182)P */ 0x74, 0x3b, 0xe7, 0x81, 0xd5, 0xae, 0x54, 0x56, 0x03, 0xe8, 0x97, 0x16, 0x76, 0xcf, 0x24, 0x96, 0x96, 0x5b, 0xcc, 0x09, 0xab, 0x23, 0x6f, 0x54, 0xae, 0x8f, 0xe4, 0x12, 0xcb, 0xfd, 0xbc, 0xac, 0x93, 0x45, 0x3d, 0x68, 0x08, 0x22, 0x59, 0xc6, 0xf0, 0x47, 0x19, 0x8c, 0x79, 0x93, 0x1e, 0x0e, 0x30, 0xb0, 0x94, 0xfb, 0x17, 0x1d, 0x5a, 0x12,
- /* (2^183)P */ 0x85, 0xff, 0x40, 0x18, 0x85, 0xff, 0x44, 0x37, 0x69, 0x23, 0x4d, 0x34, 0xe1, 0xeb, 0xa3, 0x1b, 0x55, 0x40, 0xc1, 0x64, 0xf4, 0xd4, 0x13, 0x0a, 0x9f, 0xb9, 0x19, 0xfc, 0x88, 0x7d, 0xc0, 0x72, 0xcf, 0x69, 0x2f, 0xd2, 0x0c, 0x82, 0x0f, 0xda, 0x08, 0xba, 0x0f, 0xaa, 0x3b, 0xe9, 0xe5, 0x83, 0x7a, 0x06, 0xe8, 0x1b, 0x38, 0x43, 0xc3, 0x54,
- /* (2^184)P */ 0x14, 0xaa, 0xb3, 0x6e, 0xe6, 0x28, 0xee, 0xc5, 0x22, 0x6c, 0x7c, 0xf9, 0xa8, 0x71, 0xcc, 0xfe, 0x68, 0x7e, 0xd3, 0xb8, 0x37, 0x96, 0xca, 0x0b, 0xd9, 0xb6, 0x06, 0xa9, 0xf6, 0x71, 0xe8, 0x31, 0xf7, 0xd8, 0xf1, 0x5d, 0xab, 0xb9, 0xf0, 0x5c, 0x98, 0xcf, 0x22, 0xa2, 0x2a, 0xf6, 0xd0, 0x59, 0xf0, 0x9d, 0xd9, 0x6a, 0x4f, 0x59, 0x57, 0xad,
- /* (2^185)P */ 0xd7, 0x2b, 0x3d, 0x38, 0x4c, 0x2e, 0x23, 0x4d, 0x49, 0xa2, 0x62, 0x62, 0xf9, 0x0f, 0xde, 0x08, 0xf3, 0x86, 0x71, 0xb6, 0xc7, 0xf9, 0x85, 0x9c, 0x33, 0xa1, 0xcf, 0x16, 0xaa, 0x60, 0xb9, 0xb7, 0xea, 0xed, 0x01, 0x1c, 0x59, 0xdb, 0x3f, 0x3f, 0x97, 0x2e, 0xf0, 0x09, 0x9f, 0x10, 0x85, 0x5f, 0x53, 0x39, 0xf3, 0x13, 0x40, 0x56, 0x95, 0xf9,
- /* (2^186)P */ 0xb4, 0xe3, 0xda, 0xc6, 0x1f, 0x78, 0x8e, 0xac, 0xd4, 0x20, 0x1d, 0xa0, 0xbf, 0x4c, 0x09, 0x16, 0xa7, 0x30, 0xb5, 0x8d, 0x9e, 0xa1, 0x5f, 0x6d, 0x52, 0xf4, 0x71, 0xb6, 0x32, 0x2d, 0x21, 0x51, 0xc6, 0xfc, 0x2f, 0x08, 0xf4, 0x13, 0x6c, 0x55, 0xba, 0x72, 0x81, 0x24, 0x49, 0x0e, 0x4f, 0x06, 0x36, 0x39, 0x6a, 0xc5, 0x81, 0xfc, 0xeb, 0xb2,
- /* (2^187)P */ 0x7d, 0x8d, 0xc8, 0x6c, 0xea, 0xb4, 0xb9, 0xe8, 0x40, 0xc9, 0x69, 0xc9, 0x30, 0x05, 0xfd, 0x34, 0x46, 0xfd, 0x94, 0x05, 0x16, 0xf5, 0x4b, 0x13, 0x3d, 0x24, 0x1a, 0xd6, 0x64, 0x2b, 0x9c, 0xe2, 0xa5, 0xd9, 0x98, 0xe0, 0xe8, 0xf4, 0xbc, 0x2c, 0xbd, 0xa2, 0x56, 0xe3, 0x9e, 0x14, 0xdb, 0xbf, 0x05, 0xbf, 0x9a, 0x13, 0x5d, 0xf7, 0x91, 0xa3,
- /* (2^188)P */ 0x8b, 0xcb, 0x27, 0xf3, 0x15, 0x26, 0x05, 0x40, 0x0f, 0xa6, 0x15, 0x13, 0x71, 0x95, 0xa2, 0xc6, 0x38, 0x04, 0x67, 0xf8, 0x9a, 0x83, 0x06, 0xaa, 0x25, 0x36, 0x72, 0x01, 0x6f, 0x74, 0x5f, 0xe5, 0x6e, 0x44, 0x99, 0xce, 0x13, 0xbc, 0x82, 0xc2, 0x0d, 0xa4, 0x98, 0x50, 0x38, 0xf3, 0xa2, 0xc5, 0xe5, 0x24, 0x1f, 0x6f, 0x56, 0x3e, 0x07, 0xb2,
- /* (2^189)P */ 0xbd, 0x0f, 0x32, 0x60, 0x07, 0xb1, 0xd7, 0x0b, 0x11, 0x07, 0x57, 0x02, 0x89, 0xe8, 0x8b, 0xe8, 0x5a, 0x1f, 0xee, 0x54, 0x6b, 0xff, 0xb3, 0x04, 0x07, 0x57, 0x13, 0x0b, 0x94, 0xa8, 0x4d, 0x81, 0xe2, 0x17, 0x16, 0x45, 0xd4, 0x4b, 0xf7, 0x7e, 0x64, 0x66, 0x20, 0xe8, 0x0b, 0x26, 0xfd, 0xa9, 0x8a, 0x47, 0x52, 0x89, 0x14, 0xd0, 0xd1, 0xa1,
- /* (2^190)P */ 0xdc, 0x03, 0xe6, 0x20, 0x44, 0x47, 0x8f, 0x04, 0x16, 0x24, 0x22, 0xc1, 0x55, 0x5c, 0xbe, 0x43, 0xc3, 0x92, 0xc5, 0x54, 0x3d, 0x5d, 0xd1, 0x05, 0x9c, 0xc6, 0x7c, 0xbf, 0x23, 0x84, 0x1a, 0xba, 0x4f, 0x1f, 0xfc, 0xa1, 0xae, 0x1a, 0x64, 0x02, 0x51, 0xf1, 0xcb, 0x7a, 0x20, 0xce, 0xb2, 0x34, 0x3c, 0xca, 0xe0, 0xe4, 0xba, 0x22, 0xd4, 0x7b,
- /* (2^191)P */ 0xca, 0xfd, 0xca, 0xd7, 0xde, 0x61, 0xae, 0xf0, 0x79, 0x0c, 0x20, 0xab, 0xbc, 0x6f, 0x4d, 0x61, 0xf0, 0xc7, 0x9c, 0x8d, 0x4b, 0x52, 0xf3, 0xb9, 0x48, 0x63, 0x0b, 0xb6, 0xd2, 0x25, 0x9a, 0x96, 0x72, 0xc1, 0x6b, 0x0c, 0xb5, 0xfb, 0x71, 0xaa, 0xad, 0x47, 0x5b, 0xe7, 0xc0, 0x0a, 0x55, 0xb2, 0xd4, 0x16, 0x2f, 0xb1, 0x01, 0xfd, 0xce, 0x27,
- /* (2^192)P */ 0x64, 0x11, 0x4b, 0xab, 0x57, 0x09, 0xc6, 0x49, 0x4a, 0x37, 0xc3, 0x36, 0xc4, 0x7b, 0x81, 0x1f, 0x42, 0xed, 0xbb, 0xe0, 0xa0, 0x8d, 0x51, 0xe6, 0xca, 0x8b, 0xb9, 0xcd, 0x99, 0x2d, 0x91, 0x53, 0xa9, 0x47, 0xcb, 0x32, 0xc7, 0xa4, 0x92, 0xec, 0x46, 0x74, 0x44, 0x6d, 0x71, 0x9f, 0x6d, 0x0c, 0x69, 0xa4, 0xf8, 0xbe, 0x9f, 0x7f, 0xa0, 0xd7,
- /* (2^193)P */ 0x5f, 0x33, 0xb6, 0x91, 0xc8, 0xa5, 0x3f, 0x5d, 0x7f, 0x38, 0x6e, 0x74, 0x20, 0x4a, 0xd6, 0x2b, 0x98, 0x2a, 0x41, 0x4b, 0x83, 0x64, 0x0b, 0x92, 0x7a, 0x06, 0x1e, 0xc6, 0x2c, 0xf6, 0xe4, 0x91, 0xe5, 0xb1, 0x2e, 0x6e, 0x4e, 0xa8, 0xc8, 0x14, 0x32, 0x57, 0x44, 0x1c, 0xe4, 0xb9, 0x7f, 0x54, 0x51, 0x08, 0x81, 0xaa, 0x4e, 0xce, 0xa1, 0x5d,
- /* (2^194)P */ 0x5c, 0xd5, 0x9b, 0x5e, 0x7c, 0xb5, 0xb1, 0x52, 0x73, 0x00, 0x41, 0x56, 0x79, 0x08, 0x7e, 0x07, 0x28, 0x06, 0xa6, 0xfb, 0x7f, 0x69, 0xbd, 0x7a, 0x3c, 0xae, 0x9f, 0x39, 0xbb, 0x54, 0xa2, 0x79, 0xb9, 0x0e, 0x7f, 0xbb, 0xe0, 0xe6, 0xb7, 0x27, 0x64, 0x38, 0x45, 0xdb, 0x84, 0xe4, 0x61, 0x72, 0x3f, 0xe2, 0x24, 0xfe, 0x7a, 0x31, 0x9a, 0xc9,
- /* (2^195)P */ 0xa1, 0xd2, 0xa4, 0xee, 0x24, 0x96, 0xe5, 0x5b, 0x79, 0x78, 0x3c, 0x7b, 0x82, 0x3b, 0x8b, 0x58, 0x0b, 0xa3, 0x63, 0x2d, 0xbc, 0x75, 0x46, 0xe8, 0x83, 0x1a, 0xc0, 0x2a, 0x92, 0x61, 0xa8, 0x75, 0x37, 0x3c, 0xbf, 0x0f, 0xef, 0x8f, 0x6c, 0x97, 0x75, 0x10, 0x05, 0x7a, 0xde, 0x23, 0xe8, 0x2a, 0x35, 0xeb, 0x41, 0x64, 0x7d, 0xcf, 0xe0, 0x52,
- /* (2^196)P */ 0x4a, 0xd0, 0x49, 0x93, 0xae, 0xf3, 0x24, 0x8c, 0xe1, 0x09, 0x98, 0x45, 0xd8, 0xb9, 0xfe, 0x8e, 0x8c, 0xa8, 0x2c, 0xc9, 0x9f, 0xce, 0x01, 0xdc, 0x38, 0x11, 0xab, 0x85, 0xb9, 0xe8, 0x00, 0x51, 0xfd, 0x82, 0xe1, 0x9b, 0x4e, 0xfc, 0xb5, 0x2a, 0x0f, 0x8b, 0xda, 0x4e, 0x02, 0xca, 0xcc, 0xe3, 0x91, 0xc4, 0xe0, 0xcf, 0x7b, 0xd6, 0xe6, 0x6a,
- /* (2^197)P */ 0xfe, 0x11, 0xd7, 0xaa, 0xe3, 0x0c, 0x52, 0x2e, 0x04, 0xe0, 0xe0, 0x61, 0xc8, 0x05, 0xd7, 0x31, 0x4c, 0xc3, 0x9b, 0x2d, 0xce, 0x59, 0xbe, 0x12, 0xb7, 0x30, 0x21, 0xfc, 0x81, 0xb8, 0x5e, 0x57, 0x73, 0xd0, 0xad, 0x8e, 0x9e, 0xe4, 0xeb, 0xcd, 0xcf, 0xd2, 0x0f, 0x01, 0x35, 0x16, 0xed, 0x7a, 0x43, 0x8e, 0x42, 0xdc, 0xea, 0x4c, 0xa8, 0x7c,
- /* (2^198)P */ 0x37, 0x26, 0xcc, 0x76, 0x0b, 0xe5, 0x76, 0xdd, 0x3e, 0x19, 0x3c, 0xc4, 0x6c, 0x7f, 0xd0, 0x03, 0xc1, 0xb8, 0x59, 0x82, 0xca, 0x36, 0xc1, 0xe4, 0xc8, 0xb2, 0x83, 0x69, 0x9c, 0xc5, 0x9d, 0x12, 0x82, 0x1c, 0xea, 0xb2, 0x84, 0x9f, 0xf3, 0x52, 0x6b, 0xbb, 0xd8, 0x81, 0x56, 0x83, 0x04, 0x66, 0x05, 0x22, 0x49, 0x37, 0x93, 0xb1, 0xfd, 0xd5,
- /* (2^199)P */ 0xaf, 0x96, 0xbf, 0x03, 0xbe, 0xe6, 0x5d, 0x78, 0x19, 0xba, 0x37, 0x46, 0x0a, 0x2b, 0x52, 0x7c, 0xd8, 0x51, 0x9e, 0x3d, 0x29, 0x42, 0xdb, 0x0e, 0x31, 0x20, 0x94, 0xf8, 0x43, 0x9a, 0x2d, 0x22, 0xd3, 0xe3, 0xa1, 0x79, 0x68, 0xfb, 0x2d, 0x7e, 0xd6, 0x79, 0xda, 0x0b, 0xc6, 0x5b, 0x76, 0x68, 0xf0, 0xfe, 0x72, 0x59, 0xbb, 0xa1, 0x9c, 0x74,
- /* (2^200)P */ 0x0a, 0xd9, 0xec, 0xc5, 0xbd, 0xf0, 0xda, 0xcf, 0x82, 0xab, 0x46, 0xc5, 0x32, 0x13, 0xdc, 0x5b, 0xac, 0xc3, 0x53, 0x9a, 0x7f, 0xef, 0xa5, 0x40, 0x5a, 0x1f, 0xc1, 0x12, 0x91, 0x54, 0x83, 0x6a, 0xb0, 0x9a, 0x85, 0x4d, 0xbf, 0x36, 0x8e, 0xd3, 0xa2, 0x2b, 0xe5, 0xd6, 0xc6, 0xe1, 0x58, 0x5b, 0x82, 0x9b, 0xc8, 0xf2, 0x03, 0xba, 0xf5, 0x92,
- /* (2^201)P */ 0xfb, 0x21, 0x7e, 0xde, 0xe7, 0xb4, 0xc0, 0x56, 0x86, 0x3a, 0x5b, 0x78, 0xf8, 0xf0, 0xf4, 0xe7, 0x5c, 0x00, 0xd2, 0xd7, 0xd6, 0xf8, 0x75, 0x5e, 0x0f, 0x3e, 0xd1, 0x4b, 0x77, 0xd8, 0xad, 0xb0, 0xc9, 0x8b, 0x59, 0x7d, 0x30, 0x76, 0x64, 0x7a, 0x76, 0xd9, 0x51, 0x69, 0xfc, 0xbd, 0x8e, 0xb5, 0x55, 0xe0, 0xd2, 0x07, 0x15, 0xa9, 0xf7, 0xa4,
- /* (2^202)P */ 0xaa, 0x2d, 0x2f, 0x2b, 0x3c, 0x15, 0xdd, 0xcd, 0xe9, 0x28, 0x82, 0x4f, 0xa2, 0xaa, 0x31, 0x48, 0xcc, 0xfa, 0x07, 0x73, 0x8a, 0x34, 0x74, 0x0d, 0xab, 0x1a, 0xca, 0xd2, 0xbf, 0x3a, 0xdb, 0x1a, 0x5f, 0x50, 0x62, 0xf4, 0x6b, 0x83, 0x38, 0x43, 0x96, 0xee, 0x6b, 0x39, 0x1e, 0xf0, 0x17, 0x80, 0x1e, 0x9b, 0xed, 0x2b, 0x2f, 0xcc, 0x65, 0xf7,
- /* (2^203)P */ 0x03, 0xb3, 0x23, 0x9c, 0x0d, 0xd1, 0xeb, 0x7e, 0x34, 0x17, 0x8a, 0x4c, 0xde, 0x54, 0x39, 0xc4, 0x11, 0x82, 0xd3, 0xa4, 0x00, 0x32, 0x95, 0x9c, 0xa6, 0x64, 0x76, 0x6e, 0xd6, 0x53, 0x27, 0xb4, 0x6a, 0x14, 0x8c, 0x54, 0xf6, 0x58, 0x9e, 0x22, 0x4a, 0x55, 0x18, 0x77, 0xd0, 0x08, 0x6b, 0x19, 0x8a, 0xb5, 0xe7, 0x19, 0xb8, 0x60, 0x92, 0xb1,
- /* (2^204)P */ 0x66, 0xec, 0xf3, 0x12, 0xde, 0x67, 0x7f, 0xd4, 0x5b, 0xf6, 0x70, 0x64, 0x0a, 0xb5, 0xc2, 0xf9, 0xb3, 0x64, 0xab, 0x56, 0x46, 0xc7, 0x93, 0xc2, 0x8b, 0x2d, 0xd0, 0xd6, 0x39, 0x3b, 0x1f, 0xcd, 0xb3, 0xac, 0xcc, 0x2c, 0x27, 0x6a, 0xbc, 0xb3, 0x4b, 0xa8, 0x3c, 0x69, 0x20, 0xe2, 0x18, 0x35, 0x17, 0xe1, 0x8a, 0xd3, 0x11, 0x74, 0xaa, 0x4d,
- /* (2^205)P */ 0x96, 0xc4, 0x16, 0x7e, 0xfd, 0xf5, 0xd0, 0x7d, 0x1f, 0x32, 0x1b, 0xdb, 0xa6, 0xfd, 0x51, 0x75, 0x4d, 0xd7, 0x00, 0xe5, 0x7f, 0x58, 0x5b, 0xeb, 0x4b, 0x6a, 0x78, 0xfe, 0xe5, 0xd6, 0x8f, 0x99, 0x17, 0xca, 0x96, 0x45, 0xf7, 0x52, 0xdf, 0x84, 0x06, 0x77, 0xb9, 0x05, 0x63, 0x5d, 0xe9, 0x91, 0xb1, 0x4b, 0x82, 0x5a, 0xdb, 0xd7, 0xca, 0x69,
- /* (2^206)P */ 0x02, 0xd3, 0x38, 0x38, 0x87, 0xea, 0xbd, 0x9f, 0x11, 0xca, 0xf3, 0x21, 0xf1, 0x9b, 0x35, 0x97, 0x98, 0xff, 0x8e, 0x6d, 0x3d, 0xd6, 0xb2, 0xfa, 0x68, 0xcb, 0x7e, 0x62, 0x85, 0xbb, 0xc7, 0x5d, 0xee, 0x32, 0x30, 0x2e, 0x71, 0x96, 0x63, 0x43, 0x98, 0xc4, 0xa7, 0xde, 0x60, 0xb2, 0xd9, 0x43, 0x4a, 0xfa, 0x97, 0x2d, 0x5f, 0x21, 0xd4, 0xfe,
- /* (2^207)P */ 0x3b, 0x20, 0x29, 0x07, 0x07, 0xb5, 0x78, 0xc3, 0xc7, 0xab, 0x56, 0xba, 0x40, 0xde, 0x1d, 0xcf, 0xc3, 0x00, 0x56, 0x21, 0x0c, 0xc8, 0x42, 0xd9, 0x0e, 0xcd, 0x02, 0x7c, 0x07, 0xb9, 0x11, 0xd7, 0x96, 0xaf, 0xff, 0xad, 0xc5, 0xba, 0x30, 0x6d, 0x82, 0x3a, 0xbf, 0xef, 0x7b, 0xf7, 0x0a, 0x74, 0xbd, 0x31, 0x0c, 0xe4, 0xec, 0x1a, 0xe5, 0xc5,
- /* (2^208)P */ 0xcc, 0xf2, 0x28, 0x16, 0x12, 0xbf, 0xef, 0x85, 0xbc, 0xf7, 0xcb, 0x9f, 0xdb, 0xa8, 0xb2, 0x49, 0x53, 0x48, 0xa8, 0x24, 0xa8, 0x68, 0x8d, 0xbb, 0x21, 0x0a, 0x5a, 0xbd, 0xb2, 0x91, 0x61, 0x47, 0xc4, 0x43, 0x08, 0xa6, 0x19, 0xef, 0x8e, 0x88, 0x39, 0xc6, 0x33, 0x30, 0xf3, 0x0e, 0xc5, 0x92, 0x66, 0xd6, 0xfe, 0xc5, 0x12, 0xd9, 0x4c, 0x2d,
- /* (2^209)P */ 0x30, 0x34, 0x07, 0xbf, 0x9c, 0x5a, 0x4e, 0x65, 0xf1, 0x39, 0x35, 0x38, 0xae, 0x7b, 0x55, 0xac, 0x6a, 0x92, 0x24, 0x7e, 0x50, 0xd3, 0xba, 0x78, 0x51, 0xfe, 0x4d, 0x32, 0x05, 0x11, 0xf5, 0x52, 0xf1, 0x31, 0x45, 0x39, 0x98, 0x7b, 0x28, 0x56, 0xc3, 0x5d, 0x4f, 0x07, 0x6f, 0x84, 0xb8, 0x1a, 0x58, 0x0b, 0xc4, 0x7c, 0xc4, 0x8d, 0x32, 0x8e,
- /* (2^210)P */ 0x7e, 0xaf, 0x98, 0xce, 0xc5, 0x2b, 0x9d, 0xf6, 0xfa, 0x2c, 0xb6, 0x2a, 0x5a, 0x1d, 0xc0, 0x24, 0x8d, 0xa4, 0xce, 0xb1, 0x12, 0x01, 0xf9, 0x79, 0xc6, 0x79, 0x38, 0x0c, 0xd4, 0x07, 0xc9, 0xf7, 0x37, 0xa1, 0x0b, 0xfe, 0x72, 0xec, 0x5d, 0xd6, 0xb0, 0x1c, 0x70, 0xbe, 0x70, 0x01, 0x13, 0xe0, 0x86, 0x95, 0xc7, 0x2e, 0x12, 0x3b, 0xe6, 0xa6,
- /* (2^211)P */ 0x24, 0x82, 0x67, 0xe0, 0x14, 0x7b, 0x56, 0x08, 0x38, 0x44, 0xdb, 0xa0, 0x3a, 0x05, 0x47, 0xb2, 0xc0, 0xac, 0xd1, 0xcc, 0x3f, 0x82, 0xb8, 0x8a, 0x88, 0xbc, 0xf5, 0x33, 0xa1, 0x35, 0x0f, 0xf6, 0xe2, 0xef, 0x6c, 0xf7, 0x37, 0x9e, 0xe8, 0x10, 0xca, 0xb0, 0x8e, 0x80, 0x86, 0x00, 0x23, 0xd0, 0x4a, 0x76, 0x9f, 0xf7, 0x2c, 0x52, 0x15, 0x0e,
- /* (2^212)P */ 0x5e, 0x49, 0xe1, 0x2c, 0x9a, 0x01, 0x76, 0xa6, 0xb3, 0x07, 0x5b, 0xa4, 0x07, 0xef, 0x1d, 0xc3, 0x6a, 0xbb, 0x64, 0xbe, 0x71, 0x15, 0x6e, 0x32, 0x31, 0x46, 0x9a, 0x9e, 0x8f, 0x45, 0x73, 0xce, 0x0b, 0x94, 0x1a, 0x52, 0x07, 0xf4, 0x50, 0x30, 0x49, 0x53, 0x50, 0xfb, 0x71, 0x1f, 0x5a, 0x03, 0xa9, 0x76, 0xf2, 0x8f, 0x42, 0xff, 0xed, 0xed,
- /* (2^213)P */ 0xed, 0x08, 0xdb, 0x91, 0x1c, 0xee, 0xa2, 0xb4, 0x47, 0xa2, 0xfa, 0xcb, 0x03, 0xd1, 0xff, 0x8c, 0xad, 0x64, 0x50, 0x61, 0xcd, 0xfc, 0x88, 0xa0, 0x31, 0x95, 0x30, 0xb9, 0x58, 0xdd, 0xd7, 0x43, 0xe4, 0x46, 0xc2, 0x16, 0xd9, 0x72, 0x4a, 0x56, 0x51, 0x70, 0x85, 0xf1, 0xa1, 0x80, 0x40, 0xd5, 0xba, 0x67, 0x81, 0xda, 0xcd, 0x03, 0xea, 0x51,
- /* (2^214)P */ 0x42, 0x50, 0xf0, 0xef, 0x37, 0x61, 0x72, 0x85, 0xe1, 0xf1, 0xff, 0x6f, 0x3d, 0xe8, 0x7b, 0x21, 0x5c, 0xe5, 0x50, 0x03, 0xde, 0x00, 0xc1, 0xf7, 0x3a, 0x55, 0x12, 0x1c, 0x9e, 0x1e, 0xce, 0xd1, 0x2f, 0xaf, 0x05, 0x70, 0x5b, 0x47, 0xf2, 0x04, 0x7a, 0x89, 0xbc, 0x78, 0xa6, 0x65, 0x6c, 0xaa, 0x3c, 0xa2, 0x3c, 0x8b, 0x5c, 0xa9, 0x22, 0x48,
- /* (2^215)P */ 0x7e, 0x8c, 0x8f, 0x2f, 0x60, 0xe3, 0x5a, 0x94, 0xd4, 0xce, 0xdd, 0x9d, 0x83, 0x3b, 0x77, 0x78, 0x43, 0x1d, 0xfd, 0x8f, 0xc8, 0xe8, 0x02, 0x90, 0xab, 0xf6, 0xc9, 0xfc, 0xf1, 0x63, 0xaa, 0x5f, 0x42, 0xf1, 0x78, 0x34, 0x64, 0x16, 0x75, 0x9c, 0x7d, 0xd0, 0xe4, 0x74, 0x5a, 0xa8, 0xfb, 0xcb, 0xac, 0x20, 0xa3, 0xc2, 0xa6, 0x20, 0xf8, 0x1b,
- /* (2^216)P */ 0x00, 0x4f, 0x1e, 0x56, 0xb5, 0x34, 0xb2, 0x87, 0x31, 0xe5, 0xee, 0x8d, 0xf1, 0x41, 0x67, 0xb7, 0x67, 0x3a, 0x54, 0x86, 0x5c, 0xf0, 0x0b, 0x37, 0x2f, 0x1b, 0x92, 0x5d, 0x58, 0x93, 0xdc, 0xd8, 0x58, 0xcc, 0x9e, 0x67, 0xd0, 0x97, 0x3a, 0xaf, 0x49, 0x39, 0x2d, 0x3b, 0xd8, 0x98, 0xfb, 0x76, 0x6b, 0xe7, 0xaf, 0xc3, 0x45, 0x44, 0x53, 0x94,
- /* (2^217)P */ 0x30, 0xbd, 0x90, 0x75, 0xd3, 0xbd, 0x3b, 0x58, 0x27, 0x14, 0x9f, 0x6b, 0xd4, 0x31, 0x99, 0xcd, 0xde, 0x3a, 0x21, 0x1e, 0xb4, 0x02, 0xe4, 0x33, 0x04, 0x02, 0xb0, 0x50, 0x66, 0x68, 0x90, 0xdd, 0x7b, 0x69, 0x31, 0xd9, 0xcf, 0x68, 0x73, 0xf1, 0x60, 0xdd, 0xc8, 0x1d, 0x5d, 0xe3, 0xd6, 0x5b, 0x2a, 0xa4, 0xea, 0xc4, 0x3f, 0x08, 0xcd, 0x9c,
- /* (2^218)P */ 0x6b, 0x1a, 0xbf, 0x55, 0xc1, 0x1b, 0x0c, 0x05, 0x09, 0xdf, 0xf5, 0x5e, 0xa3, 0x77, 0x95, 0xe9, 0xdf, 0x19, 0xdd, 0xc7, 0x94, 0xcb, 0x06, 0x73, 0xd0, 0x88, 0x02, 0x33, 0x94, 0xca, 0x7a, 0x2f, 0x8e, 0x3d, 0x72, 0x61, 0x2d, 0x4d, 0xa6, 0x61, 0x1f, 0x32, 0x5e, 0x87, 0x53, 0x36, 0x11, 0x15, 0x20, 0xb3, 0x5a, 0x57, 0x51, 0x93, 0x20, 0xd8,
- /* (2^219)P */ 0xb7, 0x56, 0xf4, 0xab, 0x7d, 0x0c, 0xfb, 0x99, 0x1a, 0x30, 0x29, 0xb0, 0x75, 0x2a, 0xf8, 0x53, 0x71, 0x23, 0xbd, 0xa7, 0xd8, 0x0a, 0xe2, 0x27, 0x65, 0xe9, 0x74, 0x26, 0x98, 0x4a, 0x69, 0x19, 0xb2, 0x4d, 0x0a, 0x17, 0x98, 0xb2, 0xa9, 0x57, 0x4e, 0xf6, 0x86, 0xc8, 0x01, 0xa4, 0xc6, 0x98, 0xad, 0x5a, 0x90, 0x2c, 0x05, 0x46, 0x64, 0xb7,
- /* (2^220)P */ 0x7b, 0x91, 0xdf, 0xfc, 0xf8, 0x1c, 0x8c, 0x15, 0x9e, 0xf7, 0xd5, 0xa8, 0xe8, 0xe7, 0xe3, 0xa3, 0xb0, 0x04, 0x74, 0xfa, 0x78, 0xfb, 0x26, 0xbf, 0x67, 0x42, 0xf9, 0x8c, 0x9b, 0xb4, 0x69, 0x5b, 0x02, 0x13, 0x6d, 0x09, 0x6c, 0xd6, 0x99, 0x61, 0x7b, 0x89, 0x4a, 0x67, 0x75, 0xa3, 0x98, 0x13, 0x23, 0x1d, 0x18, 0x24, 0x0e, 0xef, 0x41, 0x79,
- /* (2^221)P */ 0x86, 0x33, 0xab, 0x08, 0xcb, 0xbf, 0x1e, 0x76, 0x3c, 0x0b, 0xbd, 0x30, 0xdb, 0xe9, 0xa3, 0x35, 0x87, 0x1b, 0xe9, 0x07, 0x00, 0x66, 0x7f, 0x3b, 0x35, 0x0c, 0x8a, 0x3f, 0x61, 0xbc, 0xe0, 0xae, 0xf6, 0xcc, 0x54, 0xe1, 0x72, 0x36, 0x2d, 0xee, 0x93, 0x24, 0xf8, 0xd7, 0xc5, 0xf9, 0xcb, 0xb0, 0xe5, 0x88, 0x0d, 0x23, 0x4b, 0x76, 0x15, 0xa2,
- /* (2^222)P */ 0x37, 0xdb, 0x83, 0xd5, 0x6d, 0x06, 0x24, 0x37, 0x1b, 0x15, 0x85, 0x15, 0xe2, 0xc0, 0x4e, 0x02, 0xa9, 0x6d, 0x0a, 0x3a, 0x94, 0x4a, 0x6f, 0x49, 0x00, 0x01, 0x72, 0xbb, 0x60, 0x14, 0x35, 0xae, 0xb4, 0xc6, 0x01, 0x0a, 0x00, 0x9e, 0xc3, 0x58, 0xc5, 0xd1, 0x5e, 0x30, 0x73, 0x96, 0x24, 0x85, 0x9d, 0xf0, 0xf9, 0xec, 0x09, 0xd3, 0xe7, 0x70,
- /* (2^223)P */ 0xf3, 0xbd, 0x96, 0x87, 0xe9, 0x71, 0xbd, 0xd6, 0xa2, 0x45, 0xeb, 0x0a, 0xcd, 0x2c, 0xf1, 0x72, 0xa6, 0x31, 0xa9, 0x6f, 0x09, 0xa1, 0x5e, 0xdd, 0xc8, 0x8d, 0x0d, 0xbc, 0x5a, 0x8d, 0xb1, 0x2c, 0x9a, 0xcc, 0x37, 0x74, 0xc2, 0xa9, 0x4e, 0xd6, 0xc0, 0x3c, 0xa0, 0x23, 0xb0, 0xa0, 0x77, 0x14, 0x80, 0x45, 0x71, 0x6a, 0x2d, 0x41, 0xc3, 0x82,
- /* (2^224)P */ 0x37, 0x44, 0xec, 0x8a, 0x3e, 0xc1, 0x0c, 0xa9, 0x12, 0x9c, 0x08, 0x88, 0xcb, 0xd9, 0xf8, 0xba, 0x00, 0xd6, 0xc3, 0xdf, 0xef, 0x7a, 0x44, 0x7e, 0x25, 0x69, 0xc9, 0xc1, 0x46, 0xe5, 0x20, 0x9e, 0xcc, 0x0b, 0x05, 0x3e, 0xf4, 0x78, 0x43, 0x0c, 0xa6, 0x2f, 0xc1, 0xfa, 0x70, 0xb2, 0x3c, 0x31, 0x7a, 0x63, 0x58, 0xab, 0x17, 0xcf, 0x4c, 0x4f,
- /* (2^225)P */ 0x2b, 0x08, 0x31, 0x59, 0x75, 0x8b, 0xec, 0x0a, 0xa9, 0x79, 0x70, 0xdd, 0xf1, 0x11, 0xc3, 0x11, 0x1f, 0xab, 0x37, 0xaa, 0x26, 0xea, 0x53, 0xc4, 0x79, 0xa7, 0x91, 0x00, 0xaa, 0x08, 0x42, 0xeb, 0x8b, 0x8b, 0xe8, 0xc3, 0x2f, 0xb8, 0x78, 0x90, 0x38, 0x0e, 0x8a, 0x42, 0x0c, 0x0f, 0xbf, 0x3e, 0xf8, 0xd8, 0x07, 0xcf, 0x6a, 0x34, 0xc9, 0xfa,
- /* (2^226)P */ 0x11, 0xe0, 0x76, 0x4d, 0x23, 0xc5, 0xa6, 0xcc, 0x9f, 0x9a, 0x2a, 0xde, 0x3a, 0xb5, 0x92, 0x39, 0x19, 0x8a, 0xf1, 0x8d, 0xf9, 0x4d, 0xc9, 0xb4, 0x39, 0x9f, 0x57, 0xd8, 0x72, 0xab, 0x1d, 0x61, 0x6a, 0xb2, 0xff, 0x52, 0xba, 0x54, 0x0e, 0xfb, 0x83, 0x30, 0x8a, 0xf7, 0x3b, 0xf4, 0xd8, 0xae, 0x1a, 0x94, 0x3a, 0xec, 0x63, 0xfe, 0x6e, 0x7c,
- /* (2^227)P */ 0xdc, 0x70, 0x8e, 0x55, 0x44, 0xbf, 0xd2, 0x6a, 0xa0, 0x14, 0x61, 0x89, 0xd5, 0x55, 0x45, 0x3c, 0xf6, 0x40, 0x0d, 0x83, 0x85, 0x44, 0xb4, 0x62, 0x56, 0xfe, 0x60, 0xd7, 0x07, 0x1d, 0x47, 0x30, 0x3b, 0x73, 0xa4, 0xb5, 0xb7, 0xea, 0xac, 0xda, 0xf1, 0x17, 0xaa, 0x60, 0xdf, 0xe9, 0x84, 0xda, 0x31, 0x32, 0x61, 0xbf, 0xd0, 0x7e, 0x8a, 0x02,
- /* (2^228)P */ 0xb9, 0x51, 0xb3, 0x89, 0x21, 0x5d, 0xa2, 0xfe, 0x79, 0x2a, 0xb3, 0x2a, 0x3b, 0xe6, 0x6f, 0x2b, 0x22, 0x03, 0xea, 0x7b, 0x1f, 0xaf, 0x85, 0xc3, 0x38, 0x55, 0x5b, 0x8e, 0xb4, 0xaa, 0x77, 0xfe, 0x03, 0x6e, 0xda, 0x91, 0x24, 0x0c, 0x48, 0x39, 0x27, 0x43, 0x16, 0xd2, 0x0a, 0x0d, 0x43, 0xa3, 0x0e, 0xca, 0x45, 0xd1, 0x7f, 0xf5, 0xd3, 0x16,
- /* (2^229)P */ 0x3d, 0x32, 0x9b, 0x38, 0xf8, 0x06, 0x93, 0x78, 0x5b, 0x50, 0x2b, 0x06, 0xd8, 0x66, 0xfe, 0xab, 0x9b, 0x58, 0xc7, 0xd1, 0x4d, 0xd5, 0xf8, 0x3b, 0x10, 0x7e, 0x85, 0xde, 0x58, 0x4e, 0xdf, 0x53, 0xd9, 0x58, 0xe0, 0x15, 0x81, 0x9f, 0x1a, 0x78, 0xfc, 0x9f, 0x10, 0xc2, 0x23, 0xd6, 0x78, 0xd1, 0x9d, 0xd2, 0xd5, 0x1c, 0x53, 0xe2, 0xc9, 0x76,
- /* (2^230)P */ 0x98, 0x1e, 0x38, 0x7b, 0x71, 0x18, 0x4b, 0x15, 0xaf, 0xa1, 0xa6, 0x98, 0xcb, 0x26, 0xa3, 0xc8, 0x07, 0x46, 0xda, 0x3b, 0x70, 0x65, 0xec, 0x7a, 0x2b, 0x34, 0x94, 0xa8, 0xb6, 0x14, 0xf8, 0x1a, 0xce, 0xf7, 0xc8, 0x60, 0xf3, 0x88, 0xf4, 0x33, 0x60, 0x7b, 0xd1, 0x02, 0xe7, 0xda, 0x00, 0x4a, 0xea, 0xd2, 0xfd, 0x88, 0xd2, 0x99, 0x28, 0xf3,
- /* (2^231)P */ 0x28, 0x24, 0x1d, 0x26, 0xc2, 0xeb, 0x8b, 0x3b, 0xb4, 0x6b, 0xbe, 0x6b, 0x77, 0xff, 0xf3, 0x21, 0x3b, 0x26, 0x6a, 0x8c, 0x8e, 0x2a, 0x44, 0xa8, 0x01, 0x2b, 0x71, 0xea, 0x64, 0x30, 0xfd, 0xfd, 0x95, 0xcb, 0x39, 0x38, 0x48, 0xfa, 0x96, 0x97, 0x8c, 0x2f, 0x33, 0xca, 0x03, 0xe6, 0xd7, 0x94, 0x55, 0x6c, 0xc3, 0xb3, 0xa8, 0xf7, 0xae, 0x8c,
- /* (2^232)P */ 0xea, 0x62, 0x8a, 0xb4, 0xeb, 0x74, 0xf7, 0xb8, 0xae, 0xc5, 0x20, 0x71, 0x06, 0xd6, 0x7c, 0x62, 0x9b, 0x69, 0x74, 0xef, 0xa7, 0x6d, 0xd6, 0x8c, 0x37, 0xb9, 0xbf, 0xcf, 0xeb, 0xe4, 0x2f, 0x04, 0x02, 0x21, 0x7d, 0x75, 0x6b, 0x92, 0x48, 0xf8, 0x70, 0xad, 0x69, 0xe2, 0xea, 0x0e, 0x88, 0x67, 0x72, 0xcc, 0x2d, 0x10, 0xce, 0x2d, 0xcf, 0x65,
- /* (2^233)P */ 0x49, 0xf3, 0x57, 0x64, 0xe5, 0x5c, 0xc5, 0x65, 0x49, 0x97, 0xc4, 0x8a, 0xcc, 0xa9, 0xca, 0x94, 0x7b, 0x86, 0x88, 0xb6, 0x51, 0x27, 0x69, 0xa5, 0x0f, 0x8b, 0x06, 0x59, 0xa0, 0x94, 0xef, 0x63, 0x1a, 0x01, 0x9e, 0x4f, 0xd2, 0x5a, 0x93, 0xc0, 0x7c, 0xe6, 0x61, 0x77, 0xb6, 0xf5, 0x40, 0xd9, 0x98, 0x43, 0x5b, 0x56, 0x68, 0xe9, 0x37, 0x8f,
- /* (2^234)P */ 0xee, 0x87, 0xd2, 0x05, 0x1b, 0x39, 0x89, 0x10, 0x07, 0x6d, 0xe8, 0xfd, 0x8b, 0x4d, 0xb2, 0xa7, 0x7b, 0x1e, 0xa0, 0x6c, 0x0d, 0x3d, 0x3d, 0x49, 0xba, 0x61, 0x36, 0x1f, 0xc2, 0x84, 0x4a, 0xcc, 0x87, 0xa9, 0x1b, 0x23, 0x04, 0xe2, 0x3e, 0x97, 0xe1, 0xdb, 0xd5, 0x5a, 0xe8, 0x41, 0x6b, 0xe5, 0x5a, 0xa1, 0x99, 0xe5, 0x7b, 0xa7, 0xe0, 0x3b,
- /* (2^235)P */ 0xea, 0xa3, 0x6a, 0xdd, 0x77, 0x7f, 0x77, 0x41, 0xc5, 0x6a, 0xe4, 0xaf, 0x11, 0x5f, 0x88, 0xa5, 0x10, 0xee, 0xd0, 0x8c, 0x0c, 0xb4, 0xa5, 0x2a, 0xd0, 0xd8, 0x1d, 0x47, 0x06, 0xc0, 0xd5, 0xce, 0x51, 0x54, 0x9b, 0x2b, 0xe6, 0x2f, 0xe7, 0xe7, 0x31, 0x5f, 0x5c, 0x23, 0x81, 0x3e, 0x03, 0x93, 0xaa, 0x2d, 0x71, 0x84, 0xa0, 0x89, 0x32, 0xa6,
- /* (2^236)P */ 0x55, 0xa3, 0x13, 0x92, 0x4e, 0x93, 0x7d, 0xec, 0xca, 0x57, 0xfb, 0x37, 0xae, 0xd2, 0x18, 0x2e, 0x54, 0x05, 0x6c, 0xd1, 0x28, 0xca, 0x90, 0x40, 0x82, 0x2e, 0x79, 0xc6, 0x5a, 0xc7, 0xdd, 0x84, 0x93, 0xdf, 0x15, 0xb8, 0x1f, 0xb1, 0xf9, 0xaf, 0x2c, 0xe5, 0x32, 0xcd, 0xc2, 0x99, 0x6d, 0xac, 0x85, 0x5c, 0x63, 0xd3, 0xe2, 0xff, 0x24, 0xda,
- /* (2^237)P */ 0x2d, 0x8d, 0xfd, 0x65, 0xcc, 0xe5, 0x02, 0xa0, 0xe5, 0xb9, 0xec, 0x59, 0x09, 0x50, 0x27, 0xb7, 0x3d, 0x2a, 0x79, 0xb2, 0x76, 0x5d, 0x64, 0x95, 0xf8, 0xc5, 0xaf, 0x8a, 0x62, 0x11, 0x5c, 0x56, 0x1c, 0x05, 0x64, 0x9e, 0x5e, 0xbd, 0x54, 0x04, 0xe6, 0x9e, 0xab, 0xe6, 0x22, 0x7e, 0x42, 0x54, 0xb5, 0xa5, 0xd0, 0x8d, 0x28, 0x6b, 0x0f, 0x0b,
- /* (2^238)P */ 0x2d, 0xb2, 0x8c, 0x59, 0x10, 0x37, 0x84, 0x3b, 0x9b, 0x65, 0x1b, 0x0f, 0x10, 0xf9, 0xea, 0x60, 0x1b, 0x02, 0xf5, 0xee, 0x8b, 0xe6, 0x32, 0x7d, 0x10, 0x7f, 0x5f, 0x8c, 0x72, 0x09, 0x4e, 0x1f, 0x29, 0xff, 0x65, 0xcb, 0x3e, 0x3a, 0xd2, 0x96, 0x50, 0x1e, 0xea, 0x64, 0x99, 0xb5, 0x4c, 0x7a, 0x69, 0xb8, 0x95, 0xae, 0x48, 0xc0, 0x7c, 0xb1,
- /* (2^239)P */ 0xcd, 0x7c, 0x4f, 0x3e, 0xea, 0xf3, 0x90, 0xcb, 0x12, 0x76, 0xd1, 0x17, 0xdc, 0x0d, 0x13, 0x0f, 0xfd, 0x4d, 0xb5, 0x1f, 0xe4, 0xdd, 0xf2, 0x4d, 0x58, 0xea, 0xa5, 0x66, 0x92, 0xcf, 0xe5, 0x54, 0xea, 0x9b, 0x35, 0x83, 0x1a, 0x44, 0x8e, 0x62, 0x73, 0x45, 0x98, 0xa3, 0x89, 0x95, 0x52, 0x93, 0x1a, 0x8d, 0x63, 0x0f, 0xc2, 0x57, 0x3c, 0xb1,
- /* (2^240)P */ 0x72, 0xb4, 0xdf, 0x51, 0xb7, 0xf6, 0x52, 0xa2, 0x14, 0x56, 0xe5, 0x0a, 0x2e, 0x75, 0x81, 0x02, 0xee, 0x93, 0x48, 0x0a, 0x92, 0x4e, 0x0c, 0x0f, 0xdf, 0x09, 0x89, 0x99, 0xf6, 0xf9, 0x22, 0xa2, 0x32, 0xf8, 0xb0, 0x76, 0x0c, 0xb2, 0x4d, 0x6e, 0xbe, 0x83, 0x35, 0x61, 0x44, 0xd2, 0x58, 0xc7, 0xdd, 0x14, 0xcf, 0xc3, 0x4b, 0x7c, 0x07, 0xee,
- /* (2^241)P */ 0x8b, 0x03, 0xee, 0xcb, 0xa7, 0x2e, 0x28, 0xbd, 0x97, 0xd1, 0x4c, 0x2b, 0xd1, 0x92, 0x67, 0x5b, 0x5a, 0x12, 0xbf, 0x29, 0x17, 0xfc, 0x50, 0x09, 0x74, 0x76, 0xa2, 0xd4, 0x82, 0xfd, 0x2c, 0x0c, 0x90, 0xf7, 0xe7, 0xe5, 0x9a, 0x2c, 0x16, 0x40, 0xb9, 0x6c, 0xd9, 0xe0, 0x22, 0x9e, 0xf8, 0xdd, 0x73, 0xe4, 0x7b, 0x9e, 0xbe, 0x4f, 0x66, 0x22,
- /* (2^242)P */ 0xa4, 0x10, 0xbe, 0xb8, 0x83, 0x3a, 0x77, 0x8e, 0xea, 0x0a, 0xc4, 0x97, 0x3e, 0xb6, 0x6c, 0x81, 0xd7, 0x65, 0xd9, 0xf7, 0xae, 0xe6, 0xbe, 0xab, 0x59, 0x81, 0x29, 0x4b, 0xff, 0xe1, 0x0f, 0xc3, 0x2b, 0xad, 0x4b, 0xef, 0xc4, 0x50, 0x9f, 0x88, 0x31, 0xf2, 0xde, 0x80, 0xd6, 0xf4, 0x20, 0x9c, 0x77, 0x9b, 0xbe, 0xbe, 0x08, 0xf5, 0xf0, 0x95,
- /* (2^243)P */ 0x0e, 0x7c, 0x7b, 0x7c, 0xb3, 0xd8, 0x83, 0xfc, 0x8c, 0x75, 0x51, 0x74, 0x1b, 0xe1, 0x6d, 0x11, 0x05, 0x46, 0x24, 0x0d, 0xa4, 0x2b, 0x32, 0xfd, 0x2c, 0x4e, 0x21, 0xdf, 0x39, 0x6b, 0x96, 0xfc, 0xff, 0x92, 0xfc, 0x35, 0x0d, 0x9a, 0x4b, 0xc0, 0x70, 0x46, 0x32, 0x7d, 0xc0, 0xc4, 0x04, 0xe0, 0x2d, 0x83, 0xa7, 0x00, 0xc7, 0xcb, 0xb4, 0x8f,
- /* (2^244)P */ 0xa9, 0x5a, 0x7f, 0x0e, 0xdd, 0x2c, 0x85, 0xaa, 0x4d, 0xac, 0xde, 0xb3, 0xb6, 0xaf, 0xe6, 0xd1, 0x06, 0x7b, 0x2c, 0xa4, 0x01, 0x19, 0x22, 0x7d, 0x78, 0xf0, 0x3a, 0xea, 0x89, 0xfe, 0x21, 0x61, 0x6d, 0xb8, 0xfe, 0xa5, 0x2a, 0xab, 0x0d, 0x7b, 0x51, 0x39, 0xb6, 0xde, 0xbc, 0xf0, 0xc5, 0x48, 0xd7, 0x09, 0x82, 0x6e, 0x66, 0x75, 0xc5, 0xcd,
- /* (2^245)P */ 0xee, 0xdf, 0x2b, 0x6c, 0xa8, 0xde, 0x61, 0xe1, 0x27, 0xfa, 0x2a, 0x0f, 0x68, 0xe7, 0x7a, 0x9b, 0x13, 0xe9, 0x56, 0xd2, 0x1c, 0x3d, 0x2f, 0x3c, 0x7a, 0xf6, 0x6f, 0x45, 0xee, 0xe8, 0xf4, 0xa0, 0xa6, 0xe8, 0xa5, 0x27, 0xee, 0xf2, 0x85, 0xa9, 0xd5, 0x0e, 0xa9, 0x26, 0x60, 0xfe, 0xee, 0xc7, 0x59, 0x99, 0x5e, 0xa3, 0xdf, 0x23, 0x36, 0xd5,
- /* (2^246)P */ 0x15, 0x66, 0x6f, 0xd5, 0x78, 0xa4, 0x0a, 0xf7, 0xb1, 0xe8, 0x75, 0x6b, 0x48, 0x7d, 0xa6, 0x4d, 0x3d, 0x36, 0x9b, 0xc7, 0xcc, 0x68, 0x9a, 0xfe, 0x2f, 0x39, 0x2a, 0x51, 0x31, 0x39, 0x7d, 0x73, 0x6f, 0xc8, 0x74, 0x72, 0x6f, 0x6e, 0xda, 0x5f, 0xad, 0x48, 0xc8, 0x40, 0xe1, 0x06, 0x01, 0x36, 0xa1, 0x88, 0xc8, 0x99, 0x9c, 0xd1, 0x11, 0x8f,
- /* (2^247)P */ 0xab, 0xc5, 0xcb, 0xcf, 0xbd, 0x73, 0x21, 0xd0, 0x82, 0xb1, 0x2e, 0x2d, 0xd4, 0x36, 0x1b, 0xed, 0xa9, 0x8a, 0x26, 0x79, 0xc4, 0x17, 0xae, 0xe5, 0x09, 0x0a, 0x0c, 0xa4, 0x21, 0xa0, 0x6e, 0xdd, 0x62, 0x8e, 0x44, 0x62, 0xcc, 0x50, 0xff, 0x93, 0xb3, 0x9a, 0x72, 0x8c, 0x3f, 0xa1, 0xa6, 0x4d, 0x87, 0xd5, 0x1c, 0x5a, 0xc0, 0x0b, 0x1a, 0xd6,
- /* (2^248)P */ 0x67, 0x36, 0x6a, 0x1f, 0x96, 0xe5, 0x80, 0x20, 0xa9, 0xe8, 0x0b, 0x0e, 0x21, 0x29, 0x3f, 0xc8, 0x0a, 0x6d, 0x27, 0x47, 0xca, 0xd9, 0x05, 0x55, 0xbf, 0x11, 0xcf, 0x31, 0x7a, 0x37, 0xc7, 0x90, 0xa9, 0xf4, 0x07, 0x5e, 0xd5, 0xc3, 0x92, 0xaa, 0x95, 0xc8, 0x23, 0x2a, 0x53, 0x45, 0xe3, 0x3a, 0x24, 0xe9, 0x67, 0x97, 0x3a, 0x82, 0xf9, 0xa6,
- /* (2^249)P */ 0x92, 0x9e, 0x6d, 0x82, 0x67, 0xe9, 0xf9, 0x17, 0x96, 0x2c, 0xa7, 0xd3, 0x89, 0xf9, 0xdb, 0xd8, 0x20, 0xc6, 0x2e, 0xec, 0x4a, 0x76, 0x64, 0xbf, 0x27, 0x40, 0xe2, 0xb4, 0xdf, 0x1f, 0xa0, 0xef, 0x07, 0x80, 0xfb, 0x8e, 0x12, 0xf8, 0xb8, 0xe1, 0xc6, 0xdf, 0x7c, 0x69, 0x35, 0x5a, 0xe1, 0x8e, 0x5d, 0x69, 0x84, 0x56, 0xb6, 0x31, 0x1c, 0x0b,
- /* (2^250)P */ 0xd6, 0x94, 0x5c, 0xef, 0xbb, 0x46, 0x45, 0x44, 0x5b, 0xa1, 0xae, 0x03, 0x65, 0xdd, 0xb5, 0x66, 0x88, 0x35, 0x29, 0x95, 0x16, 0x54, 0xa6, 0xf5, 0xc9, 0x78, 0x34, 0xe6, 0x0f, 0xc4, 0x2b, 0x5b, 0x79, 0x51, 0x68, 0x48, 0x3a, 0x26, 0x87, 0x05, 0x70, 0xaf, 0x8b, 0xa6, 0xc7, 0x2e, 0xb3, 0xa9, 0x10, 0x01, 0xb0, 0xb9, 0x31, 0xfd, 0xdc, 0x80,
- /* (2^251)P */ 0x25, 0xf2, 0xad, 0xd6, 0x75, 0xa3, 0x04, 0x05, 0x64, 0x8a, 0x97, 0x60, 0x27, 0x2a, 0xe5, 0x6d, 0xb0, 0x73, 0xf4, 0x07, 0x2a, 0x9d, 0xe9, 0x46, 0xb4, 0x1c, 0x51, 0xf8, 0x63, 0x98, 0x7e, 0xe5, 0x13, 0x51, 0xed, 0x98, 0x65, 0x98, 0x4f, 0x8f, 0xe7, 0x7e, 0x72, 0xd7, 0x64, 0x11, 0x2f, 0xcd, 0x12, 0xf8, 0xc4, 0x63, 0x52, 0x0f, 0x7f, 0xc4,
- /* (2^252)P */ 0x5c, 0xd9, 0x85, 0x63, 0xc7, 0x8a, 0x65, 0x9a, 0x25, 0x83, 0x31, 0x73, 0x49, 0xf0, 0x93, 0x96, 0x70, 0x67, 0x6d, 0xb1, 0xff, 0x95, 0x54, 0xe4, 0xf8, 0x15, 0x6c, 0x5f, 0xbd, 0xf6, 0x0f, 0x38, 0x7b, 0x68, 0x7d, 0xd9, 0x3d, 0xf0, 0xa9, 0xa0, 0xe4, 0xd1, 0xb6, 0x34, 0x6d, 0x14, 0x16, 0xc2, 0x4c, 0x30, 0x0e, 0x67, 0xd3, 0xbe, 0x2e, 0xc0,
- /* (2^253)P */ 0x06, 0x6b, 0x52, 0xc8, 0x14, 0xcd, 0xae, 0x03, 0x93, 0xea, 0xc1, 0xf2, 0xf6, 0x8b, 0xc5, 0xb6, 0xdc, 0x82, 0x42, 0x29, 0x94, 0xe0, 0x25, 0x6c, 0x3f, 0x9f, 0x5d, 0xe4, 0x96, 0xf6, 0x8e, 0x3f, 0xf9, 0x72, 0xc4, 0x77, 0x60, 0x8b, 0xa4, 0xf9, 0xa8, 0xc3, 0x0a, 0x81, 0xb1, 0x97, 0x70, 0x18, 0xab, 0xea, 0x37, 0x8a, 0x08, 0xc7, 0xe2, 0x95,
- /* (2^254)P */ 0x94, 0x49, 0xd9, 0x5f, 0x76, 0x72, 0x82, 0xad, 0x2d, 0x50, 0x1a, 0x7a, 0x5b, 0xe6, 0x95, 0x1e, 0x95, 0x65, 0x87, 0x1c, 0x52, 0xd7, 0x44, 0xe6, 0x9b, 0x56, 0xcd, 0x6f, 0x05, 0xff, 0x67, 0xc5, 0xdb, 0xa2, 0xac, 0xe4, 0xa2, 0x28, 0x63, 0x5f, 0xfb, 0x0c, 0x3b, 0xf1, 0x87, 0xc3, 0x36, 0x78, 0x3f, 0x77, 0xfa, 0x50, 0x85, 0xf9, 0xd7, 0x82,
- /* (2^255)P */ 0x64, 0xc0, 0xe0, 0xd8, 0x2d, 0xed, 0xcb, 0x6a, 0xfd, 0xcd, 0xbc, 0x7e, 0x9f, 0xc8, 0x85, 0xe9, 0xc1, 0x7c, 0x0f, 0xe5, 0x18, 0xea, 0xd4, 0x51, 0xad, 0x59, 0x13, 0x75, 0xd9, 0x3d, 0xd4, 0x8a, 0xb2, 0xbe, 0x78, 0x52, 0x2b, 0x52, 0x94, 0x37, 0x41, 0xd6, 0xb4, 0xb6, 0x45, 0x20, 0x76, 0xe0, 0x1f, 0x31, 0xdb, 0xb1, 0xa1, 0x43, 0xf0, 0x18,
- /* (2^256)P */ 0x74, 0xa9, 0xa4, 0xa9, 0xdd, 0x6e, 0x3e, 0x68, 0xe5, 0xc3, 0x2e, 0x92, 0x17, 0xa4, 0xcb, 0x80, 0xb1, 0xf0, 0x06, 0x93, 0xef, 0xe6, 0x00, 0xe6, 0x3b, 0xb1, 0x32, 0x65, 0x7b, 0x83, 0xb6, 0x8a, 0x49, 0x1b, 0x14, 0x89, 0xee, 0xba, 0xf5, 0x6a, 0x8d, 0x36, 0xef, 0xb0, 0xd8, 0xb2, 0x16, 0x99, 0x17, 0x35, 0x02, 0x16, 0x55, 0x58, 0xdd, 0x82,
- /* (2^257)P */ 0x36, 0x95, 0xe8, 0xf4, 0x36, 0x42, 0xbb, 0xc5, 0x3e, 0xfa, 0x30, 0x84, 0x9e, 0x59, 0xfd, 0xd2, 0x95, 0x42, 0xf8, 0x64, 0xd9, 0xb9, 0x0e, 0x9f, 0xfa, 0xd0, 0x7b, 0x20, 0x31, 0x77, 0x48, 0x29, 0x4d, 0xd0, 0x32, 0x57, 0x56, 0x30, 0xa6, 0x17, 0x53, 0x04, 0xbf, 0x08, 0x28, 0xec, 0xb8, 0x46, 0xc1, 0x03, 0x89, 0xdc, 0xed, 0xa0, 0x35, 0x53,
- /* (2^258)P */ 0xc5, 0x7f, 0x9e, 0xd8, 0xc5, 0xba, 0x5f, 0x68, 0xc8, 0x23, 0x75, 0xea, 0x0d, 0xd9, 0x5a, 0xfd, 0x61, 0x1a, 0xa3, 0x2e, 0x45, 0x63, 0x14, 0x55, 0x86, 0x21, 0x29, 0xbe, 0xef, 0x5e, 0x50, 0xe5, 0x18, 0x59, 0xe7, 0xe3, 0xce, 0x4d, 0x8c, 0x15, 0x8f, 0x89, 0x66, 0x44, 0x52, 0x3d, 0xfa, 0xc7, 0x9a, 0x59, 0x90, 0x8e, 0xc0, 0x06, 0x3f, 0xc9,
- /* (2^259)P */ 0x8e, 0x04, 0xd9, 0x16, 0x50, 0x1d, 0x8c, 0x9f, 0xd5, 0xe3, 0xce, 0xfd, 0x47, 0x04, 0x27, 0x4d, 0xc2, 0xfa, 0x71, 0xd9, 0x0b, 0xb8, 0x65, 0xf4, 0x11, 0xf3, 0x08, 0xee, 0x81, 0xc8, 0x67, 0x99, 0x0b, 0x8d, 0x77, 0xa3, 0x4f, 0xb5, 0x9b, 0xdb, 0x26, 0xf1, 0x97, 0xeb, 0x04, 0x54, 0xeb, 0x80, 0x08, 0x1d, 0x1d, 0xf6, 0x3d, 0x1f, 0x5a, 0xb8,
- /* (2^260)P */ 0xb7, 0x9c, 0x9d, 0xee, 0xb9, 0x5c, 0xad, 0x0d, 0x9e, 0xfd, 0x60, 0x3c, 0x27, 0x4e, 0xa2, 0x95, 0xfb, 0x64, 0x7e, 0x79, 0x64, 0x87, 0x10, 0xb4, 0x73, 0xe0, 0x9d, 0x46, 0x4d, 0x3d, 0xee, 0x83, 0xe4, 0x16, 0x88, 0x97, 0xe6, 0x4d, 0xba, 0x70, 0xb6, 0x96, 0x7b, 0xff, 0x4b, 0xc8, 0xcf, 0x72, 0x83, 0x3e, 0x5b, 0x24, 0x2e, 0x57, 0xf1, 0x82,
- /* (2^261)P */ 0x30, 0x71, 0x40, 0x51, 0x4f, 0x44, 0xbb, 0xc7, 0xf0, 0x54, 0x6e, 0x9d, 0xeb, 0x15, 0xad, 0xf8, 0x61, 0x43, 0x5a, 0xef, 0xc0, 0xb1, 0x57, 0xae, 0x03, 0x40, 0xe8, 0x68, 0x6f, 0x03, 0x20, 0x4f, 0x8a, 0x51, 0x2a, 0x9e, 0xd2, 0x45, 0xaf, 0xb4, 0xf5, 0xd4, 0x95, 0x7f, 0x3d, 0x3d, 0xb7, 0xb6, 0x28, 0xc5, 0x08, 0x8b, 0x44, 0xd6, 0x3f, 0xe7,
- /* (2^262)P */ 0xa9, 0x52, 0x04, 0x67, 0xcb, 0x20, 0x63, 0xf8, 0x18, 0x01, 0x44, 0x21, 0x6a, 0x8a, 0x83, 0x48, 0xd4, 0xaf, 0x23, 0x0f, 0x35, 0x8d, 0xe5, 0x5a, 0xc4, 0x7c, 0x55, 0x46, 0x19, 0x5f, 0x35, 0xe0, 0x5d, 0x97, 0x4c, 0x2d, 0x04, 0xed, 0x59, 0xd4, 0xb0, 0xb2, 0xc6, 0xe3, 0x51, 0xe1, 0x38, 0xc6, 0x30, 0x49, 0x8f, 0xae, 0x61, 0x64, 0xce, 0xa8,
- /* (2^263)P */ 0x9b, 0x64, 0x83, 0x3c, 0xd3, 0xdf, 0xb9, 0x27, 0xe7, 0x5b, 0x7f, 0xeb, 0xf3, 0x26, 0xcf, 0xb1, 0x8f, 0xaf, 0x26, 0xc8, 0x48, 0xce, 0xa1, 0xac, 0x7d, 0x10, 0x34, 0x28, 0xe1, 0x1f, 0x69, 0x03, 0x64, 0x77, 0x61, 0xdd, 0x4a, 0x9b, 0x18, 0x47, 0xf8, 0xca, 0x63, 0xc9, 0x03, 0x2d, 0x20, 0x2a, 0x69, 0x6e, 0x42, 0xd0, 0xe7, 0xaa, 0xb5, 0xf3,
- /* (2^264)P */ 0xea, 0x31, 0x0c, 0x57, 0x0f, 0x3e, 0xe3, 0x35, 0xd8, 0x30, 0xa5, 0x6f, 0xdd, 0x95, 0x43, 0xc6, 0x66, 0x07, 0x4f, 0x34, 0xc3, 0x7e, 0x04, 0x10, 0x2d, 0xc4, 0x1c, 0x94, 0x52, 0x2e, 0x5b, 0x9a, 0x65, 0x2f, 0x91, 0xaa, 0x4f, 0x3c, 0xdc, 0x23, 0x18, 0xe1, 0x4f, 0x85, 0xcd, 0xf4, 0x8c, 0x51, 0xf7, 0xab, 0x4f, 0xdc, 0x15, 0x5c, 0x9e, 0xc5,
- /* (2^265)P */ 0x54, 0x57, 0x23, 0x17, 0xe7, 0x82, 0x2f, 0x04, 0x7d, 0xfe, 0xe7, 0x1f, 0xa2, 0x57, 0x79, 0xe9, 0x58, 0x9b, 0xbe, 0xc6, 0x16, 0x4a, 0x17, 0x50, 0x90, 0x4a, 0x34, 0x70, 0x87, 0x37, 0x01, 0x26, 0xd8, 0xa3, 0x5f, 0x07, 0x7c, 0xd0, 0x7d, 0x05, 0x8a, 0x93, 0x51, 0x2f, 0x99, 0xea, 0xcf, 0x00, 0xd8, 0xc7, 0xe6, 0x9b, 0x8c, 0x62, 0x45, 0x87,
- /* (2^266)P */ 0xc3, 0xfd, 0x29, 0x66, 0xe7, 0x30, 0x29, 0x77, 0xe0, 0x0d, 0x63, 0x5b, 0xe6, 0x90, 0x1a, 0x1e, 0x99, 0xc2, 0xa7, 0xab, 0xff, 0xa7, 0xbd, 0x79, 0x01, 0x97, 0xfd, 0x27, 0x1b, 0x43, 0x2b, 0xe6, 0xfe, 0x5e, 0xf1, 0xb9, 0x35, 0x38, 0x08, 0x25, 0x55, 0x90, 0x68, 0x2e, 0xc3, 0x67, 0x39, 0x9f, 0x2b, 0x2c, 0x70, 0x48, 0x8c, 0x47, 0xee, 0x56,
- /* (2^267)P */ 0xf7, 0x32, 0x70, 0xb5, 0xe6, 0x42, 0xfd, 0x0a, 0x39, 0x9b, 0x07, 0xfe, 0x0e, 0xf4, 0x47, 0xba, 0x6a, 0x3f, 0xf5, 0x2c, 0x15, 0xf3, 0x60, 0x3f, 0xb1, 0x83, 0x7b, 0x2e, 0x34, 0x58, 0x1a, 0x6e, 0x4a, 0x49, 0x05, 0x45, 0xca, 0xdb, 0x00, 0x01, 0x0c, 0x42, 0x5e, 0x60, 0x40, 0x5f, 0xd9, 0xc7, 0x3a, 0x9e, 0x1c, 0x8d, 0xab, 0x11, 0x55, 0x65,
- /* (2^268)P */ 0x87, 0x40, 0xb7, 0x0d, 0xaa, 0x34, 0x89, 0x90, 0x75, 0x6d, 0xa2, 0xfe, 0x3b, 0x6d, 0x5c, 0x39, 0x98, 0x10, 0x9e, 0x15, 0xc5, 0x35, 0xa2, 0x27, 0x23, 0x0a, 0x2d, 0x60, 0xe2, 0xa8, 0x7f, 0x3e, 0x77, 0x8f, 0xcc, 0x44, 0xcc, 0x30, 0x28, 0xe2, 0xf0, 0x04, 0x8c, 0xee, 0xe4, 0x5f, 0x68, 0x8c, 0xdf, 0x70, 0xbf, 0x31, 0xee, 0x2a, 0xfc, 0xce,
- /* (2^269)P */ 0x92, 0xf2, 0xa0, 0xd9, 0x58, 0x3b, 0x7c, 0x1a, 0x99, 0x46, 0x59, 0x54, 0x60, 0x06, 0x8d, 0x5e, 0xf0, 0x22, 0xa1, 0xed, 0x92, 0x8a, 0x4d, 0x76, 0x95, 0x05, 0x0b, 0xff, 0xfc, 0x9a, 0xd1, 0xcc, 0x05, 0xb9, 0x5e, 0x99, 0xe8, 0x2a, 0x76, 0x7b, 0xfd, 0xa6, 0xe2, 0xd1, 0x1a, 0xd6, 0x76, 0x9f, 0x2f, 0x0e, 0xd1, 0xa8, 0x77, 0x5a, 0x40, 0x5a,
- /* (2^270)P */ 0xff, 0xf9, 0x3f, 0xa9, 0xa6, 0x6c, 0x6d, 0x03, 0x8b, 0xa7, 0x10, 0x5d, 0x3f, 0xec, 0x3e, 0x1c, 0x0b, 0x6b, 0xa2, 0x6a, 0x22, 0xa9, 0x28, 0xd0, 0x66, 0xc9, 0xc2, 0x3d, 0x47, 0x20, 0x7d, 0xa6, 0x1d, 0xd8, 0x25, 0xb5, 0xf2, 0xf9, 0x70, 0x19, 0x6b, 0xf8, 0x43, 0x36, 0xc5, 0x1f, 0xe4, 0x5a, 0x4c, 0x13, 0xe4, 0x6d, 0x08, 0x0b, 0x1d, 0xb1,
- /* (2^271)P */ 0x3f, 0x20, 0x9b, 0xfb, 0xec, 0x7d, 0x31, 0xc5, 0xfc, 0x88, 0x0b, 0x30, 0xed, 0x36, 0xc0, 0x63, 0xb1, 0x7d, 0x10, 0xda, 0xb6, 0x2e, 0xad, 0xf3, 0xec, 0x94, 0xe7, 0xec, 0xb5, 0x9c, 0xfe, 0xf5, 0x35, 0xf0, 0xa2, 0x2d, 0x7f, 0xca, 0x6b, 0x67, 0x1a, 0xf6, 0xb3, 0xda, 0x09, 0x2a, 0xaa, 0xdf, 0xb1, 0xca, 0x9b, 0xfb, 0xeb, 0xb3, 0xcd, 0xc0,
- /* (2^272)P */ 0xcd, 0x4d, 0x89, 0x00, 0xa4, 0x3b, 0x48, 0xf0, 0x76, 0x91, 0x35, 0xa5, 0xf8, 0xc9, 0xb6, 0x46, 0xbc, 0xf6, 0x9a, 0x45, 0x47, 0x17, 0x96, 0x80, 0x5b, 0x3a, 0x28, 0x33, 0xf9, 0x5a, 0xef, 0x43, 0x07, 0xfe, 0x3b, 0xf4, 0x8e, 0x19, 0xce, 0xd2, 0x94, 0x4b, 0x6d, 0x8e, 0x67, 0x20, 0xc7, 0x4f, 0x2f, 0x59, 0x8e, 0xe1, 0xa1, 0xa9, 0xf9, 0x0e,
- /* (2^273)P */ 0xdc, 0x7b, 0xb5, 0x50, 0x2e, 0xe9, 0x7e, 0x8b, 0x78, 0xa1, 0x38, 0x96, 0x22, 0xc3, 0x61, 0x67, 0x6d, 0xc8, 0x58, 0xed, 0x41, 0x1d, 0x5d, 0x86, 0x98, 0x7f, 0x2f, 0x1b, 0x8d, 0x3e, 0xaa, 0xc1, 0xd2, 0x0a, 0xf3, 0xbf, 0x95, 0x04, 0xf3, 0x10, 0x3c, 0x2b, 0x7f, 0x90, 0x46, 0x04, 0xaa, 0x6a, 0xa9, 0x35, 0x76, 0xac, 0x49, 0xb5, 0x00, 0x45,
- /* (2^274)P */ 0xb1, 0x93, 0x79, 0x84, 0x4a, 0x2a, 0x30, 0x78, 0x16, 0xaa, 0xc5, 0x74, 0x06, 0xce, 0xa5, 0xa7, 0x32, 0x86, 0xe0, 0xf9, 0x10, 0xd2, 0x58, 0x76, 0xfb, 0x66, 0x49, 0x76, 0x3a, 0x90, 0xba, 0xb5, 0xcc, 0x99, 0xcd, 0x09, 0xc1, 0x9a, 0x74, 0x23, 0xdf, 0x0c, 0xfe, 0x99, 0x52, 0x80, 0xa3, 0x7c, 0x1c, 0x71, 0x5f, 0x2c, 0x49, 0x57, 0xf4, 0xf9,
- /* (2^275)P */ 0x6d, 0xbf, 0x52, 0xe6, 0x25, 0x98, 0xed, 0xcf, 0xe3, 0xbc, 0x08, 0xa2, 0x1a, 0x90, 0xae, 0xa0, 0xbf, 0x07, 0x15, 0xad, 0x0a, 0x9f, 0x3e, 0x47, 0x44, 0xc2, 0x10, 0x46, 0xa6, 0x7a, 0x9e, 0x2f, 0x57, 0xbc, 0xe2, 0xf0, 0x1d, 0xd6, 0x9a, 0x06, 0xed, 0xfc, 0x54, 0x95, 0x92, 0x15, 0xa2, 0xf7, 0x8d, 0x6b, 0xef, 0xb2, 0x05, 0xed, 0x5c, 0x63,
- /* (2^276)P */ 0xbc, 0x0b, 0x27, 0x3a, 0x3a, 0xf8, 0xe1, 0x48, 0x02, 0x7e, 0x27, 0xe6, 0x81, 0x62, 0x07, 0x73, 0x74, 0xe5, 0x52, 0xd7, 0xf8, 0x26, 0xca, 0x93, 0x4d, 0x3e, 0x9b, 0x55, 0x09, 0x8e, 0xe3, 0xd7, 0xa6, 0xe3, 0xb6, 0x2a, 0xa9, 0xb3, 0xb0, 0xa0, 0x8c, 0x01, 0xbb, 0x07, 0x90, 0x78, 0x6d, 0x6d, 0xe9, 0xf0, 0x7a, 0x90, 0xbd, 0xdc, 0x0c, 0x36,
- /* (2^277)P */ 0x7f, 0x20, 0x12, 0x0f, 0x40, 0x00, 0x53, 0xd8, 0x0c, 0x27, 0x47, 0x47, 0x22, 0x80, 0xfb, 0x62, 0xe4, 0xa7, 0xf7, 0xbd, 0x42, 0xa5, 0xc3, 0x2b, 0xb2, 0x7f, 0x50, 0xcc, 0xe2, 0xfb, 0xd5, 0xc0, 0x63, 0xdd, 0x24, 0x5f, 0x7c, 0x08, 0x91, 0xbf, 0x6e, 0x47, 0x44, 0xd4, 0x6a, 0xc0, 0xc3, 0x09, 0x39, 0x27, 0xdd, 0xc7, 0xca, 0x06, 0x29, 0x55,
- /* (2^278)P */ 0x76, 0x28, 0x58, 0xb0, 0xd2, 0xf3, 0x0f, 0x04, 0xe9, 0xc9, 0xab, 0x66, 0x5b, 0x75, 0x51, 0xdc, 0xe5, 0x8f, 0xe8, 0x1f, 0xdb, 0x03, 0x0f, 0xb0, 0x7d, 0xf9, 0x20, 0x64, 0x89, 0xe9, 0xdc, 0xe6, 0x24, 0xc3, 0xd5, 0xd2, 0x41, 0xa6, 0xe4, 0xe3, 0xc4, 0x79, 0x7c, 0x0f, 0xa1, 0x61, 0x2f, 0xda, 0xa4, 0xc9, 0xfd, 0xad, 0x5c, 0x65, 0x6a, 0xf3,
- /* (2^279)P */ 0xd5, 0xab, 0x72, 0x7a, 0x3b, 0x59, 0xea, 0xcf, 0xd5, 0x17, 0xd2, 0xb2, 0x5f, 0x2d, 0xab, 0xad, 0x9e, 0x88, 0x64, 0x55, 0x96, 0x6e, 0xf3, 0x44, 0xa9, 0x11, 0xf5, 0xf8, 0x3a, 0xf1, 0xcd, 0x79, 0x4c, 0x99, 0x6d, 0x23, 0x6a, 0xa0, 0xc2, 0x1a, 0x19, 0x45, 0xb5, 0xd8, 0x95, 0x2f, 0x49, 0xe9, 0x46, 0x39, 0x26, 0x60, 0x04, 0x15, 0x8b, 0xcc,
- /* (2^280)P */ 0x66, 0x0c, 0xf0, 0x54, 0x41, 0x02, 0x91, 0xab, 0xe5, 0x85, 0x8a, 0x44, 0xa6, 0x34, 0x96, 0x32, 0xc0, 0xdf, 0x6c, 0x41, 0x39, 0xd4, 0xc6, 0xe1, 0xe3, 0x81, 0xb0, 0x4c, 0x34, 0x4f, 0xe5, 0xf4, 0x35, 0x46, 0x1f, 0xeb, 0x75, 0xfd, 0x43, 0x37, 0x50, 0x99, 0xab, 0xad, 0xb7, 0x8c, 0xa1, 0x57, 0xcb, 0xe6, 0xce, 0x16, 0x2e, 0x85, 0xcc, 0xf9,
- /* (2^281)P */ 0x63, 0xd1, 0x3f, 0x9e, 0xa2, 0x17, 0x2e, 0x1d, 0x3e, 0xce, 0x48, 0x2d, 0xbb, 0x8f, 0x69, 0xc9, 0xa6, 0x3d, 0x4e, 0xfe, 0x09, 0x56, 0xb3, 0x02, 0x5f, 0x99, 0x97, 0x0c, 0x54, 0xda, 0x32, 0x97, 0x9b, 0xf4, 0x95, 0xf1, 0xad, 0xe3, 0x2b, 0x04, 0xa7, 0x9b, 0x3f, 0xbb, 0xe7, 0x87, 0x2e, 0x1f, 0x8b, 0x4b, 0x7a, 0xa4, 0x43, 0x0c, 0x0f, 0x35,
- /* (2^282)P */ 0x05, 0xdc, 0xe0, 0x2c, 0xa1, 0xc1, 0xd0, 0xf1, 0x1f, 0x4e, 0xc0, 0x6c, 0x35, 0x7b, 0xca, 0x8f, 0x8b, 0x02, 0xb1, 0xf7, 0xd6, 0x2e, 0xe7, 0x93, 0x80, 0x85, 0x18, 0x88, 0x19, 0xb9, 0xb4, 0x4a, 0xbc, 0xeb, 0x5a, 0x78, 0x38, 0xed, 0xc6, 0x27, 0x2a, 0x74, 0x76, 0xf0, 0x1b, 0x79, 0x92, 0x2f, 0xd2, 0x81, 0x98, 0xdf, 0xa9, 0x50, 0x19, 0xeb,
- /* (2^283)P */ 0xb5, 0xe7, 0xb4, 0x11, 0x3a, 0x81, 0xb6, 0xb4, 0xf8, 0xa2, 0xb3, 0x6c, 0xfc, 0x9d, 0xe0, 0xc0, 0xe0, 0x59, 0x7f, 0x05, 0x37, 0xef, 0x2c, 0xa9, 0x3a, 0x24, 0xac, 0x7b, 0x25, 0xa0, 0x55, 0xd2, 0x44, 0x82, 0x82, 0x6e, 0x64, 0xa3, 0x58, 0xc8, 0x67, 0xae, 0x26, 0xa7, 0x0f, 0x42, 0x63, 0xe1, 0x93, 0x01, 0x52, 0x19, 0xaf, 0x49, 0x3e, 0x33,
- /* (2^284)P */ 0x05, 0x85, 0xe6, 0x66, 0xaf, 0x5f, 0xdf, 0xbf, 0x9d, 0x24, 0x62, 0x60, 0x90, 0xe2, 0x4c, 0x7d, 0x4e, 0xc3, 0x74, 0x5d, 0x4f, 0x53, 0xf3, 0x63, 0x13, 0xf4, 0x74, 0x28, 0x6b, 0x7d, 0x57, 0x0c, 0x9d, 0x84, 0xa7, 0x1a, 0xff, 0xa0, 0x79, 0xdf, 0xfc, 0x65, 0x98, 0x8e, 0x22, 0x0d, 0x62, 0x7e, 0xf2, 0x34, 0x60, 0x83, 0x05, 0x14, 0xb1, 0xc1,
- /* (2^285)P */ 0x64, 0x22, 0xcc, 0xdf, 0x5c, 0xbc, 0x88, 0x68, 0x4c, 0xd9, 0xbc, 0x0e, 0xc9, 0x8b, 0xb4, 0x23, 0x52, 0xad, 0xb0, 0xb3, 0xf1, 0x17, 0xd8, 0x15, 0x04, 0x6b, 0x99, 0xf0, 0xc4, 0x7d, 0x48, 0x22, 0x4a, 0xf8, 0x6f, 0xaa, 0x88, 0x0d, 0xc5, 0x5e, 0xa9, 0x1c, 0x61, 0x3d, 0x95, 0xa9, 0x7b, 0x6a, 0x79, 0x33, 0x0a, 0x2b, 0x99, 0xe3, 0x4e, 0x48,
- /* (2^286)P */ 0x6b, 0x9b, 0x6a, 0x2a, 0xf1, 0x60, 0x31, 0xb4, 0x73, 0xd1, 0x87, 0x45, 0x9c, 0x15, 0x58, 0x4b, 0x91, 0x6d, 0x94, 0x1c, 0x41, 0x11, 0x4a, 0x83, 0xec, 0xaf, 0x65, 0xbc, 0x34, 0xaa, 0x26, 0xe2, 0xaf, 0xed, 0x46, 0x05, 0x4e, 0xdb, 0xc6, 0x4e, 0x10, 0x28, 0x4e, 0x72, 0xe5, 0x31, 0xa3, 0x20, 0xd7, 0xb1, 0x96, 0x64, 0xf6, 0xce, 0x08, 0x08,
- /* (2^287)P */ 0x16, 0xa9, 0x5c, 0x9f, 0x9a, 0xb4, 0xb8, 0xc8, 0x32, 0x78, 0xc0, 0x3a, 0xd9, 0x5f, 0x94, 0xac, 0x3a, 0x42, 0x1f, 0x43, 0xd6, 0x80, 0x47, 0x2c, 0xdc, 0x76, 0x27, 0xfa, 0x50, 0xe5, 0xa1, 0xe4, 0xc3, 0xcb, 0x61, 0x31, 0xe1, 0x2e, 0xde, 0x81, 0x3b, 0x77, 0x1c, 0x39, 0x3c, 0xdb, 0xda, 0x87, 0x4b, 0x84, 0x12, 0xeb, 0xdd, 0x54, 0xbf, 0xe7,
- /* (2^288)P */ 0xbf, 0xcb, 0x73, 0x21, 0x3d, 0x7e, 0x13, 0x8c, 0xa6, 0x34, 0x21, 0x2b, 0xa5, 0xe4, 0x9f, 0x8e, 0x9c, 0x01, 0x9c, 0x43, 0xd9, 0xc7, 0xb9, 0xf1, 0xbe, 0x7f, 0x45, 0x51, 0x97, 0xa1, 0x8e, 0x01, 0xf8, 0xbd, 0xd2, 0xbf, 0x81, 0x3a, 0x8b, 0xab, 0xe4, 0x89, 0xb7, 0xbd, 0xf2, 0xcd, 0xa9, 0x8a, 0x8a, 0xde, 0xfb, 0x8a, 0x55, 0x12, 0x7b, 0x17,
- /* (2^289)P */ 0x1b, 0x95, 0x58, 0x4d, 0xe6, 0x51, 0x31, 0x52, 0x1c, 0xd8, 0x15, 0x84, 0xb1, 0x0d, 0x36, 0x25, 0x88, 0x91, 0x46, 0x71, 0x42, 0x56, 0xe2, 0x90, 0x08, 0x9e, 0x77, 0x1b, 0xee, 0x22, 0x3f, 0xec, 0xee, 0x8c, 0x7b, 0x2e, 0x79, 0xc4, 0x6c, 0x07, 0xa1, 0x7e, 0x52, 0xf5, 0x26, 0x5c, 0x84, 0x2a, 0x50, 0x6e, 0x82, 0xb3, 0x76, 0xda, 0x35, 0x16,
- /* (2^290)P */ 0x0a, 0x6f, 0x99, 0x87, 0xc0, 0x7d, 0x8a, 0xb2, 0xca, 0xae, 0xe8, 0x65, 0x98, 0x0f, 0xb3, 0x44, 0xe1, 0xdc, 0x52, 0x79, 0x75, 0xec, 0x8f, 0x95, 0x87, 0x45, 0xd1, 0x32, 0x18, 0x55, 0x15, 0xce, 0x64, 0x9b, 0x08, 0x4f, 0x2c, 0xea, 0xba, 0x1c, 0x57, 0x06, 0x63, 0xc8, 0xb1, 0xfd, 0xc5, 0x67, 0xe7, 0x1f, 0x87, 0x9e, 0xde, 0x72, 0x7d, 0xec,
- /* (2^291)P */ 0x36, 0x8b, 0x4d, 0x2c, 0xc2, 0x46, 0xe8, 0x96, 0xac, 0x0b, 0x8c, 0xc5, 0x09, 0x10, 0xfc, 0xf2, 0xda, 0xea, 0x22, 0xb2, 0xd3, 0x89, 0xeb, 0xb2, 0x85, 0x0f, 0xff, 0x59, 0x50, 0x2c, 0x99, 0x5a, 0x1f, 0xec, 0x2a, 0x6f, 0xec, 0xcf, 0xe9, 0xce, 0x12, 0x6b, 0x19, 0xd8, 0xde, 0x9b, 0xce, 0x0e, 0x6a, 0xaa, 0xe1, 0x32, 0xea, 0x4c, 0xfe, 0x92,
- /* (2^292)P */ 0x5f, 0x17, 0x70, 0x53, 0x26, 0x03, 0x0b, 0xab, 0xd1, 0xc1, 0x42, 0x0b, 0xab, 0x2b, 0x3d, 0x31, 0xa4, 0xd5, 0x2b, 0x5e, 0x00, 0xd5, 0x9a, 0x22, 0x34, 0xe0, 0x53, 0x3f, 0x59, 0x7f, 0x2c, 0x6d, 0x72, 0x9a, 0xa4, 0xbe, 0x3d, 0x42, 0x05, 0x1b, 0xf2, 0x7f, 0x88, 0x56, 0xd1, 0x7c, 0x7d, 0x6b, 0x9f, 0x43, 0xfe, 0x65, 0x19, 0xae, 0x9c, 0x4c,
- /* (2^293)P */ 0xf3, 0x7c, 0x20, 0xa9, 0xfc, 0xf2, 0xf2, 0x3b, 0x3c, 0x57, 0x41, 0x94, 0xe5, 0xcc, 0x6a, 0x37, 0x5d, 0x09, 0xf2, 0xab, 0xc2, 0xca, 0x60, 0x38, 0x6b, 0x7a, 0xe1, 0x78, 0x2b, 0xc1, 0x1d, 0xe8, 0xfd, 0xbc, 0x3d, 0x5c, 0xa2, 0xdb, 0x49, 0x20, 0x79, 0xe6, 0x1b, 0x9b, 0x65, 0xd9, 0x6d, 0xec, 0x57, 0x1d, 0xd2, 0xe9, 0x90, 0xeb, 0x43, 0x7b,
- /* (2^294)P */ 0x2a, 0x8b, 0x2e, 0x19, 0x18, 0x10, 0xb8, 0x83, 0xe7, 0x7d, 0x2d, 0x9a, 0x3a, 0xe5, 0xd1, 0xe4, 0x7c, 0x38, 0xe5, 0x59, 0x2a, 0x6e, 0xd9, 0x01, 0x29, 0x3d, 0x23, 0xf7, 0x52, 0xba, 0x61, 0x04, 0x9a, 0xde, 0xc4, 0x31, 0x50, 0xeb, 0x1b, 0xaa, 0xde, 0x39, 0x58, 0xd8, 0x1b, 0x1e, 0xfc, 0x57, 0x9a, 0x28, 0x43, 0x9e, 0x97, 0x5e, 0xaa, 0xa3,
- /* (2^295)P */ 0x97, 0x0a, 0x74, 0xc4, 0x39, 0x99, 0x6b, 0x40, 0xc7, 0x3e, 0x8c, 0xa7, 0xb1, 0x4e, 0x9a, 0x59, 0x6e, 0x1c, 0xfe, 0xfc, 0x2a, 0x5e, 0x73, 0x2b, 0x8c, 0xa9, 0x71, 0xf5, 0xda, 0x6b, 0x15, 0xab, 0xf7, 0xbe, 0x2a, 0x44, 0x5f, 0xba, 0xae, 0x67, 0x93, 0xc5, 0x86, 0xc1, 0xb8, 0xdf, 0xdc, 0xcb, 0xd7, 0xff, 0xb1, 0x71, 0x7c, 0x6f, 0x88, 0xf8,
- /* (2^296)P */ 0x3f, 0x89, 0xb1, 0xbf, 0x24, 0x16, 0xac, 0x56, 0xfe, 0xdf, 0x94, 0x71, 0xbf, 0xd6, 0x57, 0x0c, 0xb4, 0x77, 0x37, 0xaa, 0x2a, 0x70, 0x76, 0x49, 0xaf, 0x0c, 0x97, 0x8e, 0x78, 0x2a, 0x67, 0xc9, 0x3b, 0x3d, 0x5b, 0x01, 0x2f, 0xda, 0xd5, 0xa8, 0xde, 0x02, 0xa9, 0xac, 0x76, 0x00, 0x0b, 0x46, 0xc6, 0x2d, 0xdc, 0x08, 0xf4, 0x10, 0x2c, 0xbe,
- /* (2^297)P */ 0xcb, 0x07, 0xf9, 0x91, 0xc6, 0xd5, 0x3e, 0x54, 0x63, 0xae, 0xfc, 0x10, 0xbe, 0x3a, 0x20, 0x73, 0x4e, 0x65, 0x0e, 0x2d, 0x86, 0x77, 0x83, 0x9d, 0xe2, 0x0a, 0xe9, 0xac, 0x22, 0x52, 0x76, 0xd4, 0x6e, 0xfa, 0xe0, 0x09, 0xef, 0x78, 0x82, 0x9f, 0x26, 0xf9, 0x06, 0xb5, 0xe7, 0x05, 0x0e, 0xf2, 0x46, 0x72, 0x93, 0xd3, 0x24, 0xbd, 0x87, 0x60,
- /* (2^298)P */ 0x14, 0x55, 0x84, 0x7b, 0x6c, 0x60, 0x80, 0x73, 0x8c, 0xbe, 0x2d, 0xd6, 0x69, 0xd6, 0x17, 0x26, 0x44, 0x9f, 0x88, 0xa2, 0x39, 0x7c, 0x89, 0xbc, 0x6d, 0x9e, 0x46, 0xb6, 0x68, 0x66, 0xea, 0xdc, 0x31, 0xd6, 0x21, 0x51, 0x9f, 0x28, 0x28, 0xaf, 0x9e, 0x47, 0x2c, 0x4c, 0x8f, 0xf3, 0xaf, 0x1f, 0xe4, 0xab, 0xac, 0xe9, 0x0c, 0x91, 0x3a, 0x61,
- /* (2^299)P */ 0xb0, 0x37, 0x55, 0x4b, 0xe9, 0xc3, 0xb1, 0xce, 0x42, 0xe6, 0xc5, 0x11, 0x7f, 0x2c, 0x11, 0xfc, 0x4e, 0x71, 0x17, 0x00, 0x74, 0x7f, 0xbf, 0x07, 0x4d, 0xfd, 0x40, 0xb2, 0x87, 0xb0, 0xef, 0x1f, 0x35, 0x2c, 0x2d, 0xd7, 0xe1, 0xe4, 0xad, 0x0e, 0x7f, 0x63, 0x66, 0x62, 0x23, 0x41, 0xf6, 0xc1, 0x14, 0xa6, 0xd7, 0xa9, 0x11, 0x56, 0x9d, 0x1b,
- /* (2^300)P */ 0x02, 0x82, 0x42, 0x18, 0x4f, 0x1b, 0xc9, 0x5d, 0x78, 0x5f, 0xee, 0xed, 0x01, 0x49, 0x8f, 0xf2, 0xa0, 0xe2, 0x6e, 0xbb, 0x6b, 0x04, 0x8d, 0xb2, 0x41, 0xae, 0xc8, 0x1b, 0x59, 0x34, 0xb8, 0x2a, 0xdb, 0x1f, 0xd2, 0x52, 0xdf, 0x3f, 0x35, 0x00, 0x8b, 0x61, 0xbc, 0x97, 0xa0, 0xc4, 0x77, 0xd1, 0xe4, 0x2c, 0x59, 0x68, 0xff, 0x30, 0xf2, 0xe2,
- /* (2^301)P */ 0x79, 0x08, 0xb1, 0xdb, 0x55, 0xae, 0xd0, 0xed, 0xda, 0xa0, 0xec, 0x6c, 0xae, 0x68, 0xf2, 0x0b, 0x61, 0xb3, 0xf5, 0x21, 0x69, 0x87, 0x0b, 0x03, 0xea, 0x8a, 0x15, 0xd9, 0x7e, 0xca, 0xf7, 0xcd, 0xf3, 0x33, 0xb3, 0x4c, 0x5b, 0x23, 0x4e, 0x6f, 0x90, 0xad, 0x91, 0x4b, 0x4f, 0x46, 0x37, 0xe5, 0xe8, 0xb7, 0xeb, 0xd5, 0xca, 0x34, 0x4e, 0x23,
- /* (2^302)P */ 0x09, 0x02, 0xdd, 0xfd, 0x70, 0xac, 0x56, 0x80, 0x36, 0x5e, 0x49, 0xd0, 0x3f, 0xc2, 0xe0, 0xba, 0x46, 0x7f, 0x5c, 0xf7, 0xc5, 0xbd, 0xd5, 0x55, 0x7d, 0x3f, 0xd5, 0x7d, 0x06, 0xdf, 0x27, 0x20, 0x4f, 0xe9, 0x30, 0xec, 0x1b, 0xa0, 0x0c, 0xd4, 0x2c, 0xe1, 0x2b, 0x65, 0x73, 0xea, 0x75, 0x35, 0xe8, 0xe6, 0x56, 0xd6, 0x07, 0x15, 0x99, 0xdf,
- /* (2^303)P */ 0x4e, 0x10, 0xb7, 0xd0, 0x63, 0x8c, 0xcf, 0x16, 0x00, 0x7c, 0x58, 0xdf, 0x86, 0xdc, 0x4e, 0xca, 0x9c, 0x40, 0x5a, 0x42, 0xfd, 0xec, 0x98, 0xa4, 0x42, 0x53, 0xae, 0x16, 0x9d, 0xfd, 0x75, 0x5a, 0x12, 0x56, 0x1e, 0xc6, 0x57, 0xcc, 0x79, 0x27, 0x96, 0x00, 0xcf, 0x80, 0x4f, 0x8a, 0x36, 0x5c, 0xbb, 0xe9, 0x12, 0xdb, 0xb6, 0x2b, 0xad, 0x96,
- /* (2^304)P */ 0x92, 0x32, 0x1f, 0xfd, 0xc6, 0x02, 0x94, 0x08, 0x1b, 0x60, 0x6a, 0x9f, 0x8b, 0xd6, 0xc8, 0xad, 0xd5, 0x1b, 0x27, 0x4e, 0xa4, 0x4d, 0x4a, 0x00, 0x10, 0x5f, 0x86, 0x11, 0xf5, 0xe3, 0x14, 0x32, 0x43, 0xee, 0xb9, 0xc7, 0xab, 0xf4, 0x6f, 0xe5, 0x66, 0x0c, 0x06, 0x0d, 0x96, 0x79, 0x28, 0xaf, 0x45, 0x2b, 0x56, 0xbe, 0xe4, 0x4a, 0x52, 0xd6,
- /* (2^305)P */ 0x15, 0x16, 0x69, 0xef, 0x60, 0xca, 0x82, 0x25, 0x0f, 0xc6, 0x30, 0xa0, 0x0a, 0xd1, 0x83, 0x29, 0xcd, 0xb6, 0x89, 0x6c, 0xf5, 0xb2, 0x08, 0x38, 0xe6, 0xca, 0x6b, 0x19, 0x93, 0xc6, 0x5f, 0x75, 0x8e, 0x60, 0x34, 0x23, 0xc4, 0x13, 0x17, 0x69, 0x55, 0xcc, 0x72, 0x9c, 0x2b, 0x6c, 0x80, 0xf4, 0x4b, 0x8b, 0xb6, 0x97, 0x65, 0x07, 0xb6, 0xfb,
- /* (2^306)P */ 0x01, 0x99, 0x74, 0x28, 0xa6, 0x67, 0xa3, 0xe5, 0x25, 0xfb, 0xdf, 0x82, 0x93, 0xe7, 0x35, 0x74, 0xce, 0xe3, 0x15, 0x1c, 0x1d, 0x79, 0x52, 0x84, 0x08, 0x04, 0x2f, 0x5c, 0xb8, 0xcd, 0x7f, 0x89, 0xb0, 0x39, 0x93, 0x63, 0xc9, 0x5d, 0x06, 0x01, 0x59, 0xf7, 0x7e, 0xf1, 0x4c, 0x3d, 0x12, 0x8d, 0x69, 0x1d, 0xb7, 0x21, 0x5e, 0x88, 0x82, 0xa2,
- /* (2^307)P */ 0x8e, 0x69, 0xaf, 0x9a, 0x41, 0x0d, 0x9d, 0xcf, 0x8e, 0x8d, 0x5c, 0x51, 0x6e, 0xde, 0x0e, 0x48, 0x23, 0x89, 0xe5, 0x37, 0x80, 0xd6, 0x9d, 0x72, 0x32, 0x26, 0x38, 0x2d, 0x63, 0xa0, 0xfa, 0xd3, 0x40, 0xc0, 0x8c, 0x68, 0x6f, 0x2b, 0x1e, 0x9a, 0x39, 0x51, 0x78, 0x74, 0x9a, 0x7b, 0x4a, 0x8f, 0x0c, 0xa0, 0x88, 0x60, 0xa5, 0x21, 0xcd, 0xc7,
- /* (2^308)P */ 0x3a, 0x7f, 0x73, 0x14, 0xbf, 0x89, 0x6a, 0x4c, 0x09, 0x5d, 0xf2, 0x93, 0x20, 0x2d, 0xc4, 0x29, 0x86, 0x06, 0x95, 0xab, 0x22, 0x76, 0x4c, 0x54, 0xe1, 0x7e, 0x80, 0x6d, 0xab, 0x29, 0x61, 0x87, 0x77, 0xf6, 0xc0, 0x3e, 0xda, 0xab, 0x65, 0x7e, 0x39, 0x12, 0xa1, 0x6b, 0x42, 0xf7, 0xc5, 0x97, 0x77, 0xec, 0x6f, 0x22, 0xbe, 0x44, 0xc7, 0x03,
- /* (2^309)P */ 0xa5, 0x23, 0x90, 0x41, 0xa3, 0xc5, 0x3e, 0xe0, 0xa5, 0x32, 0x49, 0x1f, 0x39, 0x78, 0xb1, 0xd8, 0x24, 0xea, 0xd4, 0x87, 0x53, 0x42, 0x51, 0xf4, 0xd9, 0x46, 0x25, 0x2f, 0x62, 0xa9, 0x90, 0x9a, 0x4a, 0x25, 0x8a, 0xd2, 0x10, 0xe7, 0x3c, 0xbc, 0x58, 0x8d, 0x16, 0x14, 0x96, 0xa4, 0x6f, 0xf8, 0x12, 0x69, 0x91, 0x73, 0xe2, 0xfa, 0xf4, 0x57,
- /* (2^310)P */ 0x51, 0x45, 0x3f, 0x96, 0xdc, 0x97, 0x38, 0xa6, 0x01, 0x63, 0x09, 0xea, 0xc2, 0x13, 0x30, 0xb0, 0x00, 0xb8, 0x0a, 0xce, 0xd1, 0x8f, 0x3e, 0x69, 0x62, 0x46, 0x33, 0x9c, 0xbf, 0x4b, 0xcb, 0x0c, 0x90, 0x1c, 0x45, 0xcf, 0x37, 0x5b, 0xf7, 0x4b, 0x5e, 0x95, 0xc3, 0x28, 0x9f, 0x08, 0x83, 0x53, 0x74, 0xab, 0x0c, 0xb4, 0xc0, 0xa1, 0xbc, 0x89,
- /* (2^311)P */ 0x06, 0xb1, 0x51, 0x15, 0x65, 0x60, 0x21, 0x17, 0x7a, 0x20, 0x65, 0xee, 0x12, 0x35, 0x4d, 0x46, 0xf4, 0xf8, 0xd0, 0xb1, 0xca, 0x09, 0x30, 0x08, 0x89, 0x23, 0x3b, 0xe7, 0xab, 0x8b, 0x77, 0xa6, 0xad, 0x25, 0xdd, 0xea, 0x3c, 0x7d, 0xa5, 0x24, 0xb3, 0xe8, 0xfa, 0xfb, 0xc9, 0xf2, 0x71, 0xe9, 0xfa, 0xf2, 0xdc, 0x54, 0xdd, 0x55, 0x2e, 0x2f,
- /* (2^312)P */ 0x7f, 0x96, 0x96, 0xfb, 0x52, 0x86, 0xcf, 0xea, 0x62, 0x18, 0xf1, 0x53, 0x1f, 0x61, 0x2a, 0x9f, 0x8c, 0x51, 0xca, 0x2c, 0xde, 0x6d, 0xce, 0xab, 0x58, 0x32, 0x0b, 0x33, 0x9b, 0x99, 0xb4, 0x5c, 0x88, 0x2a, 0x76, 0xcc, 0x3e, 0x54, 0x1e, 0x9d, 0xa2, 0x89, 0xe4, 0x19, 0xba, 0x80, 0xc8, 0x39, 0x32, 0x7f, 0x0f, 0xc7, 0x84, 0xbb, 0x43, 0x56,
- /* (2^313)P */ 0x9b, 0x07, 0xb4, 0x42, 0xa9, 0xa0, 0x78, 0x4f, 0x28, 0x70, 0x2b, 0x7e, 0x61, 0xe0, 0xdd, 0x02, 0x98, 0xfc, 0xed, 0x31, 0x80, 0xf1, 0x15, 0x52, 0x89, 0x23, 0xcd, 0x5d, 0x2b, 0xc5, 0x19, 0x32, 0xfb, 0x70, 0x50, 0x7a, 0x97, 0x6b, 0x42, 0xdb, 0xca, 0xdb, 0xc4, 0x59, 0x99, 0xe0, 0x12, 0x1f, 0x17, 0xba, 0x8b, 0xf0, 0xc4, 0x38, 0x5d, 0x27,
- /* (2^314)P */ 0x29, 0x1d, 0xdc, 0x2b, 0xf6, 0x5b, 0x04, 0x61, 0x36, 0x76, 0xa0, 0x56, 0x36, 0x6e, 0xd7, 0x24, 0x4d, 0xe7, 0xef, 0x44, 0xd2, 0xd5, 0x07, 0xcd, 0xc4, 0x9d, 0x80, 0x48, 0xc3, 0x38, 0xcf, 0xd8, 0xa3, 0xdd, 0xb2, 0x5e, 0xb5, 0x70, 0x15, 0xbb, 0x36, 0x85, 0x8a, 0xd7, 0xfb, 0x56, 0x94, 0x73, 0x9c, 0x81, 0xbe, 0xb1, 0x44, 0x28, 0xf1, 0x37,
- /* (2^315)P */ 0xbf, 0xcf, 0x5c, 0xd2, 0xe2, 0xea, 0xc2, 0xcd, 0x70, 0x7a, 0x9d, 0xcb, 0x81, 0xc1, 0xe9, 0xf1, 0x56, 0x71, 0x52, 0xf7, 0x1b, 0x87, 0xc6, 0xd8, 0xcc, 0xb2, 0x69, 0xf3, 0xb0, 0xbd, 0xba, 0x83, 0x12, 0x26, 0xc4, 0xce, 0x72, 0xde, 0x3b, 0x21, 0x28, 0x9e, 0x5a, 0x94, 0xf5, 0x04, 0xa3, 0xc8, 0x0f, 0x5e, 0xbc, 0x71, 0xf9, 0x0d, 0xce, 0xf5,
- /* (2^316)P */ 0x93, 0x97, 0x00, 0x85, 0xf4, 0xb4, 0x40, 0xec, 0xd9, 0x2b, 0x6c, 0xd6, 0x63, 0x9e, 0x93, 0x0a, 0x5a, 0xf4, 0xa7, 0x9a, 0xe3, 0x3c, 0xf0, 0x55, 0xd1, 0x96, 0x6c, 0xf5, 0x2a, 0xce, 0xd7, 0x95, 0x72, 0xbf, 0xc5, 0x0c, 0xce, 0x79, 0xa2, 0x0a, 0x78, 0xe0, 0x72, 0xd0, 0x66, 0x28, 0x05, 0x75, 0xd3, 0x23, 0x09, 0x91, 0xed, 0x7e, 0xc4, 0xbc,
- /* (2^317)P */ 0x77, 0xc2, 0x9a, 0xf7, 0xa6, 0xe6, 0x18, 0xb4, 0xe7, 0xf6, 0xda, 0xec, 0x44, 0x6d, 0xfb, 0x08, 0xee, 0x65, 0xa8, 0x92, 0x85, 0x1f, 0xba, 0x38, 0x93, 0x20, 0x5c, 0x4d, 0xd2, 0x18, 0x0f, 0x24, 0xbe, 0x1a, 0x96, 0x44, 0x7d, 0xeb, 0xb3, 0xda, 0x95, 0xf4, 0xaf, 0x6c, 0x06, 0x0f, 0x47, 0x37, 0xc8, 0x77, 0x63, 0xe1, 0x29, 0xef, 0xff, 0xa5,
- /* (2^318)P */ 0x16, 0x12, 0xd9, 0x47, 0x90, 0x22, 0x9b, 0x05, 0xf2, 0xa5, 0x9a, 0xae, 0x83, 0x98, 0xb5, 0xac, 0xab, 0x29, 0xaa, 0xdc, 0x5f, 0xde, 0xcd, 0xf7, 0x42, 0xad, 0x3b, 0x96, 0xd6, 0x3e, 0x6e, 0x52, 0x47, 0xb1, 0xab, 0x51, 0xde, 0x49, 0x7c, 0x87, 0x8d, 0x86, 0xe2, 0x70, 0x13, 0x21, 0x51, 0x1c, 0x0c, 0x25, 0xc1, 0xb0, 0xe6, 0x19, 0xcf, 0x12,
- /* (2^319)P */ 0xf0, 0xbc, 0x97, 0x8f, 0x4b, 0x2f, 0xd1, 0x1f, 0x8c, 0x57, 0xed, 0x3c, 0xf4, 0x26, 0x19, 0xbb, 0x60, 0xca, 0x24, 0xc5, 0xd9, 0x97, 0xe2, 0x5f, 0x76, 0x49, 0x39, 0x7e, 0x2d, 0x12, 0x21, 0x98, 0xda, 0xe6, 0xdb, 0xd2, 0xd8, 0x9f, 0x18, 0xd8, 0x83, 0x6c, 0xba, 0x89, 0x8d, 0x29, 0xfa, 0x46, 0x33, 0x8c, 0x28, 0xdf, 0x6a, 0xb3, 0x69, 0x28,
- /* (2^320)P */ 0x86, 0x17, 0xbc, 0xd6, 0x7c, 0xba, 0x1e, 0x83, 0xbb, 0x84, 0xb5, 0x8c, 0xad, 0xdf, 0xa1, 0x24, 0x81, 0x70, 0x40, 0x0f, 0xad, 0xad, 0x3b, 0x23, 0xd0, 0x93, 0xa0, 0x49, 0x5c, 0x4b, 0x51, 0xbe, 0x20, 0x49, 0x4e, 0xda, 0x2d, 0xd3, 0xad, 0x1b, 0x74, 0x08, 0x41, 0xf0, 0xef, 0x19, 0xe9, 0x45, 0x5d, 0x02, 0xae, 0x26, 0x25, 0xd9, 0xd1, 0xc2,
- /* (2^321)P */ 0x48, 0x81, 0x3e, 0xb2, 0x83, 0xf8, 0x4d, 0xb3, 0xd0, 0x4c, 0x75, 0xb3, 0xa0, 0x52, 0x26, 0xf2, 0xaf, 0x5d, 0x36, 0x70, 0x72, 0xd6, 0xb7, 0x88, 0x08, 0x69, 0xbd, 0x15, 0x25, 0xb1, 0x45, 0x1b, 0xb7, 0x0b, 0x5f, 0x71, 0x5d, 0x83, 0x49, 0xb9, 0x84, 0x3b, 0x7c, 0xc1, 0x50, 0x93, 0x05, 0x53, 0xe0, 0x61, 0xea, 0xc1, 0xef, 0xdb, 0x82, 0x97,
- /* (2^322)P */ 0x00, 0xd5, 0xc3, 0x3a, 0x4d, 0x8a, 0x23, 0x7a, 0xef, 0xff, 0x37, 0xef, 0xf3, 0xbc, 0xa9, 0xb6, 0xae, 0xd7, 0x3a, 0x7b, 0xfd, 0x3e, 0x8e, 0x9b, 0xab, 0x44, 0x54, 0x60, 0x28, 0x6c, 0xbf, 0x15, 0x24, 0x4a, 0x56, 0x60, 0x7f, 0xa9, 0x7a, 0x28, 0x59, 0x2c, 0x8a, 0xd1, 0x7d, 0x6b, 0x00, 0xfd, 0xa5, 0xad, 0xbc, 0x19, 0x3f, 0xcb, 0x73, 0xe0,
- /* (2^323)P */ 0xcf, 0x9e, 0x66, 0x06, 0x4d, 0x2b, 0xf5, 0x9c, 0xc2, 0x9d, 0x9e, 0xed, 0x5a, 0x5c, 0x2d, 0x00, 0xbf, 0x29, 0x90, 0x88, 0xe4, 0x5d, 0xfd, 0xe2, 0xf0, 0x38, 0xec, 0x4d, 0x26, 0xea, 0x54, 0xf0, 0x3c, 0x84, 0x10, 0x6a, 0xf9, 0x66, 0x9c, 0xe7, 0x21, 0xfd, 0x0f, 0xc7, 0x13, 0x50, 0x81, 0xb6, 0x50, 0xf9, 0x04, 0x7f, 0xa4, 0x37, 0x85, 0x14,
- /* (2^324)P */ 0xdb, 0x87, 0x49, 0xc7, 0xa8, 0x39, 0x0c, 0x32, 0x98, 0x0c, 0xb9, 0x1a, 0x1b, 0x4d, 0xe0, 0x8a, 0x9a, 0x8e, 0x8f, 0xab, 0x5a, 0x17, 0x3d, 0x04, 0x21, 0xce, 0x3e, 0x2c, 0xf9, 0xa3, 0x97, 0xe4, 0x77, 0x95, 0x0e, 0xb6, 0xa5, 0x15, 0xad, 0x3a, 0x1e, 0x46, 0x53, 0x17, 0x09, 0x83, 0x71, 0x4e, 0x86, 0x38, 0xd5, 0x23, 0x44, 0x16, 0x8d, 0xc8,
- /* (2^325)P */ 0x05, 0x5e, 0x99, 0x08, 0xbb, 0xc3, 0xc0, 0xb7, 0x6c, 0x12, 0xf2, 0xf3, 0xf4, 0x7c, 0x6a, 0x4d, 0x9e, 0xeb, 0x3d, 0xb9, 0x63, 0x94, 0xce, 0x81, 0xd8, 0x11, 0xcb, 0x55, 0x69, 0x4a, 0x20, 0x0b, 0x4c, 0x2e, 0x14, 0xb8, 0xd4, 0x6a, 0x7c, 0xf0, 0xed, 0xfc, 0x8f, 0xef, 0xa0, 0xeb, 0x6c, 0x01, 0xe2, 0xdc, 0x10, 0x22, 0xa2, 0x01, 0x85, 0x64,
- /* (2^326)P */ 0x58, 0xe1, 0x9c, 0x27, 0x55, 0xc6, 0x25, 0xa6, 0x7d, 0x67, 0x88, 0x65, 0x99, 0x6c, 0xcb, 0xdb, 0x27, 0x4f, 0x44, 0x29, 0xf5, 0x4a, 0x23, 0x10, 0xbc, 0x03, 0x3f, 0x36, 0x1e, 0xef, 0xb0, 0xba, 0x75, 0xe8, 0x74, 0x5f, 0x69, 0x3e, 0x26, 0x40, 0xb4, 0x2f, 0xdc, 0x43, 0xbf, 0xa1, 0x8b, 0xbd, 0xca, 0x6e, 0xc1, 0x6e, 0x21, 0x79, 0xa0, 0xd0,
- /* (2^327)P */ 0x78, 0x93, 0x4a, 0x2d, 0x22, 0x6e, 0x6e, 0x7d, 0x74, 0xd2, 0x66, 0x58, 0xce, 0x7b, 0x1d, 0x97, 0xb1, 0xf2, 0xda, 0x1c, 0x79, 0xfb, 0xba, 0xd1, 0xc0, 0xc5, 0x6e, 0xc9, 0x11, 0x89, 0xd2, 0x41, 0x8d, 0x70, 0xb9, 0xcc, 0xea, 0x6a, 0xb3, 0x45, 0xb6, 0x05, 0x2e, 0xf2, 0x17, 0xf1, 0x27, 0xb8, 0xed, 0x06, 0x1f, 0xdb, 0x9d, 0x1f, 0x69, 0x28,
- /* (2^328)P */ 0x93, 0x12, 0xa8, 0x11, 0xe1, 0x92, 0x30, 0x8d, 0xac, 0xe1, 0x1c, 0x60, 0x7c, 0xed, 0x2d, 0x2e, 0xd3, 0x03, 0x5c, 0x9c, 0xc5, 0xbd, 0x64, 0x4a, 0x8c, 0xba, 0x76, 0xfe, 0xc6, 0xc1, 0xea, 0xc2, 0x4f, 0xbe, 0x70, 0x3d, 0x64, 0xcf, 0x8e, 0x18, 0xcb, 0xcd, 0x57, 0xa7, 0xf7, 0x36, 0xa9, 0x6b, 0x3e, 0xb8, 0x69, 0xee, 0x47, 0xa2, 0x7e, 0xb2,
- /* (2^329)P */ 0x96, 0xaf, 0x3a, 0xf5, 0xed, 0xcd, 0xaf, 0xf7, 0x82, 0xaf, 0x59, 0x62, 0x0b, 0x36, 0x85, 0xf9, 0xaf, 0xd6, 0x38, 0xff, 0x87, 0x2e, 0x1d, 0x6c, 0x8b, 0xaf, 0x3b, 0xdf, 0x28, 0xa2, 0xd6, 0x4d, 0x80, 0x92, 0xc3, 0x0f, 0x34, 0xa8, 0xae, 0x69, 0x5d, 0x7b, 0x9d, 0xbc, 0xf5, 0xfd, 0x1d, 0xb1, 0x96, 0x55, 0x86, 0xe1, 0x5c, 0xb6, 0xac, 0xb9,
- /* (2^330)P */ 0x50, 0x9e, 0x37, 0x28, 0x7d, 0xa8, 0x33, 0x63, 0xda, 0x3f, 0x20, 0x98, 0x0e, 0x09, 0xa8, 0x77, 0x3b, 0x7a, 0xfc, 0x16, 0x85, 0x44, 0x64, 0x77, 0x65, 0x68, 0x92, 0x41, 0xc6, 0x1f, 0xdf, 0x27, 0xf9, 0xec, 0xa0, 0x61, 0x22, 0xea, 0x19, 0xe7, 0x75, 0x8b, 0x4e, 0xe5, 0x0f, 0xb7, 0xf7, 0xd2, 0x53, 0xf4, 0xdd, 0x4a, 0xaa, 0x78, 0x40, 0xb7,
- /* (2^331)P */ 0xd4, 0x89, 0xe3, 0x79, 0xba, 0xb6, 0xc3, 0xda, 0xe6, 0x78, 0x65, 0x7d, 0x6e, 0x22, 0x62, 0xb1, 0x3d, 0xea, 0x90, 0x84, 0x30, 0x5e, 0xd4, 0x39, 0x84, 0x78, 0xd9, 0x75, 0xd6, 0xce, 0x2a, 0x11, 0x29, 0x69, 0xa4, 0x5e, 0xaa, 0x2a, 0x98, 0x5a, 0xe5, 0x91, 0x8f, 0xb2, 0xfb, 0xda, 0x97, 0xe8, 0x83, 0x6f, 0x04, 0xb9, 0x5d, 0xaf, 0xe1, 0x9b,
- /* (2^332)P */ 0x8b, 0xe4, 0xe1, 0x48, 0x9c, 0xc4, 0x83, 0x89, 0xdf, 0x65, 0xd3, 0x35, 0x55, 0x13, 0xf4, 0x1f, 0x36, 0x92, 0x33, 0x38, 0xcb, 0xed, 0x15, 0xe6, 0x60, 0x2d, 0x25, 0xf5, 0x36, 0x60, 0x3a, 0x37, 0x9b, 0x71, 0x9d, 0x42, 0xb0, 0x14, 0xc8, 0xba, 0x62, 0xa3, 0x49, 0xb0, 0x88, 0xc1, 0x72, 0x73, 0xdd, 0x62, 0x40, 0xa9, 0x62, 0x88, 0x99, 0xca,
- /* (2^333)P */ 0x47, 0x7b, 0xea, 0xda, 0x46, 0x2f, 0x45, 0xc6, 0xe3, 0xb4, 0x4d, 0x8d, 0xac, 0x0b, 0x54, 0x22, 0x06, 0x31, 0x16, 0x66, 0x3e, 0xe4, 0x38, 0x12, 0xcd, 0xf3, 0xe7, 0x99, 0x37, 0xd9, 0x62, 0x24, 0x4b, 0x05, 0xf2, 0x58, 0xe6, 0x29, 0x4b, 0x0d, 0xf6, 0xc1, 0xba, 0xa0, 0x1e, 0x0f, 0xcb, 0x1f, 0xc6, 0x2b, 0x19, 0xfc, 0x82, 0x01, 0xd0, 0x86,
- /* (2^334)P */ 0xa2, 0xae, 0x77, 0x20, 0xfb, 0xa8, 0x18, 0xb4, 0x61, 0xef, 0xe8, 0x52, 0x79, 0xbb, 0x86, 0x90, 0x5d, 0x2e, 0x76, 0xed, 0x66, 0x60, 0x5d, 0x00, 0xb5, 0xa4, 0x00, 0x40, 0x89, 0xec, 0xd1, 0xd2, 0x0d, 0x26, 0xb9, 0x30, 0xb2, 0xd2, 0xb8, 0xe8, 0x0e, 0x56, 0xf9, 0x67, 0x94, 0x2e, 0x62, 0xe1, 0x79, 0x48, 0x2b, 0xa9, 0xfa, 0xea, 0xdb, 0x28,
- /* (2^335)P */ 0x35, 0xf1, 0xb0, 0x43, 0xbd, 0x27, 0xef, 0x18, 0x44, 0xa2, 0x04, 0xb4, 0x69, 0xa1, 0x97, 0x1f, 0x8c, 0x04, 0x82, 0x9b, 0x00, 0x6d, 0xf8, 0xbf, 0x7d, 0xc1, 0x5b, 0xab, 0xe8, 0xb2, 0x34, 0xbd, 0xaf, 0x7f, 0xb2, 0x0d, 0xf3, 0xed, 0xfc, 0x5b, 0x50, 0xee, 0xe7, 0x4a, 0x20, 0xd9, 0xf5, 0xc6, 0x9a, 0x97, 0x6d, 0x07, 0x2f, 0xb9, 0x31, 0x02,
- /* (2^336)P */ 0xf9, 0x54, 0x4a, 0xc5, 0x61, 0x7e, 0x1d, 0xa6, 0x0e, 0x1a, 0xa8, 0xd3, 0x8c, 0x36, 0x7d, 0xf1, 0x06, 0xb1, 0xac, 0x93, 0xcd, 0xe9, 0x8f, 0x61, 0x6c, 0x5d, 0x03, 0x23, 0xdf, 0x85, 0x53, 0x39, 0x63, 0x5e, 0xeb, 0xf3, 0xd3, 0xd3, 0x75, 0x97, 0x9b, 0x62, 0x9b, 0x01, 0xb3, 0x19, 0xd8, 0x2b, 0x36, 0xf2, 0x2c, 0x2c, 0x6f, 0x36, 0xc6, 0x3c,
- /* (2^337)P */ 0x05, 0x74, 0x43, 0x10, 0xb6, 0xb0, 0xf8, 0xbf, 0x02, 0x46, 0x9a, 0xee, 0xc1, 0xaf, 0xc1, 0xe5, 0x5a, 0x2e, 0xbb, 0xe1, 0xdc, 0xc6, 0xce, 0x51, 0x29, 0x50, 0xbf, 0x1b, 0xde, 0xff, 0xba, 0x4d, 0x8d, 0x8b, 0x7e, 0xe7, 0xbd, 0x5b, 0x8f, 0xbe, 0xe3, 0x75, 0x71, 0xff, 0x37, 0x05, 0x5a, 0x10, 0xeb, 0x54, 0x7e, 0x44, 0x72, 0x2c, 0xd4, 0xfc,
- /* (2^338)P */ 0x03, 0x12, 0x1c, 0xb2, 0x08, 0x90, 0xa1, 0x2d, 0x50, 0xa0, 0xad, 0x7f, 0x8d, 0xa6, 0x97, 0xc1, 0xbd, 0xdc, 0xc3, 0xa7, 0xad, 0x31, 0xdf, 0xb8, 0x03, 0x84, 0xc3, 0xb9, 0x29, 0x3d, 0x92, 0x2e, 0xc3, 0x90, 0x07, 0xe8, 0xa7, 0xc7, 0xbc, 0x61, 0xe9, 0x3e, 0xa0, 0x35, 0xda, 0x1d, 0xab, 0x48, 0xfe, 0x50, 0xc9, 0x25, 0x59, 0x23, 0x69, 0x3f,
- /* (2^339)P */ 0x8e, 0x91, 0xab, 0x6b, 0x91, 0x4f, 0x89, 0x76, 0x67, 0xad, 0xb2, 0x65, 0x9d, 0xad, 0x02, 0x36, 0xdc, 0xac, 0x96, 0x93, 0x97, 0x21, 0x14, 0xd0, 0xe8, 0x11, 0x60, 0x1e, 0xeb, 0x96, 0x06, 0xf2, 0x53, 0xf2, 0x6d, 0xb7, 0x93, 0x6f, 0x26, 0x91, 0x23, 0xe3, 0x34, 0x04, 0x92, 0x91, 0x37, 0x08, 0x50, 0xd6, 0x28, 0x09, 0x27, 0xa1, 0x0c, 0x00,
- /* (2^340)P */ 0x1f, 0xbb, 0x21, 0x26, 0x33, 0xcb, 0xa4, 0xd1, 0xee, 0x85, 0xf9, 0xd9, 0x3c, 0x90, 0xc3, 0xd1, 0x26, 0xa2, 0x25, 0x93, 0x43, 0x61, 0xed, 0x91, 0x6e, 0x54, 0x03, 0x2e, 0x42, 0x9d, 0xf7, 0xa6, 0x02, 0x0f, 0x2f, 0x9c, 0x7a, 0x8d, 0x12, 0xc2, 0x18, 0xfc, 0x41, 0xff, 0x85, 0x26, 0x1a, 0x44, 0x55, 0x0b, 0x89, 0xab, 0x6f, 0x62, 0x33, 0x8c,
- /* (2^341)P */ 0xe0, 0x3c, 0x5d, 0x70, 0x64, 0x87, 0x81, 0x35, 0xf2, 0x37, 0xa6, 0x24, 0x3e, 0xe0, 0x62, 0xd5, 0x71, 0xe7, 0x93, 0xfb, 0xac, 0xc3, 0xe7, 0xc7, 0x04, 0xe2, 0x70, 0xd3, 0x29, 0x5b, 0x21, 0xbf, 0xf4, 0x26, 0x5d, 0xf3, 0x95, 0xb4, 0x2a, 0x6a, 0x07, 0x55, 0xa6, 0x4b, 0x3b, 0x15, 0xf2, 0x25, 0x8a, 0x95, 0x3f, 0x63, 0x2f, 0x7a, 0x23, 0x96,
- /* (2^342)P */ 0x0d, 0x3d, 0xd9, 0x13, 0xa7, 0xb3, 0x5e, 0x67, 0xf7, 0x02, 0x23, 0xee, 0x84, 0xff, 0x99, 0xda, 0xb9, 0x53, 0xf8, 0xf0, 0x0e, 0x39, 0x2f, 0x3c, 0x64, 0x34, 0xe3, 0x09, 0xfd, 0x2b, 0x33, 0xc7, 0xfe, 0x62, 0x2b, 0x84, 0xdf, 0x2b, 0xd2, 0x7c, 0x26, 0x01, 0x70, 0x66, 0x5b, 0x85, 0xc2, 0xbe, 0x88, 0x37, 0xf1, 0x30, 0xac, 0xb8, 0x76, 0xa3,
- /* (2^343)P */ 0x6e, 0x01, 0xf0, 0x55, 0x35, 0xe4, 0xbd, 0x43, 0x62, 0x9d, 0xd6, 0x11, 0xef, 0x6f, 0xb8, 0x8c, 0xaa, 0x98, 0x87, 0xc6, 0x6d, 0xc4, 0xcc, 0x74, 0x92, 0x53, 0x4a, 0xdf, 0xe4, 0x08, 0x89, 0x17, 0xd0, 0x0f, 0xf4, 0x00, 0x60, 0x78, 0x08, 0x44, 0xb5, 0xda, 0x18, 0xed, 0x98, 0xc8, 0x61, 0x3d, 0x39, 0xdb, 0xcf, 0x1d, 0x49, 0x40, 0x65, 0x75,
- /* (2^344)P */ 0x8e, 0x10, 0xae, 0x5f, 0x06, 0xd2, 0x95, 0xfd, 0x20, 0x16, 0x49, 0x5b, 0x57, 0xbe, 0x22, 0x8b, 0x43, 0xfb, 0xe6, 0xcc, 0x26, 0xa5, 0x5d, 0xd3, 0x68, 0xc5, 0xf9, 0x5a, 0x86, 0x24, 0x87, 0x27, 0x05, 0xfd, 0xe2, 0xff, 0xb3, 0xa3, 0x7b, 0x37, 0x59, 0xc5, 0x4e, 0x14, 0x94, 0xf9, 0x3b, 0xcb, 0x7c, 0xed, 0xca, 0x1d, 0xb2, 0xac, 0x05, 0x4a,
- /* (2^345)P */ 0xf4, 0xd1, 0x81, 0xeb, 0x89, 0xbf, 0xfe, 0x1e, 0x41, 0x92, 0x29, 0xee, 0xe1, 0x43, 0xf5, 0x86, 0x1d, 0x2f, 0xbb, 0x1e, 0x84, 0x5d, 0x7b, 0x8d, 0xd5, 0xda, 0xee, 0x1e, 0x8a, 0xd0, 0x27, 0xf2, 0x60, 0x51, 0x59, 0x82, 0xf4, 0x84, 0x2b, 0x5b, 0x14, 0x2d, 0x81, 0x82, 0x3e, 0x2b, 0xb4, 0x6d, 0x51, 0x4f, 0xc5, 0xcb, 0xbf, 0x74, 0xe3, 0xb4,
- /* (2^346)P */ 0x19, 0x2f, 0x22, 0xb3, 0x04, 0x5f, 0x81, 0xca, 0x05, 0x60, 0xb9, 0xaa, 0xee, 0x0e, 0x2f, 0x48, 0x38, 0xf9, 0x91, 0xb4, 0x66, 0xe4, 0x57, 0x28, 0x54, 0x10, 0xe9, 0x61, 0x9d, 0xd4, 0x90, 0x75, 0xb1, 0x39, 0x23, 0xb6, 0xfc, 0x82, 0xe0, 0xfa, 0xbb, 0x5c, 0x6e, 0xc3, 0x44, 0x13, 0x00, 0x83, 0x55, 0x9e, 0x8e, 0x10, 0x61, 0x81, 0x91, 0x04,
- /* (2^347)P */ 0x5f, 0x2a, 0xd7, 0x81, 0xd9, 0x9c, 0xbb, 0x79, 0xbc, 0x62, 0x56, 0x98, 0x03, 0x5a, 0x18, 0x85, 0x2a, 0x9c, 0xd0, 0xfb, 0xd2, 0xb1, 0xaf, 0xef, 0x0d, 0x24, 0xc5, 0xfa, 0x39, 0xbb, 0x6b, 0xed, 0xa4, 0xdf, 0xe4, 0x87, 0xcd, 0x41, 0xd3, 0x72, 0x32, 0xc6, 0x28, 0x21, 0xb1, 0xba, 0x8b, 0xa3, 0x91, 0x79, 0x76, 0x22, 0x25, 0x10, 0x61, 0xd1,
- /* (2^348)P */ 0x73, 0xb5, 0x32, 0x97, 0xdd, 0xeb, 0xdd, 0x22, 0x22, 0xf1, 0x33, 0x3c, 0x77, 0x56, 0x7d, 0x6b, 0x48, 0x2b, 0x05, 0x81, 0x03, 0x03, 0x91, 0x9a, 0xe3, 0x5e, 0xd4, 0xee, 0x3f, 0xf8, 0xbb, 0x50, 0x21, 0x32, 0x4c, 0x4a, 0x58, 0x49, 0xde, 0x0c, 0xde, 0x30, 0x82, 0x3d, 0x92, 0xf0, 0x6c, 0xcc, 0x32, 0x3e, 0xd2, 0x78, 0x8a, 0x6e, 0x2c, 0xd0,
- /* (2^349)P */ 0xf0, 0xf7, 0xa1, 0x0b, 0xc1, 0x74, 0x85, 0xa8, 0xe9, 0xdd, 0x48, 0xa1, 0xc0, 0x16, 0xd8, 0x2b, 0x61, 0x08, 0xc2, 0x2b, 0x30, 0x26, 0x79, 0xce, 0x9e, 0xfd, 0x39, 0xd7, 0x81, 0xa4, 0x63, 0x8c, 0xd5, 0x74, 0xa0, 0x88, 0xfa, 0x03, 0x30, 0xe9, 0x7f, 0x2b, 0xc6, 0x02, 0xc9, 0x5e, 0xe4, 0xd5, 0x4d, 0x92, 0xd0, 0xf6, 0xf2, 0x5b, 0x79, 0x08,
- /* (2^350)P */ 0x34, 0x89, 0x81, 0x43, 0xd1, 0x94, 0x2c, 0x10, 0x54, 0x9b, 0xa0, 0xe5, 0x44, 0xe8, 0xc2, 0x2f, 0x3e, 0x0e, 0x74, 0xae, 0xba, 0xe2, 0xac, 0x85, 0x6b, 0xd3, 0x5c, 0x97, 0xf7, 0x90, 0xf1, 0x12, 0xc0, 0x03, 0xc8, 0x1f, 0x37, 0x72, 0x8c, 0x9b, 0x9c, 0x17, 0x96, 0x9d, 0xc7, 0xbf, 0xa3, 0x3f, 0x44, 0x3d, 0x87, 0x81, 0xbd, 0x81, 0xa6, 0x5f,
- /* (2^351)P */ 0xe4, 0xff, 0x78, 0x62, 0x82, 0x5b, 0x76, 0x58, 0xf5, 0x5b, 0xa6, 0xc4, 0x53, 0x11, 0x3b, 0x7b, 0xaa, 0x67, 0xf8, 0xea, 0x3b, 0x5d, 0x9a, 0x2e, 0x04, 0xeb, 0x4a, 0x24, 0xfb, 0x56, 0xf0, 0xa8, 0xd4, 0x14, 0xed, 0x0f, 0xfd, 0xc5, 0x26, 0x17, 0x2a, 0xf0, 0xb9, 0x13, 0x8c, 0xbd, 0x65, 0x14, 0x24, 0x95, 0x27, 0x12, 0x63, 0x2a, 0x09, 0x18,
- /* (2^352)P */ 0xe1, 0x5c, 0xe7, 0xe0, 0x00, 0x6a, 0x96, 0xf2, 0x49, 0x6a, 0x39, 0xa5, 0xe0, 0x17, 0x79, 0x4a, 0x63, 0x07, 0x62, 0x09, 0x61, 0x1b, 0x6e, 0xa9, 0xb5, 0x62, 0xb7, 0xde, 0xdf, 0x80, 0x4c, 0x5a, 0x99, 0x73, 0x59, 0x9d, 0xfb, 0xb1, 0x5e, 0xbe, 0xb8, 0xb7, 0x63, 0x93, 0xe8, 0xad, 0x5e, 0x1f, 0xae, 0x59, 0x1c, 0xcd, 0xb4, 0xc2, 0xb3, 0x8a,
- /* (2^353)P */ 0x78, 0x53, 0xa1, 0x4c, 0x70, 0x9c, 0x63, 0x7e, 0xb3, 0x12, 0x40, 0x5f, 0xbb, 0x23, 0xa7, 0xf7, 0x77, 0x96, 0x5b, 0x4d, 0x91, 0x10, 0x52, 0x85, 0x9e, 0xa5, 0x38, 0x0b, 0xfd, 0x25, 0x01, 0x4b, 0xfa, 0x4d, 0xd3, 0x3f, 0x78, 0x74, 0x42, 0xff, 0x62, 0x2d, 0x27, 0xdc, 0x9d, 0xd1, 0x29, 0x76, 0x2e, 0x78, 0xb3, 0x35, 0xfa, 0x15, 0xd5, 0x38,
- /* (2^354)P */ 0x8b, 0xc7, 0x43, 0xce, 0xf0, 0x5e, 0xf1, 0x0d, 0x02, 0x38, 0xe8, 0x82, 0xc9, 0x25, 0xad, 0x2d, 0x27, 0xa4, 0x54, 0x18, 0xb2, 0x30, 0x73, 0xa4, 0x41, 0x08, 0xe4, 0x86, 0xe6, 0x8c, 0xe9, 0x2a, 0x34, 0xb3, 0xd6, 0x61, 0x8f, 0x66, 0x26, 0x08, 0xb6, 0x06, 0x33, 0xaa, 0x12, 0xac, 0x72, 0xec, 0x2e, 0x52, 0xa3, 0x25, 0x3e, 0xd7, 0x62, 0xe8,
- /* (2^355)P */ 0xc4, 0xbb, 0x89, 0xc8, 0x40, 0xcc, 0x84, 0xec, 0x4a, 0xd9, 0xc4, 0x55, 0x78, 0x00, 0xcf, 0xd8, 0xe9, 0x24, 0x59, 0xdc, 0x5e, 0xf0, 0x66, 0xa1, 0x83, 0xae, 0x97, 0x18, 0xc5, 0x54, 0x27, 0xa2, 0x21, 0x52, 0x03, 0x31, 0x5b, 0x11, 0x67, 0xf6, 0x12, 0x00, 0x87, 0x2f, 0xff, 0x59, 0x70, 0x8f, 0x6d, 0x71, 0xab, 0xab, 0x24, 0xb8, 0xba, 0x35,
- /* (2^356)P */ 0x69, 0x43, 0xa7, 0x14, 0x06, 0x96, 0xe9, 0xc2, 0xe3, 0x2b, 0x45, 0x22, 0xc0, 0xd0, 0x2f, 0x34, 0xd1, 0x01, 0x99, 0xfc, 0x99, 0x38, 0xa1, 0x25, 0x2e, 0x59, 0x6c, 0x27, 0xc9, 0xeb, 0x7b, 0xdc, 0x4e, 0x26, 0x68, 0xba, 0xfa, 0xec, 0x02, 0x05, 0x64, 0x80, 0x30, 0x20, 0x5c, 0x26, 0x7f, 0xaf, 0x95, 0x17, 0x3d, 0x5c, 0x9e, 0x96, 0x96, 0xaf,
- /* (2^357)P */ 0xa6, 0xba, 0x21, 0x29, 0x32, 0xe2, 0x98, 0xde, 0x9b, 0x6d, 0x0b, 0x44, 0x91, 0xa8, 0x3e, 0xd4, 0xb8, 0x04, 0x6c, 0xf6, 0x04, 0x39, 0xbd, 0x52, 0x05, 0x15, 0x27, 0x78, 0x8e, 0x55, 0xac, 0x79, 0xc5, 0xe6, 0x00, 0x7f, 0x90, 0xa2, 0xdd, 0x07, 0x13, 0xe0, 0x24, 0x70, 0x5c, 0x0f, 0x4d, 0xa9, 0xf9, 0xae, 0xcb, 0x34, 0x10, 0x9d, 0x89, 0x9d,
- /* (2^358)P */ 0x12, 0xe0, 0xb3, 0x9f, 0xc4, 0x96, 0x1d, 0xcf, 0xed, 0x99, 0x64, 0x28, 0x8d, 0xc7, 0x31, 0x82, 0xee, 0x5e, 0x75, 0x48, 0xff, 0x3a, 0xf2, 0x09, 0x34, 0x03, 0x93, 0x52, 0x19, 0xb2, 0xc5, 0x81, 0x93, 0x45, 0x5e, 0x59, 0x21, 0x2b, 0xec, 0x89, 0xba, 0x36, 0x6e, 0xf9, 0x82, 0x75, 0x7e, 0x82, 0x3f, 0xaa, 0xe2, 0xe3, 0x3b, 0x94, 0xfd, 0x98,
- /* (2^359)P */ 0x7c, 0xdb, 0x75, 0x31, 0x61, 0xfb, 0x15, 0x28, 0x94, 0xd7, 0xc3, 0x5a, 0xa9, 0xa1, 0x0a, 0x66, 0x0f, 0x2b, 0x13, 0x3e, 0x42, 0xb5, 0x28, 0x3a, 0xca, 0x83, 0xf3, 0x61, 0x22, 0xf4, 0x40, 0xc5, 0xdf, 0xe7, 0x31, 0x9f, 0x7e, 0x51, 0x75, 0x06, 0x9d, 0x51, 0xc8, 0xe7, 0x9f, 0xc3, 0x71, 0x4f, 0x3d, 0x5b, 0xfb, 0xe9, 0x8e, 0x08, 0x40, 0x8e,
- /* (2^360)P */ 0xf7, 0x31, 0xad, 0x50, 0x5d, 0x25, 0x93, 0x73, 0x68, 0xf6, 0x7c, 0x89, 0x5a, 0x3d, 0x9f, 0x9b, 0x05, 0x82, 0xe7, 0x70, 0x4b, 0x19, 0xaa, 0xcf, 0xff, 0xde, 0x50, 0x8f, 0x2f, 0x69, 0xd3, 0xf0, 0x99, 0x51, 0x6b, 0x9d, 0xb6, 0x56, 0x6f, 0xf8, 0x4c, 0x74, 0x8b, 0x4c, 0x91, 0xf9, 0xa9, 0xb1, 0x3e, 0x07, 0xdf, 0x0b, 0x27, 0x8a, 0xb1, 0xed,
- /* (2^361)P */ 0xfb, 0x67, 0xd9, 0x48, 0xd2, 0xe4, 0x44, 0x9b, 0x43, 0x15, 0x8a, 0xeb, 0x00, 0x53, 0xad, 0x25, 0xc7, 0x7e, 0x19, 0x30, 0x87, 0xb7, 0xd5, 0x5f, 0x04, 0xf8, 0xaa, 0xdd, 0x57, 0xae, 0x34, 0x75, 0xe2, 0x84, 0x4b, 0x54, 0x60, 0x37, 0x95, 0xe4, 0xd3, 0xec, 0xac, 0xef, 0x47, 0x31, 0xa3, 0xc8, 0x31, 0x22, 0xdb, 0x26, 0xe7, 0x6a, 0xb5, 0xad,
- /* (2^362)P */ 0x44, 0x09, 0x5c, 0x95, 0xe4, 0x72, 0x3c, 0x1a, 0xd1, 0xac, 0x42, 0x51, 0x99, 0x6f, 0xfa, 0x1f, 0xf2, 0x22, 0xbe, 0xff, 0x7b, 0x66, 0xf5, 0x6c, 0xb3, 0x66, 0xc7, 0x4d, 0x78, 0x31, 0x83, 0x80, 0xf5, 0x41, 0xe9, 0x7f, 0xbe, 0xf7, 0x23, 0x49, 0x6b, 0x84, 0x4e, 0x7e, 0x47, 0x07, 0x6e, 0x74, 0xdf, 0xe5, 0x9d, 0x9e, 0x56, 0x2a, 0xc0, 0xbc,
- /* (2^363)P */ 0xac, 0x10, 0x80, 0x8c, 0x7c, 0xfa, 0x83, 0xdf, 0xb3, 0xd0, 0xc4, 0xbe, 0xfb, 0x9f, 0xac, 0xc9, 0xc3, 0x40, 0x95, 0x0b, 0x09, 0x23, 0xda, 0x63, 0x67, 0xcf, 0xe7, 0x9f, 0x7d, 0x7b, 0x6b, 0xe2, 0xe6, 0x6d, 0xdb, 0x87, 0x9e, 0xa6, 0xff, 0x6d, 0xab, 0xbd, 0xfb, 0x54, 0x84, 0x68, 0xcf, 0x89, 0xf1, 0xd0, 0xe2, 0x85, 0x61, 0xdc, 0x22, 0xd1,
- /* (2^364)P */ 0xa8, 0x48, 0xfb, 0x8c, 0x6a, 0x63, 0x01, 0x72, 0x43, 0x43, 0xeb, 0x21, 0xa3, 0x00, 0x8a, 0xc0, 0x87, 0x51, 0x9e, 0x86, 0x75, 0x16, 0x79, 0xf9, 0x6b, 0x11, 0x80, 0x62, 0xc2, 0x9d, 0xb8, 0x8c, 0x30, 0x8e, 0x8d, 0x03, 0x52, 0x7e, 0x31, 0x59, 0x38, 0xf9, 0x25, 0xc7, 0x0f, 0xc7, 0xa8, 0x2b, 0x5c, 0x80, 0xfa, 0x90, 0xa2, 0x63, 0xca, 0xe7,
- /* (2^365)P */ 0xf1, 0x5d, 0xb5, 0xd9, 0x20, 0x10, 0x7d, 0x0f, 0xc5, 0x50, 0x46, 0x07, 0xff, 0x02, 0x75, 0x2b, 0x4a, 0xf3, 0x39, 0x91, 0x72, 0xb7, 0xd5, 0xcc, 0x38, 0xb8, 0xe7, 0x36, 0x26, 0x5e, 0x11, 0x97, 0x25, 0xfb, 0x49, 0x68, 0xdc, 0xb4, 0x46, 0x87, 0x5c, 0xc2, 0x7f, 0xaa, 0x7d, 0x36, 0x23, 0xa6, 0xc6, 0x53, 0xec, 0xbc, 0x57, 0x47, 0xc1, 0x2b,
- /* (2^366)P */ 0x25, 0x5d, 0x7d, 0x95, 0xda, 0x0b, 0x8f, 0x78, 0x1e, 0x19, 0x09, 0xfa, 0x67, 0xe0, 0xa0, 0x17, 0x24, 0x76, 0x6c, 0x30, 0x1f, 0x62, 0x3d, 0xbe, 0x45, 0x70, 0xcc, 0xb6, 0x1e, 0x68, 0x06, 0x25, 0x68, 0x16, 0x1a, 0x33, 0x3f, 0x90, 0xc7, 0x78, 0x2d, 0x98, 0x3c, 0x2f, 0xb9, 0x2d, 0x94, 0x0b, 0xfb, 0x49, 0x56, 0x30, 0xd7, 0xc1, 0xe6, 0x48,
- /* (2^367)P */ 0x7a, 0xd1, 0xe0, 0x8e, 0x67, 0xfc, 0x0b, 0x50, 0x1f, 0x84, 0x98, 0xfa, 0xaf, 0xae, 0x2e, 0x31, 0x27, 0xcf, 0x3f, 0xf2, 0x6e, 0x8d, 0x81, 0x8f, 0xd2, 0x5f, 0xde, 0xd3, 0x5e, 0xe9, 0xe7, 0x13, 0x48, 0x83, 0x5a, 0x4e, 0x84, 0xd1, 0x58, 0xcf, 0x6b, 0x84, 0xdf, 0x13, 0x1d, 0x91, 0x85, 0xe8, 0xcb, 0x29, 0x79, 0xd2, 0xca, 0xac, 0x6a, 0x93,
- /* (2^368)P */ 0x53, 0x82, 0xce, 0x61, 0x96, 0x88, 0x6f, 0xe1, 0x4a, 0x4c, 0x1e, 0x30, 0x73, 0xe8, 0x74, 0xde, 0x40, 0x2b, 0xe0, 0xc4, 0xb5, 0xd8, 0x7c, 0x15, 0xe7, 0xe1, 0xb1, 0xe0, 0xd6, 0x88, 0xb1, 0x6a, 0x57, 0x19, 0x6a, 0x22, 0x66, 0x57, 0xf6, 0x8d, 0xfd, 0xc0, 0xf2, 0xa3, 0x03, 0x56, 0xfb, 0x2e, 0x75, 0x5e, 0xc7, 0x8e, 0x22, 0x96, 0x5c, 0x06,
- /* (2^369)P */ 0x98, 0x7e, 0xbf, 0x3e, 0xbf, 0x24, 0x9d, 0x15, 0xd3, 0xf6, 0xd3, 0xd2, 0xf0, 0x11, 0xf2, 0xdb, 0x36, 0x23, 0x38, 0xf7, 0x1d, 0x71, 0x20, 0xd2, 0x54, 0x7f, 0x1e, 0x24, 0x8f, 0xe2, 0xaa, 0xf7, 0x3f, 0x6b, 0x41, 0x4e, 0xdc, 0x0e, 0xec, 0xe8, 0x35, 0x0a, 0x08, 0x6d, 0x89, 0x5b, 0x32, 0x91, 0x01, 0xb6, 0xe0, 0x2c, 0xc6, 0xa1, 0xbe, 0xb4,
- /* (2^370)P */ 0x29, 0xf2, 0x1e, 0x1c, 0xdc, 0x68, 0x8a, 0x43, 0x87, 0x2c, 0x48, 0xb3, 0x9e, 0xed, 0xd2, 0x82, 0x46, 0xac, 0x2f, 0xef, 0x93, 0x34, 0x37, 0xca, 0x64, 0x8d, 0xc9, 0x06, 0x90, 0xbb, 0x78, 0x0a, 0x3c, 0x4c, 0xcf, 0x35, 0x7a, 0x0f, 0xf7, 0xa7, 0xf4, 0x2f, 0x45, 0x69, 0x3f, 0xa9, 0x5d, 0xce, 0x7b, 0x8a, 0x84, 0xc3, 0xae, 0xf4, 0xda, 0xd5,
- /* (2^371)P */ 0xca, 0xba, 0x95, 0x43, 0x05, 0x7b, 0x06, 0xd9, 0x5c, 0x0a, 0x18, 0x5f, 0x6a, 0x6a, 0xce, 0xc0, 0x3d, 0x95, 0x51, 0x0e, 0x1a, 0xbe, 0x85, 0x7a, 0xf2, 0x69, 0xec, 0xc0, 0x8c, 0xca, 0xa3, 0x32, 0x0a, 0x76, 0x50, 0xc6, 0x76, 0x61, 0x00, 0x89, 0xbf, 0x6e, 0x0f, 0x48, 0x90, 0x31, 0x93, 0xec, 0x34, 0x70, 0xf0, 0xc3, 0x8d, 0xf0, 0x0f, 0xb5,
- /* (2^372)P */ 0xbe, 0x23, 0xe2, 0x18, 0x99, 0xf1, 0xed, 0x8a, 0xf6, 0xc9, 0xac, 0xb8, 0x1e, 0x9a, 0x3c, 0x15, 0xae, 0xd7, 0x6d, 0xb3, 0x04, 0xee, 0x5b, 0x0d, 0x1e, 0x79, 0xb7, 0xf9, 0xf9, 0x8d, 0xad, 0xf9, 0x8f, 0x5a, 0x6a, 0x7b, 0xd7, 0x9b, 0xca, 0x62, 0xfe, 0x9c, 0xc0, 0x6f, 0x6d, 0x9d, 0x76, 0xa3, 0x69, 0xb9, 0x4c, 0xa1, 0xc4, 0x0c, 0x76, 0xaa,
- /* (2^373)P */ 0x1c, 0x06, 0xfe, 0x3f, 0x45, 0x70, 0xcd, 0x97, 0xa9, 0xa2, 0xb1, 0xd3, 0xf2, 0xa5, 0x0c, 0x49, 0x2c, 0x75, 0x73, 0x1f, 0xcf, 0x00, 0xaf, 0xd5, 0x2e, 0xde, 0x0d, 0x8f, 0x8f, 0x7c, 0xc4, 0x58, 0xce, 0xd4, 0xf6, 0x24, 0x19, 0x2e, 0xd8, 0xc5, 0x1d, 0x1a, 0x3f, 0xb8, 0x4f, 0xbc, 0x7d, 0xbd, 0x68, 0xe3, 0x81, 0x98, 0x1b, 0xa8, 0xc9, 0xd9,
- /* (2^374)P */ 0x39, 0x95, 0x78, 0x24, 0x6c, 0x38, 0xe4, 0xe7, 0xd0, 0x8d, 0xb9, 0x38, 0x71, 0x5e, 0xc1, 0x62, 0x80, 0xcc, 0xcb, 0x8c, 0x97, 0xca, 0xf8, 0xb9, 0xd9, 0x9c, 0xce, 0x72, 0x7b, 0x70, 0xee, 0x5f, 0xea, 0xa2, 0xdf, 0xa9, 0x14, 0x10, 0xf9, 0x6e, 0x59, 0x9f, 0x9c, 0xe0, 0x0c, 0xb2, 0x07, 0x97, 0xcd, 0xd2, 0x89, 0x16, 0xfd, 0x9c, 0xa8, 0xa5,
- /* (2^375)P */ 0x5a, 0x61, 0xf1, 0x59, 0x7c, 0x38, 0xda, 0xe2, 0x85, 0x99, 0x68, 0xe9, 0xc9, 0xf7, 0x32, 0x7e, 0xc4, 0xca, 0xb7, 0x11, 0x08, 0x69, 0x2b, 0x66, 0x02, 0xf7, 0x2e, 0x18, 0xc3, 0x8e, 0xe1, 0xf9, 0xc5, 0x19, 0x9a, 0x0a, 0x9c, 0x07, 0xba, 0xc7, 0x9c, 0x03, 0x34, 0x89, 0x99, 0x67, 0x0b, 0x16, 0x4b, 0x07, 0x36, 0x16, 0x36, 0x2c, 0xe2, 0xa1,
- /* (2^376)P */ 0x70, 0x10, 0x91, 0x27, 0xa8, 0x24, 0x8e, 0x29, 0x04, 0x6f, 0x79, 0x1f, 0xd3, 0xa5, 0x68, 0xd3, 0x0b, 0x7d, 0x56, 0x4d, 0x14, 0x57, 0x7b, 0x2e, 0x00, 0x9f, 0x9a, 0xfd, 0x6c, 0x63, 0x18, 0x81, 0xdb, 0x9d, 0xb7, 0xd7, 0xa4, 0x1e, 0xe8, 0x40, 0xf1, 0x4c, 0xa3, 0x01, 0xd5, 0x4b, 0x75, 0xea, 0xdd, 0x97, 0xfd, 0x5b, 0xb2, 0x66, 0x6a, 0x24,
- /* (2^377)P */ 0x72, 0x11, 0xfe, 0x73, 0x1b, 0xd3, 0xea, 0x7f, 0x93, 0x15, 0x15, 0x05, 0xfe, 0x40, 0xe8, 0x28, 0xd8, 0x50, 0x47, 0x66, 0xfa, 0xb7, 0xb5, 0x04, 0xba, 0x35, 0x1e, 0x32, 0x9f, 0x5f, 0x32, 0xba, 0x3d, 0xd1, 0xed, 0x9a, 0x76, 0xca, 0xa3, 0x3e, 0x77, 0xd8, 0xd8, 0x7c, 0x5f, 0x68, 0x42, 0xb5, 0x86, 0x7f, 0x3b, 0xc9, 0xc1, 0x89, 0x64, 0xda,
- /* (2^378)P */ 0xd5, 0xd4, 0x17, 0x31, 0xfc, 0x6a, 0xfd, 0xb8, 0xe8, 0xe5, 0x3e, 0x39, 0x06, 0xe4, 0xd1, 0x90, 0x2a, 0xca, 0xf6, 0x54, 0x6c, 0x1b, 0x2f, 0x49, 0x97, 0xb1, 0x2a, 0x82, 0x43, 0x3d, 0x1f, 0x8b, 0xe2, 0x47, 0xc5, 0x24, 0xa8, 0xd5, 0x53, 0x29, 0x7d, 0xc6, 0x87, 0xa6, 0x25, 0x3a, 0x64, 0xdd, 0x71, 0x08, 0x9e, 0xcd, 0xe9, 0x45, 0xc7, 0xba,
- /* (2^379)P */ 0x37, 0x72, 0x6d, 0x13, 0x7a, 0x8d, 0x04, 0x31, 0xe6, 0xe3, 0x9e, 0x36, 0x71, 0x3e, 0xc0, 0x1e, 0xe3, 0x71, 0xd3, 0x49, 0x4e, 0x4a, 0x36, 0x42, 0x68, 0x68, 0x61, 0xc7, 0x3c, 0xdb, 0x81, 0x49, 0xf7, 0x91, 0x4d, 0xea, 0x4c, 0x4f, 0x98, 0xc6, 0x7e, 0x60, 0x84, 0x4b, 0x6a, 0x37, 0xbb, 0x52, 0xf7, 0xce, 0x02, 0xe4, 0xad, 0xd1, 0x3c, 0xa7,
- /* (2^380)P */ 0x51, 0x06, 0x2d, 0xf8, 0x08, 0xe8, 0xf1, 0x0c, 0xe5, 0xa9, 0xac, 0x29, 0x73, 0x3b, 0xed, 0x98, 0x5f, 0x55, 0x08, 0x38, 0x51, 0x44, 0x36, 0x5d, 0xea, 0xc3, 0xb8, 0x0e, 0xa0, 0x4f, 0xd2, 0x79, 0xe9, 0x98, 0xc3, 0xf5, 0x00, 0xb9, 0x26, 0x27, 0x42, 0xa8, 0x07, 0xc1, 0x12, 0x31, 0xc1, 0xc3, 0x3c, 0x3b, 0x7a, 0x72, 0x97, 0xc2, 0x70, 0x3a,
- /* (2^381)P */ 0xf4, 0xb2, 0xba, 0x32, 0xbc, 0xa9, 0x2f, 0x87, 0xc7, 0x3c, 0x45, 0xcd, 0xae, 0xe2, 0x13, 0x6d, 0x3a, 0xf2, 0xf5, 0x66, 0x97, 0x29, 0xaf, 0x53, 0x9f, 0xda, 0xea, 0x14, 0xdf, 0x04, 0x98, 0x19, 0x95, 0x9e, 0x2a, 0x00, 0x5c, 0x9d, 0x1d, 0xf0, 0x39, 0x23, 0xff, 0xfc, 0xca, 0x36, 0xb7, 0xde, 0xdf, 0x37, 0x78, 0x52, 0x21, 0xfa, 0x19, 0x10,
- /* (2^382)P */ 0x50, 0x20, 0x73, 0x74, 0x62, 0x21, 0xf2, 0xf7, 0x9b, 0x66, 0x85, 0x34, 0x74, 0xd4, 0x9d, 0x60, 0xd7, 0xbc, 0xc8, 0x46, 0x3b, 0xb8, 0x80, 0x42, 0x15, 0x0a, 0x6c, 0x35, 0x1a, 0x69, 0xf0, 0x1d, 0x4b, 0x29, 0x54, 0x5a, 0x9a, 0x48, 0xec, 0x9f, 0x37, 0x74, 0x91, 0xd0, 0xd1, 0x9e, 0x00, 0xc2, 0x76, 0x56, 0xd6, 0xa0, 0x15, 0x14, 0x83, 0x59,
- /* (2^383)P */ 0xc2, 0xf8, 0x22, 0x20, 0x23, 0x07, 0xbd, 0x1d, 0x6f, 0x1e, 0x8c, 0x56, 0x06, 0x6a, 0x4b, 0x9f, 0xe2, 0xa9, 0x92, 0x46, 0x4b, 0x46, 0x59, 0xd7, 0xe1, 0xda, 0x14, 0x98, 0x07, 0x65, 0x7e, 0x28, 0x20, 0xf2, 0x9d, 0x4f, 0x36, 0x5c, 0x92, 0xe0, 0x9d, 0xfe, 0x3e, 0xda, 0xe4, 0x47, 0x19, 0x3c, 0x00, 0x7f, 0x22, 0xf2, 0x9e, 0x51, 0xae, 0x4d,
- /* (2^384)P */ 0xbe, 0x8c, 0x1b, 0x10, 0xb6, 0xad, 0xcc, 0xcc, 0xd8, 0x5e, 0x21, 0xa6, 0xfb, 0xf1, 0xf6, 0xbd, 0x0a, 0x24, 0x67, 0xb4, 0x57, 0x7a, 0xbc, 0xe8, 0xe9, 0xff, 0xee, 0x0a, 0x1f, 0xee, 0xbd, 0xc8, 0x44, 0xed, 0x2b, 0xbb, 0x55, 0x1f, 0xdd, 0x7c, 0xb3, 0xeb, 0x3f, 0x63, 0xa1, 0x28, 0x91, 0x21, 0xab, 0x71, 0xc6, 0x4c, 0xd0, 0xe9, 0xb0, 0x21,
- /* (2^385)P */ 0xad, 0xc9, 0x77, 0x2b, 0xee, 0x89, 0xa4, 0x7b, 0xfd, 0xf9, 0xf6, 0x14, 0xe4, 0xed, 0x1a, 0x16, 0x9b, 0x78, 0x41, 0x43, 0xa8, 0x83, 0x72, 0x06, 0x2e, 0x7c, 0xdf, 0xeb, 0x7e, 0xdd, 0xd7, 0x8b, 0xea, 0x9a, 0x2b, 0x03, 0xba, 0x57, 0xf3, 0xf1, 0xd9, 0xe5, 0x09, 0xc5, 0x98, 0x61, 0x1c, 0x51, 0x6d, 0x5d, 0x6e, 0xfb, 0x5e, 0x95, 0x9f, 0xb5,
- /* (2^386)P */ 0x23, 0xe2, 0x1e, 0x95, 0xa3, 0x5e, 0x42, 0x10, 0xc7, 0xc3, 0x70, 0xbf, 0x4b, 0x6b, 0x83, 0x36, 0x93, 0xb7, 0x68, 0x47, 0x88, 0x3a, 0x10, 0x88, 0x48, 0x7f, 0x8c, 0xae, 0x54, 0x10, 0x02, 0xa4, 0x52, 0x8f, 0x8d, 0xf7, 0x26, 0x4f, 0x50, 0xc3, 0x6a, 0xe2, 0x4e, 0x3b, 0x4c, 0xb9, 0x8a, 0x14, 0x15, 0x6d, 0x21, 0x29, 0xb3, 0x6e, 0x4e, 0xd0,
- /* (2^387)P */ 0x4c, 0x8a, 0x18, 0x3f, 0xb7, 0x20, 0xfd, 0x3e, 0x54, 0xca, 0x68, 0x3c, 0xea, 0x6f, 0xf4, 0x6b, 0xa2, 0xbd, 0x01, 0xbd, 0xfe, 0x08, 0xa8, 0xd8, 0xc2, 0x20, 0x36, 0x05, 0xcd, 0xe9, 0xf3, 0x9e, 0xfa, 0x85, 0x66, 0x8f, 0x4b, 0x1d, 0x8c, 0x64, 0x4f, 0xb8, 0xc6, 0x0f, 0x5b, 0x57, 0xd8, 0x24, 0x19, 0x5a, 0x14, 0x4b, 0x92, 0xd3, 0x96, 0xbc,
- /* (2^388)P */ 0xa9, 0x3f, 0xc9, 0x6c, 0xca, 0x64, 0x1e, 0x6f, 0xdf, 0x65, 0x7f, 0x9a, 0x47, 0x6b, 0x8a, 0x60, 0x31, 0xa6, 0x06, 0xac, 0x69, 0x30, 0xe6, 0xea, 0x63, 0x42, 0x26, 0x5f, 0xdb, 0xd0, 0xf2, 0x8e, 0x34, 0x0a, 0x3a, 0xeb, 0xf3, 0x79, 0xc8, 0xb7, 0x60, 0x56, 0x5c, 0x37, 0x95, 0x71, 0xf8, 0x7f, 0x49, 0x3e, 0x9e, 0x01, 0x26, 0x1e, 0x80, 0x9f,
- /* (2^389)P */ 0xf8, 0x16, 0x9a, 0xaa, 0xb0, 0x28, 0xb5, 0x8e, 0xd0, 0x60, 0xe5, 0x26, 0xa9, 0x47, 0xc4, 0x5c, 0xa9, 0x39, 0xfe, 0x0a, 0xd8, 0x07, 0x2b, 0xb3, 0xce, 0xf1, 0xea, 0x1a, 0xf4, 0x7b, 0x98, 0x31, 0x3d, 0x13, 0x29, 0x80, 0xe8, 0x0d, 0xcf, 0x56, 0x39, 0x86, 0x50, 0x0c, 0xb3, 0x18, 0xf4, 0xc5, 0xca, 0xf2, 0x6f, 0xcd, 0x8d, 0xd5, 0x02, 0xb0,
- /* (2^390)P */ 0xbf, 0x39, 0x3f, 0xac, 0x6d, 0x1a, 0x6a, 0xe4, 0x42, 0x24, 0xd6, 0x41, 0x9d, 0xb9, 0x5b, 0x46, 0x73, 0x93, 0x76, 0xaa, 0xb7, 0x37, 0x36, 0xa6, 0x09, 0xe5, 0x04, 0x3b, 0x66, 0xc4, 0x29, 0x3e, 0x41, 0xc2, 0xcb, 0xe5, 0x17, 0xd7, 0x34, 0x67, 0x1d, 0x2c, 0x12, 0xec, 0x24, 0x7a, 0x40, 0xa2, 0x45, 0x41, 0xf0, 0x75, 0xed, 0x43, 0x30, 0xc9,
- /* (2^391)P */ 0x80, 0xf6, 0x47, 0x5b, 0xad, 0x54, 0x02, 0xbc, 0xdd, 0xa4, 0xb2, 0xd7, 0x42, 0x95, 0xf2, 0x0d, 0x1b, 0xef, 0x37, 0xa7, 0xb4, 0x34, 0x04, 0x08, 0x71, 0x1b, 0xd3, 0xdf, 0xa1, 0xf0, 0x2b, 0xfa, 0xc0, 0x1f, 0xf3, 0x44, 0xb5, 0xc6, 0x47, 0x3d, 0x65, 0x67, 0x45, 0x4d, 0x2f, 0xde, 0x52, 0x73, 0xfc, 0x30, 0x01, 0x6b, 0xc1, 0x03, 0xd8, 0xd7,
- /* (2^392)P */ 0x1c, 0x67, 0x55, 0x3e, 0x01, 0x17, 0x0f, 0x3e, 0xe5, 0x34, 0x58, 0xfc, 0xcb, 0x71, 0x24, 0x74, 0x5d, 0x36, 0x1e, 0x89, 0x2a, 0x63, 0xf8, 0xf8, 0x9f, 0x50, 0x9f, 0x32, 0x92, 0x29, 0xd8, 0x1a, 0xec, 0x76, 0x57, 0x6c, 0x67, 0x12, 0x6a, 0x6e, 0xef, 0x97, 0x1f, 0xc3, 0x77, 0x60, 0x3c, 0x22, 0xcb, 0xc7, 0x04, 0x1a, 0x89, 0x2d, 0x10, 0xa6,
- /* (2^393)P */ 0x12, 0xf5, 0xa9, 0x26, 0x16, 0xd9, 0x3c, 0x65, 0x5d, 0x83, 0xab, 0xd1, 0x70, 0x6b, 0x1c, 0xdb, 0xe7, 0x86, 0x0d, 0xfb, 0xe7, 0xf8, 0x2a, 0x58, 0x6e, 0x7a, 0x66, 0x13, 0x53, 0x3a, 0x6f, 0x8d, 0x43, 0x5f, 0x14, 0x23, 0x14, 0xff, 0x3d, 0x52, 0x7f, 0xee, 0xbd, 0x7a, 0x34, 0x8b, 0x35, 0x24, 0xc3, 0x7a, 0xdb, 0xcf, 0x22, 0x74, 0x9a, 0x8f,
- /* (2^394)P */ 0xdb, 0x20, 0xfc, 0xe5, 0x39, 0x4e, 0x7d, 0x78, 0xee, 0x0b, 0xbf, 0x1d, 0x80, 0xd4, 0x05, 0x4f, 0xb9, 0xd7, 0x4e, 0x94, 0x88, 0x9a, 0x50, 0x78, 0x1a, 0x70, 0x8c, 0xcc, 0x25, 0xb6, 0x61, 0x09, 0xdc, 0x7b, 0xea, 0x3f, 0x7f, 0xea, 0x2a, 0x0d, 0x47, 0x1c, 0x8e, 0xa6, 0x5b, 0xd2, 0xa3, 0x61, 0x93, 0x3c, 0x68, 0x9f, 0x8b, 0xea, 0xb0, 0xcb,
- /* (2^395)P */ 0xff, 0x54, 0x02, 0x19, 0xae, 0x8b, 0x4c, 0x2c, 0x3a, 0xe0, 0xe4, 0xac, 0x87, 0xf7, 0x51, 0x45, 0x41, 0x43, 0xdc, 0xaa, 0xcd, 0xcb, 0xdc, 0x40, 0xe3, 0x44, 0x3b, 0x1d, 0x9e, 0x3d, 0xb9, 0x82, 0xcc, 0x7a, 0xc5, 0x12, 0xf8, 0x1e, 0xdd, 0xdb, 0x8d, 0xb0, 0x2a, 0xe8, 0xe6, 0x6c, 0x94, 0x3b, 0xb7, 0x2d, 0xba, 0x79, 0x3b, 0xb5, 0x86, 0xfb,
- /* (2^396)P */ 0x82, 0x88, 0x13, 0xdd, 0x6c, 0xcd, 0x85, 0x2b, 0x90, 0x86, 0xb7, 0xac, 0x16, 0xa6, 0x6e, 0x6a, 0x94, 0xd8, 0x1e, 0x4e, 0x41, 0x0f, 0xce, 0x81, 0x6a, 0xa8, 0x26, 0x56, 0x43, 0x52, 0x52, 0xe6, 0xff, 0x88, 0xcf, 0x47, 0x05, 0x1d, 0xff, 0xf3, 0xa0, 0x10, 0xb2, 0x97, 0x87, 0xeb, 0x47, 0xbb, 0xfa, 0x1f, 0xe8, 0x4c, 0xce, 0xc4, 0xcd, 0x93,
- /* (2^397)P */ 0xf4, 0x11, 0xf5, 0x8d, 0x89, 0x29, 0x79, 0xb3, 0x59, 0x0b, 0x29, 0x7d, 0x9c, 0x12, 0x4a, 0x65, 0x72, 0x3a, 0xf9, 0xec, 0x37, 0x18, 0x86, 0xef, 0x44, 0x07, 0x25, 0x74, 0x76, 0x53, 0xed, 0x51, 0x01, 0xc6, 0x28, 0xc5, 0xc3, 0x4a, 0x0f, 0x99, 0xec, 0xc8, 0x40, 0x5a, 0x83, 0x30, 0x79, 0xa2, 0x3e, 0x63, 0x09, 0x2d, 0x6f, 0x23, 0x54, 0x1c,
- /* (2^398)P */ 0x5c, 0x6f, 0x3b, 0x1c, 0x30, 0x77, 0x7e, 0x87, 0x66, 0x83, 0x2e, 0x7e, 0x85, 0x50, 0xfd, 0xa0, 0x7a, 0xc2, 0xf5, 0x0f, 0xc1, 0x64, 0xe7, 0x0b, 0xbd, 0x59, 0xa7, 0xe7, 0x65, 0x53, 0xc3, 0xf5, 0x55, 0x5b, 0xe1, 0x82, 0x30, 0x5a, 0x61, 0xcd, 0xa0, 0x89, 0x32, 0xdb, 0x87, 0xfc, 0x21, 0x8a, 0xab, 0x6d, 0x82, 0xa8, 0x42, 0x81, 0x4f, 0xf2,
- /* (2^399)P */ 0xb3, 0xeb, 0x88, 0x18, 0xf6, 0x56, 0x96, 0xbf, 0xba, 0x5d, 0x71, 0xa1, 0x5a, 0xd1, 0x04, 0x7b, 0xd5, 0x46, 0x01, 0x74, 0xfe, 0x15, 0x25, 0xb7, 0xff, 0x0c, 0x24, 0x47, 0xac, 0xfd, 0xab, 0x47, 0x32, 0xe1, 0x6a, 0x4e, 0xca, 0xcf, 0x7f, 0xdd, 0xf8, 0xd2, 0x4b, 0x3b, 0xf5, 0x17, 0xba, 0xba, 0x8b, 0xa1, 0xec, 0x28, 0x3f, 0x97, 0xab, 0x2a,
- /* (2^400)P */ 0x51, 0x38, 0xc9, 0x5e, 0xc6, 0xb3, 0x64, 0xf2, 0x24, 0x4d, 0x04, 0x7d, 0xc8, 0x39, 0x0c, 0x4a, 0xc9, 0x73, 0x74, 0x1b, 0x5c, 0xb2, 0xc5, 0x41, 0x62, 0xa0, 0x4c, 0x6d, 0x8d, 0x91, 0x9a, 0x7b, 0x88, 0xab, 0x9c, 0x7e, 0x23, 0xdb, 0x6f, 0xb5, 0x72, 0xd6, 0x47, 0x40, 0xef, 0x22, 0x58, 0x62, 0x19, 0x6c, 0x38, 0xba, 0x5b, 0x00, 0x30, 0x9f,
- /* (2^401)P */ 0x65, 0xbb, 0x3b, 0x9b, 0xe9, 0xae, 0xbf, 0xbe, 0xe4, 0x13, 0x95, 0xf3, 0xe3, 0x77, 0xcb, 0xe4, 0x9a, 0x22, 0xb5, 0x4a, 0x08, 0x9d, 0xb3, 0x9e, 0x27, 0xe0, 0x15, 0x6c, 0x9f, 0x7e, 0x9a, 0x5e, 0x15, 0x45, 0x25, 0x8d, 0x01, 0x0a, 0xd2, 0x2b, 0xbd, 0x48, 0x06, 0x0d, 0x18, 0x97, 0x4b, 0xdc, 0xbc, 0xf0, 0xcd, 0xb2, 0x52, 0x3c, 0xac, 0xf5,
- /* (2^402)P */ 0x3e, 0xed, 0x47, 0x6b, 0x5c, 0xf6, 0x76, 0xd0, 0xe9, 0x15, 0xa3, 0xcb, 0x36, 0x00, 0x21, 0xa3, 0x79, 0x20, 0xa5, 0x3e, 0x88, 0x03, 0xcb, 0x7e, 0x63, 0xbb, 0xed, 0xa9, 0x13, 0x35, 0x16, 0xaf, 0x2e, 0xb4, 0x70, 0x14, 0x93, 0xfb, 0xc4, 0x9b, 0xd8, 0xb1, 0xbe, 0x43, 0xd1, 0x85, 0xb8, 0x97, 0xef, 0xea, 0x88, 0xa1, 0x25, 0x52, 0x62, 0x75,
- /* (2^403)P */ 0x8e, 0x4f, 0xaa, 0x23, 0x62, 0x7e, 0x2b, 0x37, 0x89, 0x00, 0x11, 0x30, 0xc5, 0x33, 0x4a, 0x89, 0x8a, 0xe2, 0xfc, 0x5c, 0x6a, 0x75, 0xe5, 0xf7, 0x02, 0x4a, 0x9b, 0xf7, 0xb5, 0x6a, 0x85, 0x31, 0xd3, 0x5a, 0xcf, 0xc3, 0xf8, 0xde, 0x2f, 0xcf, 0xb5, 0x24, 0xf4, 0xe3, 0xa1, 0xad, 0x42, 0xae, 0x09, 0xb9, 0x2e, 0x04, 0x2d, 0x01, 0x22, 0x3f,
- /* (2^404)P */ 0x41, 0x16, 0xfb, 0x7d, 0x50, 0xfd, 0xb5, 0xba, 0x88, 0x24, 0xba, 0xfd, 0x3d, 0xb2, 0x90, 0x15, 0xb7, 0xfa, 0xa2, 0xe1, 0x4c, 0x7d, 0xb9, 0xc6, 0xff, 0x81, 0x57, 0xb6, 0xc2, 0x9e, 0xcb, 0xc4, 0x35, 0xbd, 0x01, 0xb7, 0xaa, 0xce, 0xd0, 0xe9, 0xb5, 0xd6, 0x72, 0xbf, 0xd2, 0xee, 0xc7, 0xac, 0x94, 0xff, 0x29, 0x57, 0x02, 0x49, 0x09, 0xad,
- /* (2^405)P */ 0x27, 0xa5, 0x78, 0x1b, 0xbf, 0x6b, 0xaf, 0x0b, 0x8c, 0xd9, 0xa8, 0x37, 0xb0, 0x67, 0x18, 0xb6, 0xc7, 0x05, 0x8a, 0x67, 0x03, 0x30, 0x62, 0x6e, 0x56, 0x82, 0xa9, 0x54, 0x3e, 0x0c, 0x4e, 0x07, 0xe1, 0x5a, 0x38, 0xed, 0xfa, 0xc8, 0x55, 0x6b, 0x08, 0xa3, 0x6b, 0x64, 0x2a, 0x15, 0xd6, 0x39, 0x6f, 0x47, 0x99, 0x42, 0x3f, 0x33, 0x84, 0x8f,
- /* (2^406)P */ 0xbc, 0x45, 0x29, 0x81, 0x0e, 0xa4, 0xc5, 0x72, 0x3a, 0x10, 0xe1, 0xc4, 0x1e, 0xda, 0xc3, 0xfe, 0xb0, 0xce, 0xd2, 0x13, 0x34, 0x67, 0x21, 0xc6, 0x7e, 0xf9, 0x8c, 0xff, 0x39, 0x50, 0xae, 0x92, 0x60, 0x35, 0x2f, 0x8b, 0x6e, 0xc9, 0xc1, 0x27, 0x3a, 0x94, 0x66, 0x3e, 0x26, 0x84, 0x93, 0xc8, 0x6c, 0xcf, 0xd2, 0x03, 0xa1, 0x10, 0xcf, 0xb7,
- /* (2^407)P */ 0x64, 0xda, 0x19, 0xf6, 0xc5, 0x73, 0x17, 0x44, 0x88, 0x81, 0x07, 0x0d, 0x34, 0xb2, 0x75, 0xf9, 0xd9, 0xe2, 0xe0, 0x8b, 0x71, 0xcf, 0x72, 0x34, 0x83, 0xb4, 0xce, 0xfc, 0xd7, 0x29, 0x09, 0x5a, 0x98, 0xbf, 0x14, 0xac, 0x77, 0x55, 0x38, 0x47, 0x5b, 0x0f, 0x40, 0x24, 0xe5, 0xa5, 0xa6, 0xac, 0x2d, 0xa6, 0xff, 0x9c, 0x73, 0xfe, 0x5c, 0x7e,
- /* (2^408)P */ 0x1e, 0x33, 0xcc, 0x68, 0xb2, 0xbc, 0x8c, 0x93, 0xaf, 0xcc, 0x38, 0xf8, 0xd9, 0x16, 0x72, 0x50, 0xac, 0xd9, 0xb5, 0x0b, 0x9a, 0xbe, 0x46, 0x7a, 0xf1, 0xee, 0xf1, 0xad, 0xec, 0x5b, 0x59, 0x27, 0x9c, 0x05, 0xa3, 0x87, 0xe0, 0x37, 0x2c, 0x83, 0xce, 0xb3, 0x65, 0x09, 0x8e, 0xc3, 0x9c, 0xbf, 0x6a, 0xa2, 0x00, 0xcc, 0x12, 0x36, 0xc5, 0x95,
- /* (2^409)P */ 0x36, 0x11, 0x02, 0x14, 0x9c, 0x3c, 0xeb, 0x2f, 0x23, 0x5b, 0x6b, 0x2b, 0x08, 0x54, 0x53, 0xac, 0xb2, 0xa3, 0xe0, 0x26, 0x62, 0x3c, 0xe4, 0xe1, 0x81, 0xee, 0x13, 0x3e, 0xa4, 0x97, 0xef, 0xf9, 0x92, 0x27, 0x01, 0xce, 0x54, 0x8b, 0x3e, 0x31, 0xbe, 0xa7, 0x88, 0xcf, 0x47, 0x99, 0x3c, 0x10, 0x6f, 0x60, 0xb3, 0x06, 0x4e, 0xee, 0x1b, 0xf0,
- /* (2^410)P */ 0x59, 0x49, 0x66, 0xcf, 0x22, 0xe6, 0xf6, 0x73, 0xfe, 0xa3, 0x1c, 0x09, 0xfa, 0x5f, 0x65, 0xa8, 0xf0, 0x82, 0xc2, 0xef, 0x16, 0x63, 0x6e, 0x79, 0x69, 0x51, 0x39, 0x07, 0x65, 0xc4, 0x81, 0xec, 0x73, 0x0f, 0x15, 0x93, 0xe1, 0x30, 0x33, 0xe9, 0x37, 0x86, 0x42, 0x4c, 0x1f, 0x9b, 0xad, 0xee, 0x3f, 0xf1, 0x2a, 0x8e, 0x6a, 0xa3, 0xc8, 0x35,
- /* (2^411)P */ 0x1e, 0x49, 0xf1, 0xdd, 0xd2, 0x9c, 0x8e, 0x78, 0xb2, 0x06, 0xe4, 0x6a, 0xab, 0x3a, 0xdc, 0xcd, 0xf4, 0xeb, 0xe1, 0xe7, 0x2f, 0xaa, 0xeb, 0x40, 0x31, 0x9f, 0xb9, 0xab, 0x13, 0xa9, 0x78, 0xbf, 0x38, 0x89, 0x0e, 0x85, 0x14, 0x8b, 0x46, 0x76, 0x14, 0xda, 0xcf, 0x33, 0xc8, 0x79, 0xd3, 0xd5, 0xa3, 0x6a, 0x69, 0x45, 0x70, 0x34, 0xc3, 0xe9,
- /* (2^412)P */ 0x5e, 0xe7, 0x78, 0xe9, 0x24, 0xcc, 0xe9, 0xf4, 0xc8, 0x6b, 0xe0, 0xfb, 0x3a, 0xbe, 0xcc, 0x42, 0x4a, 0x00, 0x22, 0xf8, 0xe6, 0x32, 0xbe, 0x6d, 0x18, 0x55, 0x60, 0xe9, 0x72, 0x69, 0x50, 0x56, 0xca, 0x04, 0x18, 0x38, 0xa1, 0xee, 0xd8, 0x38, 0x3c, 0xa7, 0x70, 0xe2, 0xb9, 0x4c, 0xa0, 0xc8, 0x89, 0x72, 0xcf, 0x49, 0x7f, 0xdf, 0xbc, 0x67,
- /* (2^413)P */ 0x1d, 0x17, 0xcb, 0x0b, 0xbd, 0xb2, 0x36, 0xe3, 0xa8, 0x99, 0x31, 0xb6, 0x26, 0x9c, 0x0c, 0x74, 0xaf, 0x4d, 0x24, 0x61, 0xcf, 0x31, 0x7b, 0xed, 0xdd, 0xc3, 0xf6, 0x32, 0x70, 0xfe, 0x17, 0xf6, 0x51, 0x37, 0x65, 0xce, 0x5d, 0xaf, 0xa5, 0x2f, 0x2a, 0xfe, 0x00, 0x71, 0x7c, 0x50, 0xbe, 0x21, 0xc7, 0xed, 0xc6, 0xfc, 0x67, 0xcf, 0x9c, 0xdd,
- /* (2^414)P */ 0x26, 0x3e, 0xf8, 0xbb, 0xd0, 0xb1, 0x01, 0xd8, 0xeb, 0x0b, 0x62, 0x87, 0x35, 0x4c, 0xde, 0xca, 0x99, 0x9c, 0x6d, 0xf7, 0xb6, 0xf0, 0x57, 0x0a, 0x52, 0x29, 0x6a, 0x3f, 0x26, 0x31, 0x04, 0x07, 0x2a, 0xc9, 0xfa, 0x9b, 0x0e, 0x62, 0x8e, 0x72, 0xf2, 0xad, 0xce, 0xb6, 0x35, 0x7a, 0xc1, 0xae, 0x35, 0xc7, 0xa3, 0x14, 0xcf, 0x0c, 0x28, 0xb7,
- /* (2^415)P */ 0xa6, 0xf1, 0x32, 0x3a, 0x20, 0xd2, 0x24, 0x97, 0xcf, 0x5d, 0x37, 0x99, 0xaf, 0x33, 0x7a, 0x5b, 0x7a, 0xcc, 0x4e, 0x41, 0x38, 0xb1, 0x4e, 0xad, 0xc9, 0xd9, 0x71, 0x7e, 0xb2, 0xf5, 0xd5, 0x01, 0x6c, 0x4d, 0xfd, 0xa1, 0xda, 0x03, 0x38, 0x9b, 0x3d, 0x92, 0x92, 0xf2, 0xca, 0xbf, 0x1f, 0x24, 0xa4, 0xbb, 0x30, 0x6a, 0x74, 0x56, 0xc8, 0xce,
- /* (2^416)P */ 0x27, 0xf4, 0xed, 0xc9, 0xc3, 0xb1, 0x79, 0x85, 0xbe, 0xf6, 0xeb, 0xf3, 0x55, 0xc7, 0xaa, 0xa6, 0xe9, 0x07, 0x5d, 0xf4, 0xeb, 0xa6, 0x81, 0xe3, 0x0e, 0xcf, 0xa3, 0xc1, 0xef, 0xe7, 0x34, 0xb2, 0x03, 0x73, 0x8a, 0x91, 0xf1, 0xad, 0x05, 0xc7, 0x0b, 0x43, 0x99, 0x12, 0x31, 0xc8, 0xc7, 0xc5, 0xa4, 0x3d, 0xcd, 0xe5, 0x4e, 0x6d, 0x24, 0xdd,
- /* (2^417)P */ 0x61, 0x54, 0xd0, 0x95, 0x2c, 0x45, 0x75, 0xac, 0xb5, 0x1a, 0x9d, 0x11, 0xeb, 0xed, 0x6b, 0x57, 0xa3, 0xe6, 0xcd, 0x77, 0xd4, 0x83, 0x8e, 0x39, 0xf1, 0x0f, 0x98, 0xcb, 0x40, 0x02, 0x6e, 0x10, 0x82, 0x9e, 0xb4, 0x93, 0x76, 0xd7, 0x97, 0xa3, 0x53, 0x12, 0x86, 0xc6, 0x15, 0x78, 0x73, 0x93, 0xe7, 0x7f, 0xcf, 0x1f, 0xbf, 0xcd, 0xd2, 0x7a,
- /* (2^418)P */ 0xc2, 0x21, 0xdc, 0xd5, 0x69, 0xff, 0xca, 0x49, 0x3a, 0xe1, 0xc3, 0x69, 0x41, 0x56, 0xc1, 0x76, 0x63, 0x24, 0xbd, 0x64, 0x1b, 0x3d, 0x92, 0xf9, 0x13, 0x04, 0x25, 0xeb, 0x27, 0xa6, 0xef, 0x39, 0x3a, 0x80, 0xe0, 0xf8, 0x27, 0xee, 0xc9, 0x49, 0x77, 0xef, 0x3f, 0x29, 0x3d, 0x5e, 0xe6, 0x66, 0x83, 0xd1, 0xf6, 0xfe, 0x9d, 0xbc, 0xf1, 0x96,
- /* (2^419)P */ 0x6b, 0xc6, 0x99, 0x26, 0x3c, 0xf3, 0x63, 0xf9, 0xc7, 0x29, 0x8c, 0x52, 0x62, 0x2d, 0xdc, 0x8a, 0x66, 0xce, 0x2c, 0xa7, 0xe4, 0xf0, 0xd7, 0x37, 0x17, 0x1e, 0xe4, 0xa3, 0x53, 0x7b, 0x29, 0x8e, 0x60, 0x99, 0xf9, 0x0c, 0x7c, 0x6f, 0xa2, 0xcc, 0x9f, 0x80, 0xdd, 0x5e, 0x46, 0xaa, 0x0d, 0x6c, 0xc9, 0x6c, 0xf7, 0x78, 0x5b, 0x38, 0xe3, 0x24,
- /* (2^420)P */ 0x4b, 0x75, 0x6a, 0x2f, 0x08, 0xe1, 0x72, 0x76, 0xab, 0x82, 0x96, 0xdf, 0x3b, 0x1f, 0x9b, 0xd8, 0xed, 0xdb, 0xcd, 0x15, 0x09, 0x5a, 0x1e, 0xb7, 0xc5, 0x26, 0x72, 0x07, 0x0c, 0x50, 0xcd, 0x3b, 0x4d, 0x3f, 0xa2, 0x67, 0xc2, 0x02, 0x61, 0x2e, 0x68, 0xe9, 0x6f, 0xf0, 0x21, 0x2a, 0xa7, 0x3b, 0x88, 0x04, 0x11, 0x64, 0x49, 0x0d, 0xb4, 0x46,
- /* (2^421)P */ 0x63, 0x85, 0xf3, 0xc5, 0x2b, 0x5a, 0x9f, 0xf0, 0x17, 0xcb, 0x45, 0x0a, 0xf3, 0x6e, 0x7e, 0xb0, 0x7c, 0xbc, 0xf0, 0x4f, 0x3a, 0xb0, 0xbc, 0x36, 0x36, 0x52, 0x51, 0xcb, 0xfe, 0x9a, 0xcb, 0xe8, 0x7e, 0x4b, 0x06, 0x7f, 0xaa, 0x35, 0xc8, 0x0e, 0x7a, 0x30, 0xa3, 0xb1, 0x09, 0xbb, 0x86, 0x4c, 0xbe, 0xb8, 0xbd, 0xe0, 0x32, 0xa5, 0xd4, 0xf7,
- /* (2^422)P */ 0x7d, 0x50, 0x37, 0x68, 0x4e, 0x22, 0xb2, 0x2c, 0xd5, 0x0f, 0x2b, 0x6d, 0xb1, 0x51, 0xf2, 0x82, 0xe9, 0x98, 0x7c, 0x50, 0xc7, 0x96, 0x7e, 0x0e, 0xdc, 0xb1, 0x0e, 0xb2, 0x63, 0x8c, 0x30, 0x37, 0x72, 0x21, 0x9c, 0x61, 0xc2, 0xa7, 0x33, 0xd9, 0xb2, 0x63, 0x93, 0xd1, 0x6b, 0x6a, 0x73, 0xa5, 0x58, 0x80, 0xff, 0x04, 0xc7, 0x83, 0x21, 0x29,
- /* (2^423)P */ 0x29, 0x04, 0xbc, 0x99, 0x39, 0xc9, 0x58, 0xc9, 0x6b, 0x17, 0xe8, 0x90, 0xb3, 0xe6, 0xa9, 0xb6, 0x28, 0x9b, 0xcb, 0x3b, 0x28, 0x90, 0x68, 0x71, 0xff, 0xcf, 0x08, 0x78, 0xc9, 0x8d, 0xa8, 0x4e, 0x43, 0xd1, 0x1c, 0x9e, 0xa4, 0xe3, 0xdf, 0xbf, 0x92, 0xf4, 0xf9, 0x41, 0xba, 0x4d, 0x1c, 0xf9, 0xdd, 0x74, 0x76, 0x1c, 0x6e, 0x3e, 0x94, 0x87,
- /* (2^424)P */ 0xe4, 0xda, 0xc5, 0xd7, 0xfb, 0x87, 0xc5, 0x4d, 0x6b, 0x19, 0xaa, 0xb9, 0xbc, 0x8c, 0xf2, 0x8a, 0xd8, 0x5d, 0xdb, 0x4d, 0xef, 0xa6, 0xf2, 0x65, 0xf1, 0x22, 0x9c, 0xf1, 0x46, 0x30, 0x71, 0x7c, 0xe4, 0x53, 0x8e, 0x55, 0x2e, 0x9c, 0x9a, 0x31, 0x2a, 0xc3, 0xab, 0x0f, 0xde, 0xe4, 0xbe, 0xd8, 0x96, 0x50, 0x6e, 0x0c, 0x54, 0x49, 0xe6, 0xec,
- /* (2^425)P */ 0x3c, 0x1d, 0x5a, 0xa5, 0xda, 0xad, 0xdd, 0xc2, 0xae, 0xac, 0x6f, 0x86, 0x75, 0x31, 0x91, 0x64, 0x45, 0x9d, 0xa4, 0xf0, 0x81, 0xf1, 0x0e, 0xba, 0x74, 0xaf, 0x7b, 0xcd, 0x6f, 0xfe, 0xac, 0x4e, 0xdb, 0x4e, 0x45, 0x35, 0x36, 0xc5, 0xc0, 0x6c, 0x3d, 0x64, 0xf4, 0xd8, 0x07, 0x62, 0xd1, 0xec, 0xf3, 0xfc, 0x93, 0xc9, 0x28, 0x0c, 0x2c, 0xf3,
- /* (2^426)P */ 0x0c, 0x69, 0x2b, 0x5c, 0xb6, 0x41, 0x69, 0xf1, 0xa4, 0xf1, 0x5b, 0x75, 0x4c, 0x42, 0x8b, 0x47, 0xeb, 0x69, 0xfb, 0xa8, 0xe6, 0xf9, 0x7b, 0x48, 0x50, 0xaf, 0xd3, 0xda, 0xb2, 0x35, 0x10, 0xb5, 0x5b, 0x40, 0x90, 0x39, 0xc9, 0x07, 0x06, 0x73, 0x26, 0x20, 0x95, 0x01, 0xa4, 0x2d, 0xf0, 0xe7, 0x2e, 0x00, 0x7d, 0x41, 0x09, 0x68, 0x13, 0xc4,
- /* (2^427)P */ 0xbe, 0x38, 0x78, 0xcf, 0xc9, 0x4f, 0x36, 0xca, 0x09, 0x61, 0x31, 0x3c, 0x57, 0x2e, 0xec, 0x17, 0xa4, 0x7d, 0x19, 0x2b, 0x9b, 0x5b, 0xbe, 0x8f, 0xd6, 0xc5, 0x2f, 0x86, 0xf2, 0x64, 0x76, 0x17, 0x00, 0x6e, 0x1a, 0x8c, 0x67, 0x1b, 0x68, 0xeb, 0x15, 0xa2, 0xd6, 0x09, 0x91, 0xdd, 0x23, 0x0d, 0x98, 0xb2, 0x10, 0x19, 0x55, 0x9b, 0x63, 0xf2,
- /* (2^428)P */ 0x51, 0x1f, 0x93, 0xea, 0x2a, 0x3a, 0xfa, 0x41, 0xc0, 0x57, 0xfb, 0x74, 0xa6, 0x65, 0x09, 0x56, 0x14, 0xb6, 0x12, 0xaa, 0xb3, 0x1a, 0x8d, 0x3b, 0x76, 0x91, 0x7a, 0x23, 0x56, 0x9c, 0x6a, 0xc0, 0xe0, 0x3c, 0x3f, 0xb5, 0x1a, 0xf4, 0x57, 0x71, 0x93, 0x2b, 0xb1, 0xa7, 0x70, 0x57, 0x22, 0x80, 0xf5, 0xb8, 0x07, 0x77, 0x87, 0x0c, 0xbe, 0x83,
- /* (2^429)P */ 0x07, 0x9b, 0x0e, 0x52, 0x38, 0x63, 0x13, 0x86, 0x6a, 0xa6, 0xb4, 0xd2, 0x60, 0x68, 0x9a, 0x99, 0x82, 0x0a, 0x04, 0x5f, 0x89, 0x7a, 0x1a, 0x2a, 0xae, 0x2d, 0x35, 0x0c, 0x1e, 0xad, 0xef, 0x4f, 0x9a, 0xfc, 0xc8, 0xd9, 0xcf, 0x9d, 0x48, 0x71, 0xa5, 0x55, 0x79, 0x73, 0x39, 0x1b, 0xd8, 0x73, 0xec, 0x9b, 0x03, 0x16, 0xd8, 0x82, 0xf7, 0x67,
- /* (2^430)P */ 0x52, 0x67, 0x42, 0x21, 0xc9, 0x40, 0x78, 0x82, 0x2b, 0x95, 0x2d, 0x20, 0x92, 0xd1, 0xe2, 0x61, 0x25, 0xb0, 0xc6, 0x9c, 0x20, 0x59, 0x8e, 0x28, 0x6f, 0xf3, 0xfd, 0xd3, 0xc1, 0x32, 0x43, 0xc9, 0xa6, 0x08, 0x7a, 0x77, 0x9c, 0x4c, 0x8c, 0x33, 0x71, 0x13, 0x69, 0xe3, 0x52, 0x30, 0xa7, 0xf5, 0x07, 0x67, 0xac, 0xad, 0x46, 0x8a, 0x26, 0x25,
- /* (2^431)P */ 0xda, 0x86, 0xc4, 0xa2, 0x71, 0x56, 0xdd, 0xd2, 0x48, 0xd3, 0xde, 0x42, 0x63, 0x01, 0xa7, 0x2c, 0x92, 0x83, 0x6f, 0x2e, 0xd8, 0x1e, 0x3f, 0xc1, 0xc5, 0x42, 0x4e, 0x34, 0x19, 0x54, 0x6e, 0x35, 0x2c, 0x51, 0x2e, 0xfd, 0x0f, 0x9a, 0x45, 0x66, 0x5e, 0x4a, 0x83, 0xda, 0x0a, 0x53, 0x68, 0x63, 0xfa, 0xce, 0x47, 0x20, 0xd3, 0x34, 0xba, 0x0d,
- /* (2^432)P */ 0xd0, 0xe9, 0x64, 0xa4, 0x61, 0x4b, 0x86, 0xe5, 0x93, 0x6f, 0xda, 0x0e, 0x31, 0x7e, 0x6e, 0xe3, 0xc6, 0x73, 0xd8, 0xa3, 0x08, 0x57, 0x52, 0xcd, 0x51, 0x63, 0x1d, 0x9f, 0x93, 0x00, 0x62, 0x91, 0x26, 0x21, 0xa7, 0xdd, 0x25, 0x0f, 0x09, 0x0d, 0x35, 0xad, 0xcf, 0x11, 0x8e, 0x6e, 0xe8, 0xae, 0x1d, 0x95, 0xcb, 0x88, 0xf8, 0x70, 0x7b, 0x91,
- /* (2^433)P */ 0x0c, 0x19, 0x5c, 0xd9, 0x8d, 0xda, 0x9d, 0x2c, 0x90, 0x54, 0x65, 0xe8, 0xb6, 0x35, 0x50, 0xae, 0xea, 0xae, 0x43, 0xb7, 0x1e, 0x99, 0x8b, 0x4c, 0x36, 0x4e, 0xe4, 0x1e, 0xc4, 0x64, 0x43, 0xb6, 0xeb, 0xd4, 0xe9, 0x60, 0x22, 0xee, 0xcf, 0xb8, 0x52, 0x1b, 0xf0, 0x04, 0xce, 0xbc, 0x2b, 0xf0, 0xbe, 0xcd, 0x44, 0x74, 0x1e, 0x1f, 0x63, 0xf9,
- /* (2^434)P */ 0xe1, 0x3f, 0x95, 0x94, 0xb2, 0xb6, 0x31, 0xa9, 0x1b, 0xdb, 0xfd, 0x0e, 0xdb, 0xdd, 0x1a, 0x22, 0x78, 0x60, 0x9f, 0x75, 0x5f, 0x93, 0x06, 0x0c, 0xd8, 0xbb, 0xa2, 0x85, 0x2b, 0x5e, 0xc0, 0x9b, 0xa8, 0x5d, 0xaf, 0x93, 0x91, 0x91, 0x47, 0x41, 0x1a, 0xfc, 0xb4, 0x51, 0x85, 0xad, 0x69, 0x4d, 0x73, 0x69, 0xd5, 0x4e, 0x82, 0xfb, 0x66, 0xcb,
- /* (2^435)P */ 0x7c, 0xbe, 0xc7, 0x51, 0xc4, 0x74, 0x6e, 0xab, 0xfd, 0x41, 0x4f, 0x76, 0x4f, 0x24, 0x03, 0xd6, 0x2a, 0xb7, 0x42, 0xb4, 0xda, 0x41, 0x2c, 0x82, 0x48, 0x4c, 0x7f, 0x6f, 0x25, 0x5d, 0x36, 0xd4, 0x69, 0xf5, 0xef, 0x02, 0x81, 0xea, 0x6f, 0x19, 0x69, 0xe8, 0x6f, 0x5b, 0x2f, 0x14, 0x0e, 0x6f, 0x89, 0xb4, 0xb5, 0xd8, 0xae, 0xef, 0x7b, 0x87,
- /* (2^436)P */ 0xe9, 0x91, 0xa0, 0x8b, 0xc9, 0xe0, 0x01, 0x90, 0x37, 0xc1, 0x6f, 0xdc, 0x5e, 0xf7, 0xbf, 0x43, 0x00, 0xaa, 0x10, 0x76, 0x76, 0x18, 0x6e, 0x19, 0x1e, 0x94, 0x50, 0x11, 0x0a, 0xd1, 0xe2, 0xdb, 0x08, 0x21, 0xa0, 0x1f, 0xdb, 0x54, 0xfe, 0xea, 0x6e, 0xa3, 0x68, 0x56, 0x87, 0x0b, 0x22, 0x4e, 0x66, 0xf3, 0x82, 0x82, 0x00, 0xcd, 0xd4, 0x12,
- /* (2^437)P */ 0x25, 0x8e, 0x24, 0x77, 0x64, 0x4c, 0xe0, 0xf8, 0x18, 0xc0, 0xdc, 0xc7, 0x1b, 0x35, 0x65, 0xde, 0x67, 0x41, 0x5e, 0x6f, 0x90, 0x82, 0xa7, 0x2e, 0x6d, 0xf1, 0x47, 0xb4, 0x92, 0x9c, 0xfd, 0x6a, 0x9a, 0x41, 0x36, 0x20, 0x24, 0x58, 0xc3, 0x59, 0x07, 0x9a, 0xfa, 0x9f, 0x03, 0xcb, 0xc7, 0x69, 0x37, 0x60, 0xe1, 0xab, 0x13, 0x72, 0xee, 0xa2,
- /* (2^438)P */ 0x74, 0x78, 0xfb, 0x13, 0xcb, 0x8e, 0x37, 0x1a, 0xf6, 0x1d, 0x17, 0x83, 0x06, 0xd4, 0x27, 0x06, 0x21, 0xe8, 0xda, 0xdf, 0x6b, 0xf3, 0x83, 0x6b, 0x34, 0x8a, 0x8c, 0xee, 0x01, 0x05, 0x5b, 0xed, 0xd3, 0x1b, 0xc9, 0x64, 0x83, 0xc9, 0x49, 0xc2, 0x57, 0x1b, 0xdd, 0xcf, 0xf1, 0x9d, 0x63, 0xee, 0x1c, 0x0d, 0xa0, 0x0a, 0x73, 0x1f, 0x5b, 0x32,
- /* (2^439)P */ 0x29, 0xce, 0x1e, 0xc0, 0x6a, 0xf5, 0xeb, 0x99, 0x5a, 0x39, 0x23, 0xe9, 0xdd, 0xac, 0x44, 0x88, 0xbc, 0x80, 0x22, 0xde, 0x2c, 0xcb, 0xa8, 0x3b, 0xff, 0xf7, 0x6f, 0xc7, 0x71, 0x72, 0xa8, 0xa3, 0xf6, 0x4d, 0xc6, 0x75, 0xda, 0x80, 0xdc, 0xd9, 0x30, 0xd9, 0x07, 0x50, 0x5a, 0x54, 0x7d, 0xda, 0x39, 0x6f, 0x78, 0x94, 0xbf, 0x25, 0x98, 0xdc,
- /* (2^440)P */ 0x01, 0x26, 0x62, 0x44, 0xfb, 0x0f, 0x11, 0x72, 0x73, 0x0a, 0x16, 0xc7, 0x16, 0x9c, 0x9b, 0x37, 0xd8, 0xff, 0x4f, 0xfe, 0x57, 0xdb, 0xae, 0xef, 0x7d, 0x94, 0x30, 0x04, 0x70, 0x83, 0xde, 0x3c, 0xd4, 0xb5, 0x70, 0xda, 0xa7, 0x55, 0xc8, 0x19, 0xe1, 0x36, 0x15, 0x61, 0xe7, 0x3b, 0x7d, 0x85, 0xbb, 0xf3, 0x42, 0x5a, 0x94, 0xf4, 0x53, 0x2a,
- /* (2^441)P */ 0x14, 0x60, 0xa6, 0x0b, 0x83, 0xe1, 0x23, 0x77, 0xc0, 0xce, 0x50, 0xed, 0x35, 0x8d, 0x98, 0x99, 0x7d, 0xf5, 0x8d, 0xce, 0x94, 0x25, 0xc8, 0x0f, 0x6d, 0xfa, 0x4a, 0xa4, 0x3a, 0x1f, 0x66, 0xfb, 0x5a, 0x64, 0xaf, 0x8b, 0x54, 0x54, 0x44, 0x3f, 0x5b, 0x88, 0x61, 0xe4, 0x48, 0x45, 0x26, 0x20, 0xbe, 0x0d, 0x06, 0xbb, 0x65, 0x59, 0xe1, 0x36,
- /* (2^442)P */ 0xb7, 0x98, 0xce, 0xa3, 0xe3, 0xee, 0x11, 0x1b, 0x9e, 0x24, 0x59, 0x75, 0x31, 0x37, 0x44, 0x6f, 0x6b, 0x9e, 0xec, 0xb7, 0x44, 0x01, 0x7e, 0xab, 0xbb, 0x69, 0x5d, 0x11, 0xb0, 0x30, 0x64, 0xea, 0x91, 0xb4, 0x7a, 0x8c, 0x02, 0x4c, 0xb9, 0x10, 0xa7, 0xc7, 0x79, 0xe6, 0xdc, 0x77, 0xe3, 0xc8, 0xef, 0x3e, 0xf9, 0x38, 0x81, 0xce, 0x9a, 0xb2,
- /* (2^443)P */ 0x91, 0x12, 0x76, 0xd0, 0x10, 0xb4, 0xaf, 0xe1, 0x89, 0x3a, 0x93, 0x6b, 0x5c, 0x19, 0x5f, 0x24, 0xed, 0x04, 0x92, 0xc7, 0xf0, 0x00, 0x08, 0xc1, 0x92, 0xff, 0x90, 0xdb, 0xb2, 0xbf, 0xdf, 0x49, 0xcd, 0xbd, 0x5c, 0x6e, 0xbf, 0x16, 0xbb, 0x61, 0xf9, 0x20, 0x33, 0x35, 0x93, 0x11, 0xbc, 0x59, 0x69, 0xce, 0x18, 0x9f, 0xf8, 0x7b, 0xa1, 0x6e,
- /* (2^444)P */ 0xa1, 0xf4, 0xaf, 0xad, 0xf8, 0xe6, 0x99, 0xd2, 0xa1, 0x4d, 0xde, 0x56, 0xc9, 0x7b, 0x0b, 0x11, 0x3e, 0xbf, 0x89, 0x1a, 0x9a, 0x90, 0xe5, 0xe2, 0xa6, 0x37, 0x88, 0xa1, 0x68, 0x59, 0xae, 0x8c, 0xec, 0x02, 0x14, 0x8d, 0xb7, 0x2e, 0x25, 0x75, 0x7f, 0x76, 0x1a, 0xd3, 0x4d, 0xad, 0x8a, 0x00, 0x6c, 0x96, 0x49, 0xa4, 0xc3, 0x2e, 0x5c, 0x7b,
- /* (2^445)P */ 0x26, 0x53, 0xf7, 0xda, 0xa8, 0x01, 0x14, 0xb1, 0x63, 0xe3, 0xc3, 0x89, 0x88, 0xb0, 0x85, 0x40, 0x2b, 0x26, 0x9a, 0x10, 0x1a, 0x70, 0x33, 0xf4, 0x50, 0x9d, 0x4d, 0xd8, 0x64, 0xc6, 0x0f, 0xe1, 0x17, 0xc8, 0x10, 0x4b, 0xfc, 0xa0, 0xc9, 0xba, 0x2c, 0x98, 0x09, 0xf5, 0x84, 0xb6, 0x7c, 0x4e, 0xa3, 0xe3, 0x81, 0x1b, 0x32, 0x60, 0x02, 0xdd,
- /* (2^446)P */ 0xa3, 0xe5, 0x86, 0xd4, 0x43, 0xa8, 0xd1, 0x98, 0x9d, 0x9d, 0xdb, 0x04, 0xcf, 0x6e, 0x35, 0x05, 0x30, 0x53, 0x3b, 0xbc, 0x90, 0x00, 0x4a, 0xc5, 0x40, 0x2a, 0x0f, 0xde, 0x1a, 0xd7, 0x36, 0x27, 0x44, 0x62, 0xa6, 0xac, 0x9d, 0xd2, 0x70, 0x69, 0x14, 0x39, 0x9b, 0xd1, 0xc3, 0x0a, 0x3a, 0x82, 0x0e, 0xf1, 0x94, 0xd7, 0x42, 0x94, 0xd5, 0x7d,
- /* (2^447)P */ 0x04, 0xc0, 0x6e, 0x12, 0x90, 0x70, 0xf9, 0xdf, 0xf7, 0xc9, 0x86, 0xc0, 0xe6, 0x92, 0x8b, 0x0a, 0xa1, 0xc1, 0x3b, 0xcc, 0x33, 0xb7, 0xf0, 0xeb, 0x51, 0x50, 0x80, 0x20, 0x69, 0x1c, 0x4f, 0x89, 0x05, 0x1e, 0xe4, 0x7a, 0x0a, 0xc2, 0xf0, 0xf5, 0x78, 0x91, 0x76, 0x34, 0x45, 0xdc, 0x24, 0x53, 0x24, 0x98, 0xe2, 0x73, 0x6f, 0xe6, 0x46, 0x67,
-}
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go
deleted file mode 100644
index b6b236e5..00000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/constants.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package goldilocks
-
-import fp "github.com/cloudflare/circl/math/fp448"
-
-var (
- // genX is the x-coordinate of the generator of Goldilocks curve.
- genX = fp.Elt{
- 0x5e, 0xc0, 0x0c, 0xc7, 0x2b, 0xa8, 0x26, 0x26,
- 0x8e, 0x93, 0x00, 0x8b, 0xe1, 0x80, 0x3b, 0x43,
- 0x11, 0x65, 0xb6, 0x2a, 0xf7, 0x1a, 0xae, 0x12,
- 0x64, 0xa4, 0xd3, 0xa3, 0x24, 0xe3, 0x6d, 0xea,
- 0x67, 0x17, 0x0f, 0x47, 0x70, 0x65, 0x14, 0x9e,
- 0xda, 0x36, 0xbf, 0x22, 0xa6, 0x15, 0x1d, 0x22,
- 0xed, 0x0d, 0xed, 0x6b, 0xc6, 0x70, 0x19, 0x4f,
- }
- // genY is the y-coordinate of the generator of Goldilocks curve.
- genY = fp.Elt{
- 0x14, 0xfa, 0x30, 0xf2, 0x5b, 0x79, 0x08, 0x98,
- 0xad, 0xc8, 0xd7, 0x4e, 0x2c, 0x13, 0xbd, 0xfd,
- 0xc4, 0x39, 0x7c, 0xe6, 0x1c, 0xff, 0xd3, 0x3a,
- 0xd7, 0xc2, 0xa0, 0x05, 0x1e, 0x9c, 0x78, 0x87,
- 0x40, 0x98, 0xa3, 0x6c, 0x73, 0x73, 0xea, 0x4b,
- 0x62, 0xc7, 0xc9, 0x56, 0x37, 0x20, 0x76, 0x88,
- 0x24, 0xbc, 0xb6, 0x6e, 0x71, 0x46, 0x3f, 0x69,
- }
- // paramD is -39081 in Fp.
- paramD = fp.Elt{
- 0x56, 0x67, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- }
- // order is 2^446-0x8335dc163bb124b65129c96fde933d8d723a70aadc873d6d54a7bb0d,
- // which is the number of points in the prime subgroup.
- order = Scalar{
- 0xf3, 0x44, 0x58, 0xab, 0x92, 0xc2, 0x78, 0x23,
- 0x55, 0x8f, 0xc5, 0x8d, 0x72, 0xc2, 0x6c, 0x21,
- 0x90, 0x36, 0xd6, 0xae, 0x49, 0xdb, 0x4e, 0xc4,
- 0xe9, 0x23, 0xca, 0x7c, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f,
- }
- // residue448 is 2^448 mod order.
- residue448 = [4]uint64{
- 0x721cf5b5529eec34, 0x7a4cf635c8e9c2ab, 0xeec492d944a725bf, 0x20cd77058,
- }
- // invFour is 1/4 mod order.
- invFour = Scalar{
- 0x3d, 0x11, 0xd6, 0xaa, 0xa4, 0x30, 0xde, 0x48,
- 0xd5, 0x63, 0x71, 0xa3, 0x9c, 0x30, 0x5b, 0x08,
- 0xa4, 0x8d, 0xb5, 0x6b, 0xd2, 0xb6, 0x13, 0x71,
- 0xfa, 0x88, 0x32, 0xdf, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f,
- }
- // paramDTwist is -39082 in Fp. The D parameter of the twist curve.
- paramDTwist = fp.Elt{
- 0x55, 0x67, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- }
-)
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go
deleted file mode 100644
index 5a939100..00000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/curve.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Package goldilocks provides elliptic curve operations over the goldilocks curve.
-package goldilocks
-
-import fp "github.com/cloudflare/circl/math/fp448"
-
-// Curve is the Goldilocks curve x^2+y^2=z^2-39081x^2y^2.
-type Curve struct{}
-
-// Identity returns the identity point.
-func (Curve) Identity() *Point {
- return &Point{
- y: fp.One(),
- z: fp.One(),
- }
-}
-
-// IsOnCurve returns true if the point lies on the curve.
-func (Curve) IsOnCurve(P *Point) bool {
- x2, y2, t, t2, z2 := &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{}
- rhs, lhs := &fp.Elt{}, &fp.Elt{}
- fp.Mul(t, &P.ta, &P.tb) // t = ta*tb
- fp.Sqr(x2, &P.x) // x^2
- fp.Sqr(y2, &P.y) // y^2
- fp.Sqr(z2, &P.z) // z^2
- fp.Sqr(t2, t) // t^2
- fp.Add(lhs, x2, y2) // x^2 + y^2
- fp.Mul(rhs, t2, ¶mD) // dt^2
- fp.Add(rhs, rhs, z2) // z^2 + dt^2
- fp.Sub(lhs, lhs, rhs) // x^2 + y^2 - (z^2 + dt^2)
- eq0 := fp.IsZero(lhs)
-
- fp.Mul(lhs, &P.x, &P.y) // xy
- fp.Mul(rhs, t, &P.z) // tz
- fp.Sub(lhs, lhs, rhs) // xy - tz
- eq1 := fp.IsZero(lhs)
- return eq0 && eq1
-}
-
-// Generator returns the generator point.
-func (Curve) Generator() *Point {
- return &Point{
- x: genX,
- y: genY,
- z: fp.One(),
- ta: genX,
- tb: genY,
- }
-}
-
-// Order returns the number of points in the prime subgroup.
-func (Curve) Order() Scalar { return order }
-
-// Double returns 2P.
-func (Curve) Double(P *Point) *Point { R := *P; R.Double(); return &R }
-
-// Add returns P+Q.
-func (Curve) Add(P, Q *Point) *Point { R := *P; R.Add(Q); return &R }
-
-// ScalarMult returns kP. This function runs in constant time.
-func (e Curve) ScalarMult(k *Scalar, P *Point) *Point {
- k4 := &Scalar{}
- k4.divBy4(k)
- return e.pull(twistCurve{}.ScalarMult(k4, e.push(P)))
-}
-
-// ScalarBaseMult returns kG where G is the generator point. This function runs in constant time.
-func (e Curve) ScalarBaseMult(k *Scalar) *Point {
- k4 := &Scalar{}
- k4.divBy4(k)
- return e.pull(twistCurve{}.ScalarBaseMult(k4))
-}
-
-// CombinedMult returns mG+nP, where G is the generator point. This function is non-constant time.
-func (e Curve) CombinedMult(m, n *Scalar, P *Point) *Point {
- m4 := &Scalar{}
- n4 := &Scalar{}
- m4.divBy4(m)
- n4.divBy4(n)
- return e.pull(twistCurve{}.CombinedMult(m4, n4, twistCurve{}.pull(P)))
-}
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go
deleted file mode 100644
index b1daab85..00000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/isogeny.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package goldilocks
-
-import fp "github.com/cloudflare/circl/math/fp448"
-
-func (Curve) pull(P *twistPoint) *Point { return twistCurve{}.push(P) }
-func (twistCurve) pull(P *Point) *twistPoint { return Curve{}.push(P) }
-
-// push sends a point on the Goldilocks curve to a point on the twist curve.
-func (Curve) push(P *Point) *twistPoint {
- Q := &twistPoint{}
- Px, Py, Pz := &P.x, &P.y, &P.z
- a, b, c, d, e, f, g, h := &Q.x, &Q.y, &Q.z, &fp.Elt{}, &Q.ta, &Q.x, &Q.y, &Q.tb
- fp.Add(e, Px, Py) // x+y
- fp.Sqr(a, Px) // A = x^2
- fp.Sqr(b, Py) // B = y^2
- fp.Sqr(c, Pz) // z^2
- fp.Add(c, c, c) // C = 2*z^2
- *d = *a // D = A
- fp.Sqr(e, e) // (x+y)^2
- fp.Sub(e, e, a) // (x+y)^2-A
- fp.Sub(e, e, b) // E = (x+y)^2-A-B
- fp.Add(h, b, d) // H = B+D
- fp.Sub(g, b, d) // G = B-D
- fp.Sub(f, c, h) // F = C-H
- fp.Mul(&Q.z, f, g) // Z = F * G
- fp.Mul(&Q.x, e, f) // X = E * F
- fp.Mul(&Q.y, g, h) // Y = G * H, // T = E * H
- return Q
-}
-
-// push sends a point on the twist curve to a point on the Goldilocks curve.
-func (twistCurve) push(P *twistPoint) *Point {
- Q := &Point{}
- Px, Py, Pz := &P.x, &P.y, &P.z
- a, b, c, d, e, f, g, h := &Q.x, &Q.y, &Q.z, &fp.Elt{}, &Q.ta, &Q.x, &Q.y, &Q.tb
- fp.Add(e, Px, Py) // x+y
- fp.Sqr(a, Px) // A = x^2
- fp.Sqr(b, Py) // B = y^2
- fp.Sqr(c, Pz) // z^2
- fp.Add(c, c, c) // C = 2*z^2
- fp.Neg(d, a) // D = -A
- fp.Sqr(e, e) // (x+y)^2
- fp.Sub(e, e, a) // (x+y)^2-A
- fp.Sub(e, e, b) // E = (x+y)^2-A-B
- fp.Add(h, b, d) // H = B+D
- fp.Sub(g, b, d) // G = B-D
- fp.Sub(f, c, h) // F = C-H
- fp.Mul(&Q.z, f, g) // Z = F * G
- fp.Mul(&Q.x, e, f) // X = E * F
- fp.Mul(&Q.y, g, h) // Y = G * H, // T = E * H
- return Q
-}
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go
deleted file mode 100644
index 11f73de0..00000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/point.go
+++ /dev/null
@@ -1,171 +0,0 @@
-package goldilocks
-
-import (
- "errors"
- "fmt"
-
- fp "github.com/cloudflare/circl/math/fp448"
-)
-
-// Point is a point on the Goldilocks Curve.
-type Point struct{ x, y, z, ta, tb fp.Elt }
-
-func (P Point) String() string {
- return fmt.Sprintf("x: %v\ny: %v\nz: %v\nta: %v\ntb: %v", P.x, P.y, P.z, P.ta, P.tb)
-}
-
-// FromAffine creates a point from affine coordinates.
-func FromAffine(x, y *fp.Elt) (*Point, error) {
- P := &Point{
- x: *x,
- y: *y,
- z: fp.One(),
- ta: *x,
- tb: *y,
- }
- if !(Curve{}).IsOnCurve(P) {
- return P, errors.New("point not on curve")
- }
- return P, nil
-}
-
-// isLessThan returns true if 0 <= x < y, and assumes that slices are of the
-// same length and are interpreted in little-endian order.
-func isLessThan(x, y []byte) bool {
- i := len(x) - 1
- for i > 0 && x[i] == y[i] {
- i--
- }
- return x[i] < y[i]
-}
-
-// FromBytes returns a point from the input buffer.
-func FromBytes(in []byte) (*Point, error) {
- if len(in) < fp.Size+1 {
- return nil, errors.New("wrong input length")
- }
- err := errors.New("invalid decoding")
- P := &Point{}
- signX := in[fp.Size] >> 7
- copy(P.y[:], in[:fp.Size])
- p := fp.P()
- if !isLessThan(P.y[:], p[:]) {
- return nil, err
- }
-
- u, v := &fp.Elt{}, &fp.Elt{}
- one := fp.One()
- fp.Sqr(u, &P.y) // u = y^2
- fp.Mul(v, u, ¶mD) // v = dy^2
- fp.Sub(u, u, &one) // u = y^2-1
- fp.Sub(v, v, &one) // v = dy^2-1
- isQR := fp.InvSqrt(&P.x, u, v) // x = sqrt(u/v)
- if !isQR {
- return nil, err
- }
- fp.Modp(&P.x) // x = x mod p
- if fp.IsZero(&P.x) && signX == 1 {
- return nil, err
- }
- if signX != (P.x[0] & 1) {
- fp.Neg(&P.x, &P.x)
- }
- P.ta = P.x
- P.tb = P.y
- P.z = fp.One()
- return P, nil
-}
-
-// IsIdentity returns true is P is the identity Point.
-func (P *Point) IsIdentity() bool {
- return fp.IsZero(&P.x) && !fp.IsZero(&P.y) && !fp.IsZero(&P.z) && P.y == P.z
-}
-
-// IsEqual returns true if P is equivalent to Q.
-func (P *Point) IsEqual(Q *Point) bool {
- l, r := &fp.Elt{}, &fp.Elt{}
- fp.Mul(l, &P.x, &Q.z)
- fp.Mul(r, &Q.x, &P.z)
- fp.Sub(l, l, r)
- b := fp.IsZero(l)
- fp.Mul(l, &P.y, &Q.z)
- fp.Mul(r, &Q.y, &P.z)
- fp.Sub(l, l, r)
- b = b && fp.IsZero(l)
- fp.Mul(l, &P.ta, &P.tb)
- fp.Mul(l, l, &Q.z)
- fp.Mul(r, &Q.ta, &Q.tb)
- fp.Mul(r, r, &P.z)
- fp.Sub(l, l, r)
- b = b && fp.IsZero(l)
- return b
-}
-
-// Neg obtains the inverse of the Point.
-func (P *Point) Neg() { fp.Neg(&P.x, &P.x); fp.Neg(&P.ta, &P.ta) }
-
-// ToAffine returns the x,y affine coordinates of P.
-func (P *Point) ToAffine() (x, y fp.Elt) {
- fp.Inv(&P.z, &P.z) // 1/z
- fp.Mul(&P.x, &P.x, &P.z) // x/z
- fp.Mul(&P.y, &P.y, &P.z) // y/z
- fp.Modp(&P.x)
- fp.Modp(&P.y)
- fp.SetOne(&P.z)
- P.ta = P.x
- P.tb = P.y
- return P.x, P.y
-}
-
-// ToBytes stores P into a slice of bytes.
-func (P *Point) ToBytes(out []byte) error {
- if len(out) < fp.Size+1 {
- return errors.New("invalid decoding")
- }
- x, y := P.ToAffine()
- out[fp.Size] = (x[0] & 1) << 7
- return fp.ToBytes(out[:fp.Size], &y)
-}
-
-// MarshalBinary encodes the receiver into a binary form and returns the result.
-func (P *Point) MarshalBinary() (data []byte, err error) {
- data = make([]byte, fp.Size+1)
- err = P.ToBytes(data[:fp.Size+1])
- return data, err
-}
-
-// UnmarshalBinary must be able to decode the form generated by MarshalBinary.
-func (P *Point) UnmarshalBinary(data []byte) error { Q, err := FromBytes(data); *P = *Q; return err }
-
-// Double sets P = 2Q.
-func (P *Point) Double() { P.Add(P) }
-
-// Add sets P =P+Q..
-func (P *Point) Add(Q *Point) {
- // This is formula (5) from "Twisted Edwards Curves Revisited" by
- // Hisil H., Wong K.KH., Carter G., Dawson E. (2008)
- // https://doi.org/10.1007/978-3-540-89255-7_20
- x1, y1, z1, ta1, tb1 := &P.x, &P.y, &P.z, &P.ta, &P.tb
- x2, y2, z2, ta2, tb2 := &Q.x, &Q.y, &Q.z, &Q.ta, &Q.tb
- x3, y3, z3, E, H := &P.x, &P.y, &P.z, &P.ta, &P.tb
- A, B, C, D := &fp.Elt{}, &fp.Elt{}, &fp.Elt{}, &fp.Elt{}
- t1, t2, F, G := C, D, &fp.Elt{}, &fp.Elt{}
- fp.Mul(t1, ta1, tb1) // t1 = ta1*tb1
- fp.Mul(t2, ta2, tb2) // t2 = ta2*tb2
- fp.Mul(A, x1, x2) // A = x1*x2
- fp.Mul(B, y1, y2) // B = y1*y2
- fp.Mul(C, t1, t2) // t1*t2
- fp.Mul(C, C, ¶mD) // C = d*t1*t2
- fp.Mul(D, z1, z2) // D = z1*z2
- fp.Add(F, x1, y1) // x1+y1
- fp.Add(E, x2, y2) // x2+y2
- fp.Mul(E, E, F) // (x1+y1)*(x2+y2)
- fp.Sub(E, E, A) // (x1+y1)*(x2+y2)-A
- fp.Sub(E, E, B) // E = (x1+y1)*(x2+y2)-A-B
- fp.Sub(F, D, C) // F = D-C
- fp.Add(G, D, C) // G = D+C
- fp.Sub(H, B, A) // H = B-A
- fp.Mul(z3, F, G) // Z = F * G
- fp.Mul(x3, E, F) // X = E * F
- fp.Mul(y3, G, H) // Y = G * H, T = E * H
-}
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go
deleted file mode 100644
index f98117b2..00000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/scalar.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package goldilocks
-
-import (
- "encoding/binary"
- "math/bits"
-)
-
-// ScalarSize is the size (in bytes) of scalars.
-const ScalarSize = 56 // 448 / 8
-
-// _N is the number of 64-bit words to store scalars.
-const _N = 7 // 448 / 64
-
-// Scalar represents a positive integer stored in little-endian order.
-type Scalar [ScalarSize]byte
-
-type scalar64 [_N]uint64
-
-func (z *scalar64) fromScalar(x *Scalar) {
- z[0] = binary.LittleEndian.Uint64(x[0*8 : 1*8])
- z[1] = binary.LittleEndian.Uint64(x[1*8 : 2*8])
- z[2] = binary.LittleEndian.Uint64(x[2*8 : 3*8])
- z[3] = binary.LittleEndian.Uint64(x[3*8 : 4*8])
- z[4] = binary.LittleEndian.Uint64(x[4*8 : 5*8])
- z[5] = binary.LittleEndian.Uint64(x[5*8 : 6*8])
- z[6] = binary.LittleEndian.Uint64(x[6*8 : 7*8])
-}
-
-func (z *scalar64) toScalar(x *Scalar) {
- binary.LittleEndian.PutUint64(x[0*8:1*8], z[0])
- binary.LittleEndian.PutUint64(x[1*8:2*8], z[1])
- binary.LittleEndian.PutUint64(x[2*8:3*8], z[2])
- binary.LittleEndian.PutUint64(x[3*8:4*8], z[3])
- binary.LittleEndian.PutUint64(x[4*8:5*8], z[4])
- binary.LittleEndian.PutUint64(x[5*8:6*8], z[5])
- binary.LittleEndian.PutUint64(x[6*8:7*8], z[6])
-}
-
-// add calculates z = x + y. Assumes len(z) > max(len(x),len(y)).
-func add(z, x, y []uint64) uint64 {
- l, L, zz := len(x), len(y), y
- if l > L {
- l, L, zz = L, l, x
- }
- c := uint64(0)
- for i := 0; i < l; i++ {
- z[i], c = bits.Add64(x[i], y[i], c)
- }
- for i := l; i < L; i++ {
- z[i], c = bits.Add64(zz[i], 0, c)
- }
- return c
-}
-
-// sub calculates z = x - y. Assumes len(z) > max(len(x),len(y)).
-func sub(z, x, y []uint64) uint64 {
- l, L, zz := len(x), len(y), y
- if l > L {
- l, L, zz = L, l, x
- }
- c := uint64(0)
- for i := 0; i < l; i++ {
- z[i], c = bits.Sub64(x[i], y[i], c)
- }
- for i := l; i < L; i++ {
- z[i], c = bits.Sub64(zz[i], 0, c)
- }
- return c
-}
-
-// mulWord calculates z = x * y. Assumes len(z) >= len(x)+1.
-func mulWord(z, x []uint64, y uint64) {
- for i := range z {
- z[i] = 0
- }
- carry := uint64(0)
- for i := range x {
- hi, lo := bits.Mul64(x[i], y)
- lo, cc := bits.Add64(lo, z[i], 0)
- hi, _ = bits.Add64(hi, 0, cc)
- z[i], cc = bits.Add64(lo, carry, 0)
- carry, _ = bits.Add64(hi, 0, cc)
- }
- z[len(x)] = carry
-}
-
-// Cmov moves x into z if b=1.
-func (z *scalar64) Cmov(b uint64, x *scalar64) {
- m := uint64(0) - b
- for i := range z {
- z[i] = (z[i] &^ m) | (x[i] & m)
- }
-}
-
-// leftShift shifts to the left the words of z returning the more significant word.
-func (z *scalar64) leftShift(low uint64) uint64 {
- high := z[_N-1]
- for i := _N - 1; i > 0; i-- {
- z[i] = z[i-1]
- }
- z[0] = low
- return high
-}
-
-// reduceOneWord calculates z = z + 2^448*x such that the result fits in a Scalar.
-func (z *scalar64) reduceOneWord(x uint64) {
- prod := (&scalar64{})[:]
- mulWord(prod, residue448[:], x)
- cc := add(z[:], z[:], prod)
- mulWord(prod, residue448[:], cc)
- add(z[:], z[:], prod)
-}
-
-// modOrder reduces z mod order.
-func (z *scalar64) modOrder() {
- var o64, x scalar64
- o64.fromScalar(&order)
- // Performs: while (z >= order) { z = z-order }
- // At most 8 (eight) iterations reduce 3 bits by subtracting.
- for i := 0; i < 8; i++ {
- c := sub(x[:], z[:], o64[:]) // (c || x) = z-order
- z.Cmov(1-c, &x) // if c != 0 { z = x }
- }
-}
-
-// FromBytes stores z = x mod order, where x is a number stored in little-endian order.
-func (z *Scalar) FromBytes(x []byte) {
- n := len(x)
- nCeil := (n + 7) >> 3
- for i := range z {
- z[i] = 0
- }
- if nCeil < _N {
- copy(z[:], x)
- return
- }
- copy(z[:], x[8*(nCeil-_N):])
- var z64 scalar64
- z64.fromScalar(z)
- for i := nCeil - _N - 1; i >= 0; i-- {
- low := binary.LittleEndian.Uint64(x[8*i:])
- high := z64.leftShift(low)
- z64.reduceOneWord(high)
- }
- z64.modOrder()
- z64.toScalar(z)
-}
-
-// divBy4 calculates z = x/4 mod order.
-func (z *Scalar) divBy4(x *Scalar) { z.Mul(x, &invFour) }
-
-// Red reduces z mod order.
-func (z *Scalar) Red() { var t scalar64; t.fromScalar(z); t.modOrder(); t.toScalar(z) }
-
-// Neg calculates z = -z mod order.
-func (z *Scalar) Neg() { z.Sub(&order, z) }
-
-// Add calculates z = x+y mod order.
-func (z *Scalar) Add(x, y *Scalar) {
- var z64, x64, y64, t scalar64
- x64.fromScalar(x)
- y64.fromScalar(y)
- c := add(z64[:], x64[:], y64[:])
- add(t[:], z64[:], residue448[:])
- z64.Cmov(c, &t)
- z64.modOrder()
- z64.toScalar(z)
-}
-
-// Sub calculates z = x-y mod order.
-func (z *Scalar) Sub(x, y *Scalar) {
- var z64, x64, y64, t scalar64
- x64.fromScalar(x)
- y64.fromScalar(y)
- c := sub(z64[:], x64[:], y64[:])
- sub(t[:], z64[:], residue448[:])
- z64.Cmov(c, &t)
- z64.modOrder()
- z64.toScalar(z)
-}
-
-// Mul calculates z = x*y mod order.
-func (z *Scalar) Mul(x, y *Scalar) {
- var z64, x64, y64 scalar64
- prod := (&[_N + 1]uint64{})[:]
- x64.fromScalar(x)
- y64.fromScalar(y)
- mulWord(prod, x64[:], y64[_N-1])
- copy(z64[:], prod[:_N])
- z64.reduceOneWord(prod[_N])
- for i := _N - 2; i >= 0; i-- {
- h := z64.leftShift(0)
- z64.reduceOneWord(h)
- mulWord(prod, x64[:], y64[i])
- c := add(z64[:], z64[:], prod[:_N])
- z64.reduceOneWord(prod[_N] + c)
- }
- z64.modOrder()
- z64.toScalar(z)
-}
-
-// IsZero returns true if z=0.
-func (z *Scalar) IsZero() bool { z.Red(); return *z == Scalar{} }
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go
deleted file mode 100644
index 83d7cdad..00000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package goldilocks
-
-import (
- "crypto/subtle"
- "math/bits"
-
- "github.com/cloudflare/circl/internal/conv"
- "github.com/cloudflare/circl/math"
- fp "github.com/cloudflare/circl/math/fp448"
-)
-
-// twistCurve is -x^2+y^2=1-39082x^2y^2 and is 4-isogenous to Goldilocks.
-type twistCurve struct{}
-
-// Identity returns the identity point.
-func (twistCurve) Identity() *twistPoint {
- return &twistPoint{
- y: fp.One(),
- z: fp.One(),
- }
-}
-
-// subYDiv16 update x = (x - y) / 16.
-func subYDiv16(x *scalar64, y int64) {
- s := uint64(y >> 63)
- x0, b0 := bits.Sub64((*x)[0], uint64(y), 0)
- x1, b1 := bits.Sub64((*x)[1], s, b0)
- x2, b2 := bits.Sub64((*x)[2], s, b1)
- x3, b3 := bits.Sub64((*x)[3], s, b2)
- x4, b4 := bits.Sub64((*x)[4], s, b3)
- x5, b5 := bits.Sub64((*x)[5], s, b4)
- x6, _ := bits.Sub64((*x)[6], s, b5)
- x[0] = (x0 >> 4) | (x1 << 60)
- x[1] = (x1 >> 4) | (x2 << 60)
- x[2] = (x2 >> 4) | (x3 << 60)
- x[3] = (x3 >> 4) | (x4 << 60)
- x[4] = (x4 >> 4) | (x5 << 60)
- x[5] = (x5 >> 4) | (x6 << 60)
- x[6] = (x6 >> 4)
-}
-
-func recodeScalar(d *[113]int8, k *Scalar) {
- var k64 scalar64
- k64.fromScalar(k)
- for i := 0; i < 112; i++ {
- d[i] = int8((k64[0] & 0x1f) - 16)
- subYDiv16(&k64, int64(d[i]))
- }
- d[112] = int8(k64[0])
-}
-
-// ScalarMult returns kP.
-func (e twistCurve) ScalarMult(k *Scalar, P *twistPoint) *twistPoint {
- var TabP [8]preTwistPointProy
- var S preTwistPointProy
- var d [113]int8
-
- var isZero int
- if k.IsZero() {
- isZero = 1
- }
- subtle.ConstantTimeCopy(isZero, k[:], order[:])
-
- minusK := *k
- isEven := 1 - int(k[0]&0x1)
- minusK.Neg()
- subtle.ConstantTimeCopy(isEven, k[:], minusK[:])
- recodeScalar(&d, k)
-
- P.oddMultiples(TabP[:])
- Q := e.Identity()
- for i := 112; i >= 0; i-- {
- Q.Double()
- Q.Double()
- Q.Double()
- Q.Double()
- mask := d[i] >> 7
- absDi := (d[i] + mask) ^ mask
- inx := int32((absDi - 1) >> 1)
- sig := int((d[i] >> 7) & 0x1)
- for j := range TabP {
- S.cmov(&TabP[j], uint(subtle.ConstantTimeEq(inx, int32(j))))
- }
- S.cneg(sig)
- Q.mixAdd(&S)
- }
- Q.cneg(uint(isEven))
- return Q
-}
-
-const (
- omegaFix = 7
- omegaVar = 5
-)
-
-// CombinedMult returns mG+nP.
-func (e twistCurve) CombinedMult(m, n *Scalar, P *twistPoint) *twistPoint {
- nafFix := math.OmegaNAF(conv.BytesLe2BigInt(m[:]), omegaFix)
- nafVar := math.OmegaNAF(conv.BytesLe2BigInt(n[:]), omegaVar)
-
- if len(nafFix) > len(nafVar) {
- nafVar = append(nafVar, make([]int32, len(nafFix)-len(nafVar))...)
- } else if len(nafFix) < len(nafVar) {
- nafFix = append(nafFix, make([]int32, len(nafVar)-len(nafFix))...)
- }
-
- var TabQ [1 << (omegaVar - 2)]preTwistPointProy
- P.oddMultiples(TabQ[:])
- Q := e.Identity()
- for i := len(nafFix) - 1; i >= 0; i-- {
- Q.Double()
- // Generator point
- if nafFix[i] != 0 {
- idxM := absolute(nafFix[i]) >> 1
- R := tabVerif[idxM]
- if nafFix[i] < 0 {
- R.neg()
- }
- Q.mixAddZ1(&R)
- }
- // Variable input point
- if nafVar[i] != 0 {
- idxN := absolute(nafVar[i]) >> 1
- S := TabQ[idxN]
- if nafVar[i] < 0 {
- S.neg()
- }
- Q.mixAdd(&S)
- }
- }
- return Q
-}
-
-// absolute returns always a positive value.
-func absolute(x int32) int32 {
- mask := x >> 31
- return (x + mask) ^ mask
-}
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go
deleted file mode 100644
index c55db77b..00000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistPoint.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package goldilocks
-
-import (
- "fmt"
-
- fp "github.com/cloudflare/circl/math/fp448"
-)
-
-type twistPoint struct{ x, y, z, ta, tb fp.Elt }
-
-type preTwistPointAffine struct{ addYX, subYX, dt2 fp.Elt }
-
-type preTwistPointProy struct {
- preTwistPointAffine
- z2 fp.Elt
-}
-
-func (P *twistPoint) String() string {
- return fmt.Sprintf("x: %v\ny: %v\nz: %v\nta: %v\ntb: %v", P.x, P.y, P.z, P.ta, P.tb)
-}
-
-// cneg conditionally negates the point if b=1.
-func (P *twistPoint) cneg(b uint) {
- t := &fp.Elt{}
- fp.Neg(t, &P.x)
- fp.Cmov(&P.x, t, b)
- fp.Neg(t, &P.ta)
- fp.Cmov(&P.ta, t, b)
-}
-
-// Double updates P with 2P.
-func (P *twistPoint) Double() {
- // This is formula (7) from "Twisted Edwards Curves Revisited" by
- // Hisil H., Wong K.KH., Carter G., Dawson E. (2008)
- // https://doi.org/10.1007/978-3-540-89255-7_20
- Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb
- a, b, c, e, f, g, h := Px, Py, Pz, Pta, Px, Py, Ptb
- fp.Add(e, Px, Py) // x+y
- fp.Sqr(a, Px) // A = x^2
- fp.Sqr(b, Py) // B = y^2
- fp.Sqr(c, Pz) // z^2
- fp.Add(c, c, c) // C = 2*z^2
- fp.Add(h, a, b) // H = A+B
- fp.Sqr(e, e) // (x+y)^2
- fp.Sub(e, e, h) // E = (x+y)^2-A-B
- fp.Sub(g, b, a) // G = B-A
- fp.Sub(f, c, g) // F = C-G
- fp.Mul(Pz, f, g) // Z = F * G
- fp.Mul(Px, e, f) // X = E * F
- fp.Mul(Py, g, h) // Y = G * H, T = E * H
-}
-
-// mixAdd calculates P= P+Q, where Q is a precomputed point with Z_Q = 1.
-func (P *twistPoint) mixAddZ1(Q *preTwistPointAffine) {
- fp.Add(&P.z, &P.z, &P.z) // D = 2*z1 (z2=1)
- P.coreAddition(Q)
-}
-
-// coreAddition calculates P=P+Q for curves with A=-1.
-func (P *twistPoint) coreAddition(Q *preTwistPointAffine) {
- // This is the formula following (5) from "Twisted Edwards Curves Revisited" by
- // Hisil H., Wong K.KH., Carter G., Dawson E. (2008)
- // https://doi.org/10.1007/978-3-540-89255-7_20
- Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb
- addYX2, subYX2, dt2 := &Q.addYX, &Q.subYX, &Q.dt2
- a, b, c, d, e, f, g, h := Px, Py, &fp.Elt{}, Pz, Pta, Px, Py, Ptb
- fp.Mul(c, Pta, Ptb) // t1 = ta*tb
- fp.Sub(h, Py, Px) // y1-x1
- fp.Add(b, Py, Px) // y1+x1
- fp.Mul(a, h, subYX2) // A = (y1-x1)*(y2-x2)
- fp.Mul(b, b, addYX2) // B = (y1+x1)*(y2+x2)
- fp.Mul(c, c, dt2) // C = 2*D*t1*t2
- fp.Sub(e, b, a) // E = B-A
- fp.Add(h, b, a) // H = B+A
- fp.Sub(f, d, c) // F = D-C
- fp.Add(g, d, c) // G = D+C
- fp.Mul(Pz, f, g) // Z = F * G
- fp.Mul(Px, e, f) // X = E * F
- fp.Mul(Py, g, h) // Y = G * H, T = E * H
-}
-
-func (P *preTwistPointAffine) neg() {
- P.addYX, P.subYX = P.subYX, P.addYX
- fp.Neg(&P.dt2, &P.dt2)
-}
-
-func (P *preTwistPointAffine) cneg(b int) {
- t := &fp.Elt{}
- fp.Cswap(&P.addYX, &P.subYX, uint(b))
- fp.Neg(t, &P.dt2)
- fp.Cmov(&P.dt2, t, uint(b))
-}
-
-func (P *preTwistPointAffine) cmov(Q *preTwistPointAffine, b uint) {
- fp.Cmov(&P.addYX, &Q.addYX, b)
- fp.Cmov(&P.subYX, &Q.subYX, b)
- fp.Cmov(&P.dt2, &Q.dt2, b)
-}
-
-// mixAdd calculates P= P+Q, where Q is a precomputed point with Z_Q != 1.
-func (P *twistPoint) mixAdd(Q *preTwistPointProy) {
- fp.Mul(&P.z, &P.z, &Q.z2) // D = 2*z1*z2
- P.coreAddition(&Q.preTwistPointAffine)
-}
-
-// oddMultiples calculates T[i] = (2*i-1)P for 0 < i < len(T).
-func (P *twistPoint) oddMultiples(T []preTwistPointProy) {
- if n := len(T); n > 0 {
- T[0].FromTwistPoint(P)
- _2P := *P
- _2P.Double()
- R := &preTwistPointProy{}
- R.FromTwistPoint(&_2P)
- for i := 1; i < n; i++ {
- P.mixAdd(R)
- T[i].FromTwistPoint(P)
- }
- }
-}
-
-// cmov conditionally moves Q into P if b=1.
-func (P *preTwistPointProy) cmov(Q *preTwistPointProy, b uint) {
- P.preTwistPointAffine.cmov(&Q.preTwistPointAffine, b)
- fp.Cmov(&P.z2, &Q.z2, b)
-}
-
-// FromTwistPoint precomputes some coordinates of Q for missed addition.
-func (P *preTwistPointProy) FromTwistPoint(Q *twistPoint) {
- fp.Add(&P.addYX, &Q.y, &Q.x) // addYX = X + Y
- fp.Sub(&P.subYX, &Q.y, &Q.x) // subYX = Y - X
- fp.Mul(&P.dt2, &Q.ta, &Q.tb) // T = ta*tb
- fp.Mul(&P.dt2, &P.dt2, ¶mDTwist) // D*T
- fp.Add(&P.dt2, &P.dt2, &P.dt2) // dt2 = 2*D*T
- fp.Add(&P.z2, &Q.z, &Q.z) // z2 = 2*Z
-}
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go
deleted file mode 100644
index ed432e02..00000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twistTables.go
+++ /dev/null
@@ -1,216 +0,0 @@
-package goldilocks
-
-import fp "github.com/cloudflare/circl/math/fp448"
-
-var tabFixMult = [fxV][fx2w1]preTwistPointAffine{
- {
- {
- addYX: fp.Elt{0x65, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2b, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05},
- subYX: fp.Elt{0x64, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2d, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05},
- dt2: fp.Elt{0x1a, 0x33, 0xea, 0x64, 0x45, 0x1c, 0xdf, 0x17, 0x1d, 0x16, 0x34, 0x28, 0xd6, 0x61, 0x19, 0x67, 0x79, 0xb4, 0x13, 0xcf, 0x3e, 0x7c, 0x0e, 0x72, 0xda, 0xf1, 0x5f, 0xda, 0xe6, 0xcf, 0x42, 0xd3, 0xb6, 0x17, 0xc2, 0x68, 0x13, 0x2d, 0xd9, 0x60, 0x3e, 0xae, 0xf0, 0x5b, 0x96, 0xf0, 0xcd, 0xaf, 0xea, 0xb7, 0x0d, 0x59, 0x16, 0xa7, 0xff, 0x55},
- },
- {
- addYX: fp.Elt{0xca, 0xd8, 0x7d, 0x86, 0x1a, 0xef, 0xad, 0x11, 0xe3, 0x27, 0x41, 0x7e, 0x7f, 0x3e, 0xa9, 0xd2, 0xb5, 0x4e, 0x50, 0xe0, 0x77, 0x91, 0xc2, 0x13, 0x52, 0x73, 0x41, 0x09, 0xa6, 0x57, 0x9a, 0xc8, 0xa8, 0x90, 0x9d, 0x26, 0x14, 0xbb, 0xa1, 0x2a, 0xf7, 0x45, 0x43, 0x4e, 0xea, 0x35, 0x62, 0xe1, 0x08, 0x85, 0x46, 0xb8, 0x24, 0x05, 0x2d, 0xab},
- subYX: fp.Elt{0x9b, 0xe6, 0xd3, 0xe5, 0xfe, 0x50, 0x36, 0x3c, 0x3c, 0x6d, 0x74, 0x1d, 0x74, 0xc0, 0xde, 0x5b, 0x45, 0x27, 0xe5, 0x12, 0xee, 0x63, 0x35, 0x6b, 0x13, 0xe2, 0x41, 0x6b, 0x3a, 0x05, 0x2b, 0xb1, 0x89, 0x26, 0xb6, 0xc6, 0xd1, 0x84, 0xff, 0x0e, 0x9b, 0xa3, 0xfb, 0x21, 0x36, 0x6b, 0x01, 0xf7, 0x9f, 0x7c, 0xeb, 0xf5, 0x18, 0x7a, 0x2a, 0x70},
- dt2: fp.Elt{0x09, 0xad, 0x99, 0x1a, 0x38, 0xd3, 0xdf, 0x22, 0x37, 0x32, 0x61, 0x8b, 0xf3, 0x19, 0x48, 0x08, 0xe8, 0x49, 0xb6, 0x4a, 0xa7, 0xed, 0xa4, 0xa2, 0xee, 0x86, 0xd7, 0x31, 0x5e, 0xce, 0x95, 0x76, 0x86, 0x42, 0x1c, 0x9d, 0x07, 0x14, 0x8c, 0x34, 0x18, 0x9c, 0x6d, 0x3a, 0xdf, 0xa9, 0xe8, 0x36, 0x7e, 0xe4, 0x95, 0xbe, 0xb5, 0x09, 0xf8, 0x9c},
- },
- {
- addYX: fp.Elt{0x51, 0xdb, 0x49, 0xa8, 0x9f, 0xe3, 0xd7, 0xec, 0x0d, 0x0f, 0x49, 0xe8, 0xb6, 0xc5, 0x0f, 0x5a, 0x1c, 0xce, 0x54, 0x0d, 0xb1, 0x8d, 0x5b, 0xbf, 0xf4, 0xaa, 0x34, 0x77, 0xc4, 0x5d, 0x59, 0xb6, 0xc5, 0x0e, 0x5a, 0xd8, 0x5b, 0x30, 0xc2, 0x1d, 0xec, 0x85, 0x1c, 0x42, 0xbe, 0x24, 0x2e, 0x50, 0x55, 0x44, 0xb2, 0x3a, 0x01, 0xaa, 0x98, 0xfb},
- subYX: fp.Elt{0xe7, 0x29, 0xb7, 0xd0, 0xaa, 0x4f, 0x32, 0x53, 0x56, 0xde, 0xbc, 0xd1, 0x92, 0x5d, 0x19, 0xbe, 0xa3, 0xe3, 0x75, 0x48, 0xe0, 0x7a, 0x1b, 0x54, 0x7a, 0xb7, 0x41, 0x77, 0x84, 0x38, 0xdd, 0x14, 0x9f, 0xca, 0x3f, 0xa3, 0xc8, 0xa7, 0x04, 0x70, 0xf1, 0x4d, 0x3d, 0xb3, 0x84, 0x79, 0xcb, 0xdb, 0xe4, 0xc5, 0x42, 0x9b, 0x57, 0x19, 0xf1, 0x2d},
- dt2: fp.Elt{0x20, 0xb4, 0x94, 0x9e, 0xdf, 0x31, 0x44, 0x0b, 0xc9, 0x7b, 0x75, 0x40, 0x9d, 0xd1, 0x96, 0x39, 0x70, 0x71, 0x15, 0xc8, 0x93, 0xd5, 0xc5, 0xe5, 0xba, 0xfe, 0xee, 0x08, 0x6a, 0x98, 0x0a, 0x1b, 0xb2, 0xaa, 0x3a, 0xf4, 0xa4, 0x79, 0xf9, 0x8e, 0x4d, 0x65, 0x10, 0x9b, 0x3a, 0x6e, 0x7c, 0x87, 0x94, 0x92, 0x11, 0x65, 0xbf, 0x1a, 0x09, 0xde},
- },
- {
- addYX: fp.Elt{0xf3, 0x84, 0x76, 0x77, 0xa5, 0x6b, 0x27, 0x3b, 0x83, 0x3d, 0xdf, 0xa0, 0xeb, 0x32, 0x6d, 0x58, 0x81, 0x57, 0x64, 0xc2, 0x21, 0x7c, 0x9b, 0xea, 0xe6, 0xb0, 0x93, 0xf9, 0xe7, 0xc3, 0xed, 0x5a, 0x8e, 0xe2, 0xb4, 0x72, 0x76, 0x66, 0x0f, 0x22, 0x29, 0x94, 0x3e, 0x63, 0x48, 0x5e, 0x80, 0xcb, 0xac, 0xfa, 0x95, 0xb6, 0x4b, 0xc4, 0x95, 0x33},
- subYX: fp.Elt{0x0c, 0x55, 0xd1, 0x5e, 0x5f, 0xbf, 0xbf, 0xe2, 0x4c, 0xfc, 0x37, 0x4a, 0xc4, 0xb1, 0xf4, 0x83, 0x61, 0x93, 0x60, 0x8e, 0x9f, 0x31, 0xf0, 0xa0, 0x41, 0xff, 0x1d, 0xe2, 0x7f, 0xca, 0x40, 0xd6, 0x88, 0xe8, 0x91, 0x61, 0xe2, 0x11, 0x18, 0x83, 0xf3, 0x25, 0x2f, 0x3f, 0x49, 0x40, 0xd4, 0x83, 0xe2, 0xd7, 0x74, 0x6a, 0x16, 0x86, 0x4e, 0xab},
- dt2: fp.Elt{0xdd, 0x58, 0x65, 0xd8, 0x9f, 0xdd, 0x70, 0x7f, 0x0f, 0xec, 0xbd, 0x5c, 0x5c, 0x9b, 0x7e, 0x1b, 0x9f, 0x79, 0x36, 0x1f, 0xfd, 0x79, 0x10, 0x1c, 0x52, 0xf3, 0x22, 0xa4, 0x1f, 0x71, 0x6e, 0x63, 0x14, 0xf4, 0xa7, 0x3e, 0xbe, 0xad, 0x43, 0x30, 0x38, 0x8c, 0x29, 0xc6, 0xcf, 0x50, 0x75, 0x21, 0xe5, 0x78, 0xfd, 0xb0, 0x9a, 0xc4, 0x6d, 0xd4},
- },
- },
- {
- {
- addYX: fp.Elt{0x7a, 0xa1, 0x38, 0xa6, 0xfd, 0x0e, 0x96, 0xd5, 0x26, 0x76, 0x86, 0x70, 0x80, 0x30, 0xa6, 0x67, 0xeb, 0xf4, 0x39, 0xdb, 0x22, 0xf5, 0x9f, 0x98, 0xe4, 0xb5, 0x3a, 0x0c, 0x59, 0xbf, 0x85, 0xc6, 0xf0, 0x0b, 0x1c, 0x41, 0x38, 0x09, 0x01, 0xdb, 0xd6, 0x3c, 0xb7, 0xf1, 0x08, 0x6b, 0x4b, 0x9e, 0x63, 0x53, 0x83, 0xd3, 0xab, 0xa3, 0x72, 0x0d},
- subYX: fp.Elt{0x84, 0x68, 0x25, 0xe8, 0xe9, 0x8f, 0x91, 0xbf, 0xf7, 0xa4, 0x30, 0xae, 0xea, 0x9f, 0xdd, 0x56, 0x64, 0x09, 0xc9, 0x54, 0x68, 0x4e, 0x33, 0xc5, 0x6f, 0x7b, 0x2d, 0x52, 0x2e, 0x42, 0xbe, 0xbe, 0xf5, 0x64, 0xbf, 0x77, 0x54, 0xdf, 0xb0, 0x10, 0xd2, 0x16, 0x5d, 0xce, 0xaf, 0x9f, 0xfb, 0xa3, 0x63, 0x50, 0xcb, 0xc0, 0xd0, 0x88, 0x44, 0xa3},
- dt2: fp.Elt{0xc3, 0x8b, 0xa5, 0xf1, 0x44, 0xe4, 0x41, 0xcd, 0x75, 0xe3, 0x17, 0x69, 0x5b, 0xb9, 0xbb, 0xee, 0x82, 0xbb, 0xce, 0x57, 0xdf, 0x2a, 0x9c, 0x12, 0xab, 0x66, 0x08, 0x68, 0x05, 0x1b, 0x87, 0xee, 0x5d, 0x1e, 0x18, 0x14, 0x22, 0x4b, 0x99, 0x61, 0x75, 0x28, 0xe7, 0x65, 0x1c, 0x36, 0xb6, 0x18, 0x09, 0xa8, 0xdf, 0xef, 0x30, 0x35, 0xbc, 0x58},
- },
- {
- addYX: fp.Elt{0xc5, 0xd3, 0x0e, 0x6f, 0xaf, 0x06, 0x69, 0xc4, 0x07, 0x9e, 0x58, 0x6e, 0x3f, 0x49, 0xd9, 0x0a, 0x3c, 0x2c, 0x37, 0xcd, 0x27, 0x4d, 0x87, 0x91, 0x7a, 0xb0, 0x28, 0xad, 0x2f, 0x68, 0x92, 0x05, 0x97, 0xf1, 0x30, 0x5f, 0x4c, 0x10, 0x20, 0x30, 0xd3, 0x08, 0x3f, 0xc1, 0xc6, 0xb7, 0xb5, 0xd1, 0x71, 0x7b, 0xa8, 0x0a, 0xd8, 0xf5, 0x17, 0xcf},
- subYX: fp.Elt{0x64, 0xd4, 0x8f, 0x91, 0x40, 0xab, 0x6e, 0x1a, 0x62, 0x83, 0xdc, 0xd7, 0x30, 0x1a, 0x4a, 0x2a, 0x4c, 0x54, 0x86, 0x19, 0x81, 0x5d, 0x04, 0x52, 0xa3, 0xca, 0x82, 0x38, 0xdc, 0x1e, 0xf0, 0x7a, 0x78, 0x76, 0x49, 0x4f, 0x71, 0xc4, 0x74, 0x2f, 0xf0, 0x5b, 0x2e, 0x5e, 0xac, 0xef, 0x17, 0xe4, 0x8e, 0x6e, 0xed, 0x43, 0x23, 0x61, 0x99, 0x49},
- dt2: fp.Elt{0x64, 0x90, 0x72, 0x76, 0xf8, 0x2c, 0x7d, 0x57, 0xf9, 0x30, 0x5e, 0x7a, 0x10, 0x74, 0x19, 0x39, 0xd9, 0xaf, 0x0a, 0xf1, 0x43, 0xed, 0x88, 0x9c, 0x8b, 0xdc, 0x9b, 0x1c, 0x90, 0xe7, 0xf7, 0xa3, 0xa5, 0x0d, 0xc6, 0xbc, 0x30, 0xfb, 0x91, 0x1a, 0x51, 0xba, 0x2d, 0xbe, 0x89, 0xdf, 0x1d, 0xdc, 0x53, 0xa8, 0x82, 0x8a, 0xd3, 0x8d, 0x16, 0x68},
- },
- {
- addYX: fp.Elt{0xef, 0x5c, 0xe3, 0x74, 0xbf, 0x13, 0x4a, 0xbf, 0x66, 0x73, 0x64, 0xb7, 0xd4, 0xce, 0x98, 0x82, 0x05, 0xfa, 0x98, 0x0c, 0x0a, 0xae, 0xe5, 0x6b, 0x9f, 0xac, 0xbb, 0x6e, 0x1f, 0xcf, 0xff, 0xa6, 0x71, 0x9a, 0xa8, 0x7a, 0x9e, 0x64, 0x1f, 0x20, 0x4a, 0x61, 0xa2, 0xd6, 0x50, 0xe3, 0xba, 0x81, 0x0c, 0x50, 0x59, 0x69, 0x59, 0x15, 0x55, 0xdb},
- subYX: fp.Elt{0xe8, 0x77, 0x4d, 0xe8, 0x66, 0x3d, 0xc1, 0x00, 0x3c, 0xf2, 0x25, 0x00, 0xdc, 0xb2, 0xe5, 0x9b, 0x12, 0x89, 0xf3, 0xd6, 0xea, 0x85, 0x60, 0xfe, 0x67, 0x91, 0xfd, 0x04, 0x7c, 0xe0, 0xf1, 0x86, 0x06, 0x11, 0x66, 0xee, 0xd4, 0xd5, 0xbe, 0x3b, 0x0f, 0xe3, 0x59, 0xb3, 0x4f, 0x00, 0xb6, 0xce, 0x80, 0xc1, 0x61, 0xf7, 0xaf, 0x04, 0x6a, 0x3c},
- dt2: fp.Elt{0x00, 0xd7, 0x32, 0x93, 0x67, 0x70, 0x6f, 0xd7, 0x69, 0xab, 0xb1, 0xd3, 0xdc, 0xd6, 0xa8, 0xdd, 0x35, 0x25, 0xca, 0xd3, 0x8a, 0x6d, 0xce, 0xfb, 0xfd, 0x2b, 0x83, 0xf0, 0xd4, 0xac, 0x66, 0xfb, 0x72, 0x87, 0x7e, 0x55, 0xb7, 0x91, 0x58, 0x10, 0xc3, 0x11, 0x7e, 0x15, 0xfe, 0x7c, 0x55, 0x90, 0xa3, 0x9e, 0xed, 0x9a, 0x7f, 0xa7, 0xb7, 0xeb},
- },
- {
- addYX: fp.Elt{0x25, 0x0f, 0xc2, 0x09, 0x9c, 0x10, 0xc8, 0x7c, 0x93, 0xa7, 0xbe, 0xe9, 0x26, 0x25, 0x7c, 0x21, 0xfe, 0xe7, 0x5f, 0x3c, 0x02, 0x83, 0xa7, 0x9e, 0xdf, 0xc0, 0x94, 0x2b, 0x7d, 0x1a, 0xd0, 0x1d, 0xcc, 0x2e, 0x7d, 0xd4, 0x85, 0xe7, 0xc1, 0x15, 0x66, 0xd6, 0xd6, 0x32, 0xb8, 0xf7, 0x63, 0xaa, 0x3b, 0xa5, 0xea, 0x49, 0xad, 0x88, 0x9b, 0x66},
- subYX: fp.Elt{0x09, 0x97, 0x79, 0x36, 0x41, 0x56, 0x9b, 0xdf, 0x15, 0xd8, 0x43, 0x28, 0x17, 0x5b, 0x96, 0xc9, 0xcf, 0x39, 0x1f, 0x13, 0xf7, 0x4d, 0x1d, 0x1f, 0xda, 0x51, 0x56, 0xe7, 0x0a, 0x5a, 0x65, 0xb6, 0x2a, 0x87, 0x49, 0x86, 0xc2, 0x2b, 0xcd, 0xfe, 0x07, 0xf6, 0x4c, 0xe2, 0x1d, 0x9b, 0xd8, 0x82, 0x09, 0x5b, 0x11, 0x10, 0x62, 0x56, 0x89, 0xbd},
- dt2: fp.Elt{0xd9, 0x15, 0x73, 0xf2, 0x96, 0x35, 0x53, 0xb0, 0xe7, 0xa8, 0x0b, 0x93, 0x35, 0x0b, 0x3a, 0x00, 0xf5, 0x18, 0xb1, 0xc3, 0x12, 0x3f, 0x91, 0x17, 0xc1, 0x4c, 0x15, 0x5a, 0x86, 0x92, 0x11, 0xbd, 0x44, 0x40, 0x5a, 0x7b, 0x15, 0x89, 0xba, 0xc1, 0xc1, 0xbc, 0x43, 0x45, 0xe6, 0x52, 0x02, 0x73, 0x0a, 0xd0, 0x2a, 0x19, 0xda, 0x47, 0xa8, 0xff},
- },
- },
-}
-
-// tabVerif contains the odd multiples of P. The entry T[i] = (2i+1)P, where
-// P = phi(G) and G is the generator of the Goldilocks curve, and phi is a
-// 4-degree isogeny.
-var tabVerif = [1 << (omegaFix - 2)]preTwistPointAffine{
- { /* 1P*/
- addYX: fp.Elt{0x65, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2b, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05},
- subYX: fp.Elt{0x64, 0x4a, 0xdd, 0xdf, 0xb4, 0x79, 0x60, 0xc8, 0xa1, 0x70, 0xb4, 0x3a, 0x1e, 0x0c, 0x9b, 0x19, 0xe5, 0x48, 0x3f, 0xd7, 0x44, 0x18, 0x18, 0x14, 0x14, 0x27, 0x45, 0xd0, 0x2d, 0x24, 0xd5, 0x93, 0xc3, 0x74, 0x4c, 0x50, 0x70, 0x43, 0x26, 0x05, 0x08, 0x24, 0xca, 0x78, 0x30, 0xc1, 0x06, 0x8d, 0xd4, 0x86, 0x42, 0xf0, 0x14, 0xde, 0x08, 0x05},
- dt2: fp.Elt{0x1a, 0x33, 0xea, 0x64, 0x45, 0x1c, 0xdf, 0x17, 0x1d, 0x16, 0x34, 0x28, 0xd6, 0x61, 0x19, 0x67, 0x79, 0xb4, 0x13, 0xcf, 0x3e, 0x7c, 0x0e, 0x72, 0xda, 0xf1, 0x5f, 0xda, 0xe6, 0xcf, 0x42, 0xd3, 0xb6, 0x17, 0xc2, 0x68, 0x13, 0x2d, 0xd9, 0x60, 0x3e, 0xae, 0xf0, 0x5b, 0x96, 0xf0, 0xcd, 0xaf, 0xea, 0xb7, 0x0d, 0x59, 0x16, 0xa7, 0xff, 0x55},
- },
- { /* 3P*/
- addYX: fp.Elt{0xd1, 0xe9, 0xa8, 0x33, 0x20, 0x76, 0x18, 0x08, 0x45, 0x2a, 0xc9, 0x67, 0x2a, 0xc3, 0x15, 0x24, 0xf9, 0x74, 0x21, 0x30, 0x99, 0x59, 0x8b, 0xb2, 0xf0, 0xa4, 0x07, 0xe2, 0x6a, 0x36, 0x8d, 0xd9, 0xd2, 0x4a, 0x7f, 0x73, 0x50, 0x39, 0x3d, 0xaa, 0xa7, 0x51, 0x73, 0x0d, 0x2b, 0x8b, 0x96, 0x47, 0xac, 0x3c, 0x5d, 0xaa, 0x39, 0x9c, 0xcf, 0xd5},
- subYX: fp.Elt{0x6b, 0x11, 0x5d, 0x1a, 0xf9, 0x41, 0x9d, 0xc5, 0x30, 0x3e, 0xad, 0x25, 0x2c, 0x04, 0x45, 0xea, 0xcc, 0x67, 0x07, 0x85, 0xe9, 0xda, 0x0e, 0xb5, 0x40, 0xb7, 0x32, 0xb4, 0x49, 0xdd, 0xff, 0xaa, 0xfc, 0xbb, 0x19, 0xca, 0x8b, 0x79, 0x2b, 0x8f, 0x8d, 0x00, 0x33, 0xc2, 0xad, 0xe9, 0xd3, 0x12, 0xa8, 0xaa, 0x87, 0x62, 0xad, 0x2d, 0xff, 0xa4},
- dt2: fp.Elt{0xb0, 0xaf, 0x3b, 0xea, 0xf0, 0x42, 0x0b, 0x5e, 0x88, 0xd3, 0x98, 0x08, 0x87, 0x59, 0x72, 0x0a, 0xc2, 0xdf, 0xcb, 0x7f, 0x59, 0xb5, 0x4c, 0x63, 0x68, 0xe8, 0x41, 0x38, 0x67, 0x4f, 0xe9, 0xc6, 0xb2, 0x6b, 0x08, 0xa7, 0xf7, 0x0e, 0xcd, 0xea, 0xca, 0x3d, 0xaf, 0x8e, 0xda, 0x4b, 0x2e, 0xd2, 0x88, 0x64, 0x8d, 0xc5, 0x5f, 0x76, 0x0f, 0x3d},
- },
- { /* 5P*/
- addYX: fp.Elt{0xe5, 0x65, 0xc9, 0xe2, 0x75, 0xf0, 0x7d, 0x1a, 0xba, 0xa4, 0x40, 0x4b, 0x93, 0x12, 0xa2, 0x80, 0x95, 0x0d, 0x03, 0x93, 0xe8, 0xa5, 0x4d, 0xe2, 0x3d, 0x81, 0xf5, 0xce, 0xd4, 0x2d, 0x25, 0x59, 0x16, 0x5c, 0xe7, 0xda, 0xc7, 0x45, 0xd2, 0x7e, 0x2c, 0x38, 0xd4, 0x37, 0x64, 0xb2, 0xc2, 0x28, 0xc5, 0x72, 0x16, 0x32, 0x45, 0x36, 0x6f, 0x9f},
- subYX: fp.Elt{0x09, 0xf4, 0x7e, 0xbd, 0x89, 0xdb, 0x19, 0x58, 0xe1, 0x08, 0x00, 0x8a, 0xf4, 0x5f, 0x2a, 0x32, 0x40, 0xf0, 0x2c, 0x3f, 0x5d, 0xe4, 0xfc, 0x89, 0x11, 0x24, 0xb4, 0x2f, 0x97, 0xad, 0xac, 0x8f, 0x19, 0xab, 0xfa, 0x12, 0xe5, 0xf9, 0x50, 0x4e, 0x50, 0x6f, 0x32, 0x30, 0x88, 0xa6, 0xe5, 0x48, 0x28, 0xa2, 0x1b, 0x9f, 0xcd, 0xe2, 0x43, 0x38},
- dt2: fp.Elt{0xa9, 0xcc, 0x53, 0x39, 0x86, 0x02, 0x60, 0x75, 0x34, 0x99, 0x57, 0xbd, 0xfc, 0x5a, 0x8e, 0xce, 0x5e, 0x98, 0x22, 0xd0, 0xa5, 0x24, 0xff, 0x90, 0x28, 0x9f, 0x58, 0xf3, 0x39, 0xe9, 0xba, 0x36, 0x23, 0xfb, 0x7f, 0x41, 0xcc, 0x2b, 0x5a, 0x25, 0x3f, 0x4c, 0x2a, 0xf1, 0x52, 0x6f, 0x2f, 0x07, 0xe3, 0x88, 0x81, 0x77, 0xdd, 0x7c, 0x88, 0x82},
- },
- { /* 7P*/
- addYX: fp.Elt{0xf7, 0xee, 0x88, 0xfd, 0x3a, 0xbf, 0x7e, 0x28, 0x39, 0x23, 0x79, 0xe6, 0x5c, 0x56, 0xcb, 0xb5, 0x48, 0x6a, 0x80, 0x6d, 0x37, 0x60, 0x6c, 0x10, 0x35, 0x49, 0x4b, 0x46, 0x60, 0xd4, 0x79, 0xd4, 0x53, 0xd3, 0x67, 0x88, 0xd0, 0x41, 0xd5, 0x43, 0x85, 0xc8, 0x71, 0xe3, 0x1c, 0xb6, 0xda, 0x22, 0x64, 0x8f, 0x80, 0xac, 0xad, 0x7d, 0xd5, 0x82},
- subYX: fp.Elt{0x92, 0x40, 0xc1, 0x83, 0x21, 0x9b, 0xd5, 0x7d, 0x3f, 0x29, 0xb6, 0x26, 0xef, 0x12, 0xb9, 0x27, 0x39, 0x42, 0x37, 0x97, 0x09, 0x9a, 0x08, 0xe1, 0x68, 0xb6, 0x7a, 0x3f, 0x9f, 0x45, 0xf8, 0x37, 0x19, 0x83, 0x97, 0xe6, 0x73, 0x30, 0x32, 0x35, 0xcf, 0xae, 0x5c, 0x12, 0x68, 0xdf, 0x6e, 0x2b, 0xde, 0x83, 0xa0, 0x44, 0x74, 0x2e, 0x4a, 0xe9},
- dt2: fp.Elt{0xcb, 0x22, 0x0a, 0xda, 0x6b, 0xc1, 0x8a, 0x29, 0xa1, 0xac, 0x8b, 0x5b, 0x8b, 0x32, 0x20, 0xf2, 0x21, 0xae, 0x0c, 0x43, 0xc4, 0xd7, 0x19, 0x37, 0x3d, 0x79, 0x25, 0x98, 0x6c, 0x9c, 0x22, 0x31, 0x2a, 0x55, 0x9f, 0xda, 0x5e, 0xa8, 0x13, 0xdb, 0x8e, 0x2e, 0x16, 0x39, 0xf4, 0x91, 0x6f, 0xec, 0x71, 0x71, 0xc9, 0x10, 0xf2, 0xa4, 0x8f, 0x11},
- },
- { /* 9P*/
- addYX: fp.Elt{0x85, 0xdd, 0x37, 0x62, 0x74, 0x8e, 0x33, 0x5b, 0x25, 0x12, 0x1b, 0xe7, 0xdf, 0x47, 0xe5, 0x12, 0xfd, 0x3a, 0x3a, 0xf5, 0x5d, 0x4c, 0xa2, 0x29, 0x3c, 0x5c, 0x2f, 0xee, 0x18, 0x19, 0x0a, 0x2b, 0xef, 0x67, 0x50, 0x7a, 0x0d, 0x29, 0xae, 0x55, 0x82, 0xcd, 0xd6, 0x41, 0x90, 0xb4, 0x13, 0x31, 0x5d, 0x11, 0xb8, 0xaa, 0x12, 0x86, 0x08, 0xac},
- subYX: fp.Elt{0xcc, 0x37, 0x8d, 0x83, 0x5f, 0xfd, 0xde, 0xd5, 0xf7, 0xf1, 0xae, 0x0a, 0xa7, 0x0b, 0xeb, 0x6d, 0x19, 0x8a, 0xb6, 0x1a, 0x59, 0xd8, 0xff, 0x3c, 0xbc, 0xbc, 0xef, 0x9c, 0xda, 0x7b, 0x75, 0x12, 0xaf, 0x80, 0x8f, 0x2c, 0x3c, 0xaa, 0x0b, 0x17, 0x86, 0x36, 0x78, 0x18, 0xc8, 0x8a, 0xf6, 0xb8, 0x2c, 0x2f, 0x57, 0x2c, 0x62, 0x57, 0xf6, 0x90},
- dt2: fp.Elt{0x83, 0xbc, 0xa2, 0x07, 0xa5, 0x38, 0x96, 0xea, 0xfe, 0x11, 0x46, 0x1d, 0x3b, 0xcd, 0x42, 0xc5, 0xee, 0x67, 0x04, 0x72, 0x08, 0xd8, 0xd9, 0x96, 0x07, 0xf7, 0xac, 0xc3, 0x64, 0xf1, 0x98, 0x2c, 0x55, 0xd7, 0x7d, 0xc8, 0x6c, 0xbd, 0x2c, 0xff, 0x15, 0xd6, 0x6e, 0xb8, 0x17, 0x8e, 0xa8, 0x27, 0x66, 0xb1, 0x73, 0x79, 0x96, 0xff, 0x29, 0x10},
- },
- { /* 11P*/
- addYX: fp.Elt{0x76, 0xcb, 0x9b, 0x0c, 0x5b, 0xfe, 0xe1, 0x2a, 0xdd, 0x6f, 0x6c, 0xdd, 0x6f, 0xb4, 0xc0, 0xc2, 0x1b, 0x4b, 0x38, 0xe8, 0x66, 0x8c, 0x1e, 0x31, 0x63, 0xb9, 0x94, 0xcd, 0xc3, 0x8c, 0x44, 0x25, 0x7b, 0xd5, 0x39, 0x80, 0xfc, 0x01, 0xaa, 0xf7, 0x2a, 0x61, 0x8a, 0x25, 0xd2, 0x5f, 0xc5, 0x66, 0x38, 0xa4, 0x17, 0xcf, 0x3e, 0x11, 0x0f, 0xa3},
- subYX: fp.Elt{0xe0, 0xb6, 0xd1, 0x9c, 0x71, 0x49, 0x2e, 0x7b, 0xde, 0x00, 0xda, 0x6b, 0xf1, 0xec, 0xe6, 0x7a, 0x15, 0x38, 0x71, 0xe9, 0x7b, 0xdb, 0xf8, 0x98, 0xc0, 0x91, 0x2e, 0x53, 0xee, 0x92, 0x87, 0x25, 0xc9, 0xb0, 0xbb, 0x33, 0x15, 0x46, 0x7f, 0xfd, 0x4f, 0x8b, 0x77, 0x05, 0x96, 0xb6, 0xe2, 0x08, 0xdb, 0x0d, 0x09, 0xee, 0x5b, 0xd1, 0x2a, 0x63},
- dt2: fp.Elt{0x8f, 0x7b, 0x57, 0x8c, 0xbf, 0x06, 0x0d, 0x43, 0x21, 0x92, 0x94, 0x2d, 0x6a, 0x38, 0x07, 0x0f, 0xa0, 0xf1, 0xe3, 0xd8, 0x2a, 0xbf, 0x46, 0xc6, 0x9e, 0x1f, 0x8f, 0x2b, 0x46, 0x84, 0x0b, 0x74, 0xed, 0xff, 0xf8, 0xa5, 0x94, 0xae, 0xf1, 0x67, 0xb1, 0x9b, 0xdd, 0x4a, 0xd0, 0xdb, 0xc2, 0xb5, 0x58, 0x49, 0x0c, 0xa9, 0x1d, 0x7d, 0xa9, 0xd3},
- },
- { /* 13P*/
- addYX: fp.Elt{0x73, 0x84, 0x2e, 0x31, 0x1f, 0xdc, 0xed, 0x9f, 0x74, 0xfa, 0xe0, 0x35, 0xb1, 0x85, 0x6a, 0x8d, 0x86, 0xd0, 0xff, 0xd6, 0x08, 0x43, 0x73, 0x1a, 0xd5, 0xf8, 0x43, 0xd4, 0xb3, 0xe5, 0x3f, 0xa8, 0x84, 0x17, 0x59, 0x65, 0x4e, 0xe6, 0xee, 0x54, 0x9c, 0xda, 0x5e, 0x7e, 0x98, 0x29, 0x6d, 0x73, 0x34, 0x1f, 0x99, 0x80, 0x54, 0x54, 0x81, 0x0b},
- subYX: fp.Elt{0xb1, 0xe5, 0xbb, 0x80, 0x22, 0x9c, 0x81, 0x6d, 0xaf, 0x27, 0x65, 0x6f, 0x7e, 0x9c, 0xb6, 0x8d, 0x35, 0x5c, 0x2e, 0x20, 0x48, 0x7a, 0x28, 0xf0, 0x97, 0xfe, 0xb7, 0x71, 0xce, 0xd6, 0xad, 0x3a, 0x81, 0xf6, 0x74, 0x5e, 0xf3, 0xfd, 0x1b, 0xd4, 0x1e, 0x7c, 0xc2, 0xb7, 0xc8, 0xa6, 0xc9, 0x89, 0x03, 0x47, 0xec, 0x24, 0xd6, 0x0e, 0xec, 0x9c},
- dt2: fp.Elt{0x91, 0x0a, 0x43, 0x34, 0x20, 0xc2, 0x64, 0xf7, 0x4e, 0x48, 0xc8, 0xd2, 0x95, 0x83, 0xd1, 0xa4, 0xfb, 0x4e, 0x41, 0x3b, 0x0d, 0xd5, 0x07, 0xd9, 0xf1, 0x13, 0x16, 0x78, 0x54, 0x57, 0xd0, 0xf1, 0x4f, 0x20, 0xac, 0xcf, 0x9c, 0x3b, 0x33, 0x0b, 0x99, 0x54, 0xc3, 0x7f, 0x3e, 0x57, 0x26, 0x86, 0xd5, 0xa5, 0x2b, 0x8d, 0xe3, 0x19, 0x36, 0xf7},
- },
- { /* 15P*/
- addYX: fp.Elt{0x23, 0x69, 0x47, 0x14, 0xf9, 0x9a, 0x50, 0xff, 0x64, 0xd1, 0x50, 0x35, 0xc3, 0x11, 0xd3, 0x19, 0xcf, 0x87, 0xda, 0x30, 0x0b, 0x50, 0xda, 0xc0, 0xe0, 0x25, 0x00, 0xe5, 0x68, 0x93, 0x04, 0xc2, 0xaf, 0xbd, 0x2f, 0x36, 0x5f, 0x47, 0x96, 0x10, 0xa8, 0xbd, 0xe4, 0x88, 0xac, 0x80, 0x52, 0x61, 0x73, 0xe9, 0x63, 0xdd, 0x99, 0xad, 0x20, 0x5b},
- subYX: fp.Elt{0x1b, 0x5e, 0xa2, 0x2a, 0x25, 0x0f, 0x86, 0xc0, 0xb1, 0x2e, 0x0c, 0x13, 0x40, 0x8d, 0xf0, 0xe6, 0x00, 0x55, 0x08, 0xc5, 0x7d, 0xf4, 0xc9, 0x31, 0x25, 0x3a, 0x99, 0x69, 0xdd, 0x67, 0x63, 0x9a, 0xd6, 0x89, 0x2e, 0xa1, 0x19, 0xca, 0x2c, 0xd9, 0x59, 0x5f, 0x5d, 0xc3, 0x6e, 0x62, 0x36, 0x12, 0x59, 0x15, 0xe1, 0xdc, 0xa4, 0xad, 0xc9, 0xd0},
- dt2: fp.Elt{0xbc, 0xea, 0xfc, 0xaf, 0x66, 0x23, 0xb7, 0x39, 0x6b, 0x2a, 0x96, 0xa8, 0x54, 0x43, 0xe9, 0xaa, 0x32, 0x40, 0x63, 0x92, 0x5e, 0xdf, 0x35, 0xc2, 0x9f, 0x24, 0x0c, 0xed, 0xfc, 0xde, 0x73, 0x8f, 0xa7, 0xd5, 0xa3, 0x2b, 0x18, 0x1f, 0xb0, 0xf8, 0xeb, 0x55, 0xd9, 0xc3, 0xfd, 0x28, 0x7c, 0x4f, 0xce, 0x0d, 0xf7, 0xae, 0xc2, 0x83, 0xc3, 0x78},
- },
- { /* 17P*/
- addYX: fp.Elt{0x71, 0xe6, 0x60, 0x93, 0x37, 0xdb, 0x01, 0xa5, 0x4c, 0xba, 0xe8, 0x8e, 0xd5, 0xf9, 0xd3, 0x98, 0xe5, 0xeb, 0xab, 0x3a, 0x15, 0x8b, 0x35, 0x60, 0xbe, 0xe5, 0x9c, 0x2d, 0x10, 0x9b, 0x2e, 0xcf, 0x65, 0x64, 0xea, 0x8f, 0x72, 0xce, 0xf5, 0x18, 0xe5, 0xe2, 0xf0, 0x0e, 0xae, 0x04, 0xec, 0xa0, 0x20, 0x65, 0x63, 0x07, 0xb1, 0x9f, 0x03, 0x97},
- subYX: fp.Elt{0x9e, 0x41, 0x64, 0x30, 0x95, 0x7f, 0x3a, 0x89, 0x7b, 0x0a, 0x79, 0x59, 0x23, 0x9a, 0x3b, 0xfe, 0xa4, 0x13, 0x08, 0xb2, 0x2e, 0x04, 0x50, 0x10, 0x30, 0xcd, 0x2e, 0xa4, 0x91, 0x71, 0x50, 0x36, 0x4a, 0x02, 0xf4, 0x8d, 0xa3, 0x36, 0x1b, 0xf4, 0x52, 0xba, 0x15, 0x04, 0x8b, 0x80, 0x25, 0xd9, 0xae, 0x67, 0x20, 0xd9, 0x88, 0x8f, 0x97, 0xa6},
- dt2: fp.Elt{0xb5, 0xe7, 0x46, 0xbd, 0x55, 0x23, 0xa0, 0x68, 0xc0, 0x12, 0xd9, 0xf1, 0x0a, 0x75, 0xe2, 0xda, 0xf4, 0x6b, 0xca, 0x14, 0xe4, 0x9f, 0x0f, 0xb5, 0x3c, 0xa6, 0xa5, 0xa2, 0x63, 0x94, 0xd1, 0x1c, 0x39, 0x58, 0x57, 0x02, 0x27, 0x98, 0xb6, 0x47, 0xc6, 0x61, 0x4b, 0x5c, 0xab, 0x6f, 0x2d, 0xab, 0xe3, 0xc1, 0x69, 0xf9, 0x12, 0xb0, 0xc8, 0xd5},
- },
- { /* 19P*/
- addYX: fp.Elt{0x19, 0x7d, 0xd5, 0xac, 0x79, 0xa2, 0x82, 0x9b, 0x28, 0x31, 0x22, 0xc0, 0x73, 0x02, 0x76, 0x17, 0x10, 0x70, 0x79, 0x57, 0xc9, 0x84, 0x62, 0x8e, 0x04, 0x04, 0x61, 0x67, 0x08, 0x48, 0xb4, 0x4b, 0xde, 0x53, 0x8c, 0xff, 0x36, 0x1b, 0x62, 0x86, 0x5d, 0xe1, 0x9b, 0xb1, 0xe5, 0xe8, 0x44, 0x64, 0xa1, 0x68, 0x3f, 0xa8, 0x45, 0x52, 0x91, 0xed},
- subYX: fp.Elt{0x42, 0x1a, 0x36, 0x1f, 0x90, 0x15, 0x24, 0x8d, 0x24, 0x80, 0xe6, 0xfe, 0x1e, 0xf0, 0xad, 0xaf, 0x6a, 0x93, 0xf0, 0xa6, 0x0d, 0x5d, 0xea, 0xf6, 0x62, 0x96, 0x7a, 0x05, 0x76, 0x85, 0x74, 0x32, 0xc7, 0xc8, 0x64, 0x53, 0x62, 0xe7, 0x54, 0x84, 0xe0, 0x40, 0x66, 0x19, 0x70, 0x40, 0x95, 0x35, 0x68, 0x64, 0x43, 0xcd, 0xba, 0x29, 0x32, 0xa8},
- dt2: fp.Elt{0x3e, 0xf6, 0xd6, 0xe4, 0x99, 0xeb, 0x20, 0x66, 0x08, 0x2e, 0x26, 0x64, 0xd7, 0x76, 0xf3, 0xb4, 0xc5, 0xa4, 0x35, 0x92, 0xd2, 0x99, 0x70, 0x5a, 0x1a, 0xe9, 0xe9, 0x3d, 0x3b, 0xe1, 0xcd, 0x0e, 0xee, 0x24, 0x13, 0x03, 0x22, 0xd6, 0xd6, 0x72, 0x08, 0x2b, 0xde, 0xfd, 0x93, 0xed, 0x0c, 0x7f, 0x5e, 0x31, 0x22, 0x4d, 0x80, 0x78, 0xc0, 0x48},
- },
- { /* 21P*/
- addYX: fp.Elt{0x8f, 0x72, 0xd2, 0x9e, 0xc4, 0xcd, 0x2c, 0xbf, 0xa8, 0xd3, 0x24, 0x62, 0x28, 0xee, 0x39, 0x0a, 0x19, 0x3a, 0x58, 0xff, 0x21, 0x2e, 0x69, 0x6c, 0x6e, 0x18, 0xd0, 0xcd, 0x61, 0xc1, 0x18, 0x02, 0x5a, 0xe9, 0xe3, 0xef, 0x1f, 0x8e, 0x10, 0xe8, 0x90, 0x2b, 0x48, 0xcd, 0xee, 0x38, 0xbd, 0x3a, 0xca, 0xbc, 0x2d, 0xe2, 0x3a, 0x03, 0x71, 0x02},
- subYX: fp.Elt{0xf8, 0xa4, 0x32, 0x26, 0x66, 0xaf, 0x3b, 0x53, 0xe7, 0xb0, 0x91, 0x92, 0xf5, 0x3c, 0x74, 0xce, 0xf2, 0xdd, 0x68, 0xa9, 0xf4, 0xcd, 0x5f, 0x60, 0xab, 0x71, 0xdf, 0xcd, 0x5c, 0x5d, 0x51, 0x72, 0x3a, 0x96, 0xea, 0xd6, 0xde, 0x54, 0x8e, 0x55, 0x4c, 0x08, 0x4c, 0x60, 0xdd, 0x34, 0xa9, 0x6f, 0xf3, 0x04, 0x02, 0xa8, 0xa6, 0x4e, 0x4d, 0x62},
- dt2: fp.Elt{0x76, 0x4a, 0xae, 0x38, 0x62, 0x69, 0x72, 0xdc, 0xe8, 0x43, 0xbe, 0x1d, 0x61, 0xde, 0x31, 0xc3, 0x42, 0x8f, 0x33, 0x9d, 0xca, 0xc7, 0x9c, 0xec, 0x6a, 0xe2, 0xaa, 0x01, 0x49, 0x78, 0x8d, 0x72, 0x4f, 0x38, 0xea, 0x52, 0xc2, 0xd3, 0xc9, 0x39, 0x71, 0xba, 0xb9, 0x09, 0x9b, 0xa3, 0x7f, 0x45, 0x43, 0x65, 0x36, 0x29, 0xca, 0xe7, 0x5c, 0x5f},
- },
- { /* 23P*/
- addYX: fp.Elt{0x89, 0x42, 0x35, 0x48, 0x6d, 0x74, 0xe5, 0x1f, 0xc3, 0xdd, 0x28, 0x5b, 0x84, 0x41, 0x33, 0x9f, 0x42, 0xf3, 0x1d, 0x5d, 0x15, 0x6d, 0x76, 0x33, 0x36, 0xaf, 0xe9, 0xdd, 0xfa, 0x63, 0x4f, 0x7a, 0x9c, 0xeb, 0x1c, 0x4f, 0x34, 0x65, 0x07, 0x54, 0xbb, 0x4c, 0x8b, 0x62, 0x9d, 0xd0, 0x06, 0x99, 0xb3, 0xe9, 0xda, 0x85, 0x19, 0xb0, 0x3d, 0x3c},
- subYX: fp.Elt{0xbb, 0x99, 0xf6, 0xbf, 0xaf, 0x2c, 0x22, 0x0d, 0x7a, 0xaa, 0x98, 0x6f, 0x01, 0x82, 0x99, 0xcf, 0x88, 0xbd, 0x0e, 0x3a, 0x89, 0xe0, 0x9c, 0x8c, 0x17, 0x20, 0xc4, 0xe0, 0xcf, 0x43, 0x7a, 0xef, 0x0d, 0x9f, 0x87, 0xd4, 0xfb, 0xf2, 0x96, 0xb8, 0x03, 0xe8, 0xcb, 0x5c, 0xec, 0x65, 0x5f, 0x49, 0xa4, 0x7c, 0x85, 0xb4, 0xf6, 0xc7, 0xdb, 0xa3},
- dt2: fp.Elt{0x11, 0xf3, 0x32, 0xa3, 0xa7, 0xb2, 0x7d, 0x51, 0x82, 0x44, 0xeb, 0xa2, 0x7d, 0x72, 0xcb, 0xc6, 0xf6, 0xc7, 0xb2, 0x38, 0x0e, 0x0f, 0x4f, 0x29, 0x00, 0xe4, 0x5b, 0x94, 0x46, 0x86, 0x66, 0xa1, 0x83, 0xb3, 0xeb, 0x15, 0xb6, 0x31, 0x50, 0x28, 0xeb, 0xed, 0x0d, 0x32, 0x39, 0xe9, 0x23, 0x81, 0x99, 0x3e, 0xff, 0x17, 0x4c, 0x11, 0x43, 0xd1},
- },
- { /* 25P*/
- addYX: fp.Elt{0xce, 0xe7, 0xf8, 0x94, 0x8f, 0x96, 0xf8, 0x96, 0xe6, 0x72, 0x20, 0x44, 0x2c, 0xa7, 0xfc, 0xba, 0xc8, 0xe1, 0xbb, 0xc9, 0x16, 0x85, 0xcd, 0x0b, 0xe5, 0xb5, 0x5a, 0x7f, 0x51, 0x43, 0x63, 0x8b, 0x23, 0x8e, 0x1d, 0x31, 0xff, 0x46, 0x02, 0x66, 0xcc, 0x9e, 0x4d, 0xa2, 0xca, 0xe2, 0xc7, 0xfd, 0x22, 0xb1, 0xdb, 0xdf, 0x6f, 0xe6, 0xa5, 0x82},
- subYX: fp.Elt{0xd0, 0xf5, 0x65, 0x40, 0xec, 0x8e, 0x65, 0x42, 0x78, 0xc1, 0x65, 0xe4, 0x10, 0xc8, 0x0b, 0x1b, 0xdd, 0x96, 0x68, 0xce, 0xee, 0x45, 0x55, 0xd8, 0x6e, 0xd3, 0xe6, 0x77, 0x19, 0xae, 0xc2, 0x8d, 0x8d, 0x3e, 0x14, 0x3f, 0x6d, 0x00, 0x2f, 0x9b, 0xd1, 0x26, 0x60, 0x28, 0x0f, 0x3a, 0x47, 0xb3, 0xe6, 0x68, 0x28, 0x24, 0x25, 0xca, 0xc8, 0x06},
- dt2: fp.Elt{0x54, 0xbb, 0x60, 0x92, 0xdb, 0x8f, 0x0f, 0x38, 0xe0, 0xe6, 0xe4, 0xc9, 0xcc, 0x14, 0x62, 0x01, 0xc4, 0x2b, 0x0f, 0xcf, 0xed, 0x7d, 0x8e, 0xa4, 0xd9, 0x73, 0x0b, 0xba, 0x0c, 0xaf, 0x0c, 0xf9, 0xe2, 0xeb, 0x29, 0x2a, 0x53, 0xdf, 0x2c, 0x5a, 0xfa, 0x8f, 0xc1, 0x01, 0xd7, 0xb1, 0x45, 0x73, 0x92, 0x32, 0x83, 0x85, 0x12, 0x74, 0x89, 0x44},
- },
- { /* 27P*/
- addYX: fp.Elt{0x0b, 0x73, 0x3c, 0xc2, 0xb1, 0x2e, 0xe1, 0xa7, 0xf5, 0xc9, 0x7a, 0xfb, 0x3d, 0x2d, 0xac, 0x59, 0xdb, 0xfa, 0x36, 0x11, 0xd1, 0x13, 0x04, 0x51, 0x1d, 0xab, 0x9b, 0x6b, 0x93, 0xfe, 0xda, 0xb0, 0x8e, 0xb4, 0x79, 0x11, 0x21, 0x0f, 0x65, 0xb9, 0xbb, 0x79, 0x96, 0x2a, 0xfd, 0x30, 0xe0, 0xb4, 0x2d, 0x9a, 0x55, 0x25, 0x5d, 0xd4, 0xad, 0x2a},
- subYX: fp.Elt{0x9e, 0xc5, 0x04, 0xfe, 0xec, 0x3c, 0x64, 0x1c, 0xed, 0x95, 0xed, 0xae, 0xaf, 0x5c, 0x6e, 0x08, 0x9e, 0x02, 0x29, 0x59, 0x7e, 0x5f, 0xc4, 0x9a, 0xd5, 0x32, 0x72, 0x86, 0xe1, 0x4e, 0x3c, 0xce, 0x99, 0x69, 0x3b, 0xc4, 0xdd, 0x4d, 0xb7, 0xbb, 0xda, 0x3b, 0x1a, 0x99, 0xaa, 0x62, 0x15, 0xc1, 0xf0, 0xb6, 0x6c, 0xec, 0x56, 0xc1, 0xff, 0x0c},
- dt2: fp.Elt{0x2f, 0xf1, 0x3f, 0x7a, 0x2d, 0x56, 0x19, 0x7f, 0xea, 0xbe, 0x59, 0x2e, 0x13, 0x67, 0x81, 0xfb, 0xdb, 0xc8, 0xa3, 0x1d, 0xd5, 0xe9, 0x13, 0x8b, 0x29, 0xdf, 0xcf, 0x9f, 0xe7, 0xd9, 0x0b, 0x70, 0xd3, 0x15, 0x57, 0x4a, 0xe9, 0x50, 0x12, 0x1b, 0x81, 0x4b, 0x98, 0x98, 0xa8, 0x31, 0x1d, 0x27, 0x47, 0x38, 0xed, 0x57, 0x99, 0x26, 0xb2, 0xee},
- },
- { /* 29P*/
- addYX: fp.Elt{0x1c, 0xb2, 0xb2, 0x67, 0x3b, 0x8b, 0x3d, 0x5a, 0x30, 0x7e, 0x38, 0x7e, 0x3c, 0x3d, 0x28, 0x56, 0x59, 0xd8, 0x87, 0x53, 0x8b, 0xe6, 0x6c, 0x5d, 0xe5, 0x0a, 0x33, 0x10, 0xce, 0xa2, 0x17, 0x0d, 0xe8, 0x76, 0xee, 0x68, 0xa8, 0x72, 0x54, 0xbd, 0xa6, 0x24, 0x94, 0x6e, 0x77, 0xc7, 0x53, 0xb7, 0x89, 0x1c, 0x7a, 0xe9, 0x78, 0x9a, 0x74, 0x5f},
- subYX: fp.Elt{0x76, 0x96, 0x1c, 0xcf, 0x08, 0x55, 0xd8, 0x1e, 0x0d, 0xa3, 0x59, 0x95, 0x32, 0xf4, 0xc2, 0x8e, 0x84, 0x5e, 0x4b, 0x04, 0xda, 0x71, 0xc9, 0x78, 0x52, 0xde, 0x14, 0xb4, 0x31, 0xf4, 0xd4, 0xb8, 0x58, 0xc5, 0x20, 0xe8, 0xdd, 0x15, 0xb5, 0xee, 0xea, 0x61, 0xe0, 0xf5, 0xd6, 0xae, 0x55, 0x59, 0x05, 0x3e, 0xaf, 0x74, 0xac, 0x1f, 0x17, 0x82},
- dt2: fp.Elt{0x59, 0x24, 0xcd, 0xfc, 0x11, 0x7e, 0x85, 0x18, 0x3d, 0x69, 0xf7, 0x71, 0x31, 0x66, 0x98, 0x42, 0x95, 0x00, 0x8c, 0xb2, 0xae, 0x39, 0x7e, 0x85, 0xd6, 0xb0, 0x02, 0xec, 0xce, 0xfc, 0x25, 0xb2, 0xe3, 0x99, 0x8e, 0x5b, 0x61, 0x96, 0x2e, 0x6d, 0x96, 0x57, 0x71, 0xa5, 0x93, 0x41, 0x0e, 0x6f, 0xfd, 0x0a, 0xbf, 0xa9, 0xf7, 0x56, 0xa9, 0x3e},
- },
- { /* 31P*/
- addYX: fp.Elt{0xa2, 0x2e, 0x0c, 0x17, 0x4d, 0xcc, 0x85, 0x2c, 0x18, 0xa0, 0xd2, 0x08, 0xba, 0x11, 0xfa, 0x47, 0x71, 0x86, 0xaf, 0x36, 0x6a, 0xd7, 0xfe, 0xb9, 0xb0, 0x2f, 0x89, 0x98, 0x49, 0x69, 0xf8, 0x6a, 0xad, 0x27, 0x5e, 0x0a, 0x22, 0x60, 0x5e, 0x5d, 0xca, 0x06, 0x51, 0x27, 0x99, 0x29, 0x85, 0x68, 0x98, 0xe1, 0xc4, 0x21, 0x50, 0xa0, 0xe9, 0xc1},
- subYX: fp.Elt{0x4d, 0x70, 0xee, 0x91, 0x92, 0x3f, 0xb7, 0xd3, 0x1d, 0xdb, 0x8d, 0x6e, 0x16, 0xf5, 0x65, 0x7d, 0x5f, 0xb5, 0x6c, 0x59, 0x26, 0x70, 0x4b, 0xf2, 0xfc, 0xe7, 0xdf, 0x86, 0xfe, 0xa5, 0xa7, 0xa6, 0x5d, 0xfb, 0x06, 0xe9, 0xf9, 0xcc, 0xc0, 0x37, 0xcc, 0xd8, 0x09, 0x04, 0xd2, 0xa5, 0x1d, 0xd7, 0xb7, 0xce, 0x92, 0xac, 0x3c, 0xad, 0xfb, 0xae},
- dt2: fp.Elt{0x17, 0xa3, 0x9a, 0xc7, 0x86, 0x2a, 0x51, 0xf7, 0x96, 0x79, 0x49, 0x22, 0x2e, 0x5a, 0x01, 0x5c, 0xb5, 0x95, 0xd4, 0xe8, 0xcb, 0x00, 0xca, 0x2d, 0x55, 0xb6, 0x34, 0x36, 0x0b, 0x65, 0x46, 0xf0, 0x49, 0xfc, 0x87, 0x86, 0xe5, 0xc3, 0x15, 0xdb, 0x32, 0xcd, 0xf2, 0xd3, 0x82, 0x4c, 0xe6, 0x61, 0x8a, 0xaf, 0xd4, 0x9e, 0x0f, 0x5a, 0xf2, 0x81},
- },
- { /* 33P*/
- addYX: fp.Elt{0x88, 0x10, 0xc0, 0xcb, 0xf5, 0x77, 0xae, 0xa5, 0xbe, 0xf6, 0xcd, 0x2e, 0x8b, 0x7e, 0xbd, 0x79, 0x62, 0x4a, 0xeb, 0x69, 0xc3, 0x28, 0xaa, 0x72, 0x87, 0xa9, 0x25, 0x87, 0x46, 0xea, 0x0e, 0x62, 0xa3, 0x6a, 0x1a, 0xe2, 0xba, 0xdc, 0x81, 0x10, 0x33, 0x01, 0xf6, 0x16, 0x89, 0x80, 0xc6, 0xcd, 0xdb, 0xdc, 0xba, 0x0e, 0x09, 0x4a, 0x35, 0x4a},
- subYX: fp.Elt{0x86, 0xb2, 0x2b, 0xd0, 0xb8, 0x4a, 0x6d, 0x66, 0x7b, 0x32, 0xdf, 0x3b, 0x1a, 0x19, 0x1f, 0x63, 0xee, 0x1f, 0x3d, 0x1c, 0x5c, 0x14, 0x60, 0x5b, 0x72, 0x49, 0x07, 0xb1, 0x0d, 0x72, 0xc6, 0x35, 0xf0, 0xbc, 0x5e, 0xda, 0x80, 0x6b, 0x64, 0x5b, 0xe5, 0x34, 0x54, 0x39, 0xdd, 0xe6, 0x3c, 0xcb, 0xe5, 0x29, 0x32, 0x06, 0xc6, 0xb1, 0x96, 0x34},
- dt2: fp.Elt{0x85, 0x86, 0xf5, 0x84, 0x86, 0xe6, 0x77, 0x8a, 0x71, 0x85, 0x0c, 0x4f, 0x81, 0x5b, 0x29, 0x06, 0xb5, 0x2e, 0x26, 0x71, 0x07, 0x78, 0x07, 0xae, 0xbc, 0x95, 0x46, 0xc3, 0x65, 0xac, 0xe3, 0x76, 0x51, 0x7d, 0xd4, 0x85, 0x31, 0xe3, 0x43, 0xf3, 0x1b, 0x7c, 0xf7, 0x6b, 0x2c, 0xf8, 0x1c, 0xbb, 0x8d, 0xca, 0xab, 0x4b, 0xba, 0x7f, 0xa4, 0xe2},
- },
- { /* 35P*/
- addYX: fp.Elt{0x1a, 0xee, 0xe7, 0xa4, 0x8a, 0x9d, 0x53, 0x80, 0xc6, 0xb8, 0x4e, 0xdc, 0x89, 0xe0, 0xc4, 0x2b, 0x60, 0x52, 0x6f, 0xec, 0x81, 0xd2, 0x55, 0x6b, 0x1b, 0x6f, 0x17, 0x67, 0x8e, 0x42, 0x26, 0x4c, 0x65, 0x23, 0x29, 0xc6, 0x7b, 0xcd, 0x9f, 0xad, 0x4b, 0x42, 0xd3, 0x0c, 0x75, 0xc3, 0x8a, 0xf5, 0xbe, 0x9e, 0x55, 0xf7, 0x47, 0x5d, 0xbd, 0x3a},
- subYX: fp.Elt{0x0d, 0xa8, 0x3b, 0xf9, 0xc7, 0x7e, 0xc6, 0x86, 0x94, 0xc0, 0x01, 0xff, 0x27, 0xce, 0x43, 0xac, 0xe5, 0xe1, 0xd2, 0x8d, 0xc1, 0x22, 0x31, 0xbe, 0xe1, 0xaf, 0xf9, 0x4a, 0x78, 0xa1, 0x0c, 0xaa, 0xd4, 0x80, 0xe4, 0x09, 0x8d, 0xfb, 0x1d, 0x52, 0xc8, 0x60, 0x2d, 0xf2, 0xa2, 0x89, 0x02, 0x56, 0x3d, 0x56, 0x27, 0x85, 0xc7, 0xf0, 0x2b, 0x9a},
- dt2: fp.Elt{0x62, 0x7c, 0xc7, 0x6b, 0x2c, 0x9d, 0x0a, 0x7c, 0xe5, 0x50, 0x3c, 0xe6, 0x87, 0x1c, 0x82, 0x30, 0x67, 0x3c, 0x39, 0xb6, 0xa0, 0x31, 0xfb, 0x03, 0x7b, 0xa1, 0x58, 0xdf, 0x12, 0x76, 0x5d, 0x5d, 0x0a, 0x8f, 0x9b, 0x37, 0x32, 0xc3, 0x60, 0x33, 0xea, 0x9f, 0x0a, 0x99, 0xfa, 0x20, 0xd0, 0x33, 0x21, 0xc3, 0x94, 0xd4, 0x86, 0x49, 0x7c, 0x4e},
- },
- { /* 37P*/
- addYX: fp.Elt{0xc7, 0x0c, 0x71, 0xfe, 0x55, 0xd1, 0x95, 0x8f, 0x43, 0xbb, 0x6b, 0x74, 0x30, 0xbd, 0xe8, 0x6f, 0x1c, 0x1b, 0x06, 0x62, 0xf5, 0xfc, 0x65, 0xa0, 0xeb, 0x81, 0x12, 0xc9, 0x64, 0x66, 0x61, 0xde, 0xf3, 0x6d, 0xd4, 0xae, 0x8e, 0xb1, 0x72, 0xe0, 0xcd, 0x37, 0x01, 0x28, 0x52, 0xd7, 0x39, 0x46, 0x0c, 0x55, 0xcf, 0x47, 0x70, 0xef, 0xa1, 0x17},
- subYX: fp.Elt{0x8d, 0x58, 0xde, 0x83, 0x88, 0x16, 0x0e, 0x12, 0x42, 0x03, 0x50, 0x60, 0x4b, 0xdf, 0xbf, 0x95, 0xcc, 0x7d, 0x18, 0x17, 0x7e, 0x31, 0x5d, 0x8a, 0x66, 0xc1, 0xcf, 0x14, 0xea, 0xf4, 0xf4, 0xe5, 0x63, 0x2d, 0x32, 0x86, 0x9b, 0xed, 0x1f, 0x4f, 0x03, 0xaf, 0x33, 0x92, 0xcb, 0xaf, 0x9c, 0x05, 0x0d, 0x47, 0x1b, 0x42, 0xba, 0x13, 0x22, 0x98},
- dt2: fp.Elt{0xb5, 0x48, 0xeb, 0x7d, 0x3d, 0x10, 0x9f, 0x59, 0xde, 0xf8, 0x1c, 0x4f, 0x7d, 0x9d, 0x40, 0x4d, 0x9e, 0x13, 0x24, 0xb5, 0x21, 0x09, 0xb7, 0xee, 0x98, 0x5c, 0x56, 0xbc, 0x5e, 0x2b, 0x78, 0x38, 0x06, 0xac, 0xe3, 0xe0, 0xfa, 0x2e, 0xde, 0x4f, 0xd2, 0xb3, 0xfb, 0x2d, 0x71, 0x84, 0xd1, 0x9d, 0x12, 0x5b, 0x35, 0xc8, 0x03, 0x68, 0x67, 0xc7},
- },
- { /* 39P*/
- addYX: fp.Elt{0xb6, 0x65, 0xfb, 0xa7, 0x06, 0x35, 0xbb, 0xe0, 0x31, 0x8d, 0x91, 0x40, 0x98, 0xab, 0x30, 0xe4, 0xca, 0x12, 0x59, 0x89, 0xed, 0x65, 0x5d, 0x7f, 0xae, 0x69, 0xa0, 0xa4, 0xfa, 0x78, 0xb4, 0xf7, 0xed, 0xae, 0x86, 0x78, 0x79, 0x64, 0x24, 0xa6, 0xd4, 0xe1, 0xf6, 0xd3, 0xa0, 0x89, 0xba, 0x20, 0xf4, 0x54, 0x0d, 0x8f, 0xdb, 0x1a, 0x79, 0xdb},
- subYX: fp.Elt{0xe1, 0x82, 0x0c, 0x4d, 0xde, 0x9f, 0x40, 0xf0, 0xc1, 0xbd, 0x8b, 0xd3, 0x24, 0x03, 0xcd, 0xf2, 0x92, 0x7d, 0xe2, 0x68, 0x7f, 0xf1, 0xbe, 0x69, 0xde, 0x34, 0x67, 0x4c, 0x85, 0x3b, 0xec, 0x98, 0xcc, 0x4d, 0x3e, 0xc0, 0x96, 0x27, 0xe6, 0x75, 0xfc, 0xdf, 0x37, 0xc0, 0x1e, 0x27, 0xe0, 0xf6, 0xc2, 0xbd, 0xbc, 0x3d, 0x9b, 0x39, 0xdc, 0xe2},
- dt2: fp.Elt{0xd8, 0x29, 0xa7, 0x39, 0xe3, 0x9f, 0x2f, 0x0e, 0x4b, 0x24, 0x21, 0x70, 0xef, 0xfd, 0x91, 0xea, 0xbf, 0xe1, 0x72, 0x90, 0xcc, 0xc9, 0x84, 0x0e, 0xad, 0xd5, 0xe6, 0xbb, 0xc5, 0x99, 0x7f, 0xa4, 0xf0, 0x2e, 0xcc, 0x95, 0x64, 0x27, 0x19, 0xd8, 0x4c, 0x27, 0x0d, 0xff, 0xb6, 0x29, 0xe2, 0x6c, 0xfa, 0xbb, 0x4d, 0x9c, 0xbb, 0xaf, 0xa5, 0xec},
- },
- { /* 41P*/
- addYX: fp.Elt{0xd6, 0x33, 0x3f, 0x9f, 0xcf, 0xfd, 0x4c, 0xd1, 0xfe, 0xe5, 0xeb, 0x64, 0x27, 0xae, 0x7a, 0xa2, 0x82, 0x50, 0x6d, 0xaa, 0xe3, 0x5d, 0xe2, 0x48, 0x60, 0xb3, 0x76, 0x04, 0xd9, 0x19, 0xa7, 0xa1, 0x73, 0x8d, 0x38, 0xa9, 0xaf, 0x45, 0xb5, 0xb2, 0x62, 0x9b, 0xf1, 0x35, 0x7b, 0x84, 0x66, 0xeb, 0x06, 0xef, 0xf1, 0xb2, 0x2d, 0x6a, 0x61, 0x15},
- subYX: fp.Elt{0x86, 0x50, 0x42, 0xf7, 0xda, 0x59, 0xb2, 0xcf, 0x0d, 0x3d, 0xee, 0x8e, 0x53, 0x5d, 0xf7, 0x9e, 0x6a, 0x26, 0x2d, 0xc7, 0x8c, 0x8e, 0x18, 0x50, 0x6d, 0xb7, 0x51, 0x4c, 0xa7, 0x52, 0x6e, 0x0e, 0x0a, 0x16, 0x74, 0xb2, 0x81, 0x8b, 0x56, 0x27, 0x22, 0x84, 0xf4, 0x56, 0xc5, 0x06, 0xe1, 0x8b, 0xca, 0x2d, 0xdb, 0x9a, 0xf6, 0x10, 0x9c, 0x51},
- dt2: fp.Elt{0x1f, 0x16, 0xa2, 0x78, 0x96, 0x1b, 0x85, 0x9c, 0x76, 0x49, 0xd4, 0x0f, 0xac, 0xb0, 0xf4, 0xd0, 0x06, 0x2c, 0x7e, 0x6d, 0x6e, 0x8e, 0xc7, 0x9f, 0x18, 0xad, 0xfc, 0x88, 0x0c, 0x0c, 0x09, 0x05, 0x05, 0xa0, 0x79, 0x72, 0x32, 0x72, 0x87, 0x0f, 0x49, 0x87, 0x0c, 0xb4, 0x12, 0xc2, 0x09, 0xf8, 0x9f, 0x30, 0x72, 0xa9, 0x47, 0x13, 0x93, 0x49},
- },
- { /* 43P*/
- addYX: fp.Elt{0xcc, 0xb1, 0x4c, 0xd3, 0xc0, 0x9e, 0x9e, 0x4d, 0x6d, 0x28, 0x0b, 0xa5, 0x94, 0xa7, 0x2e, 0xc2, 0xc7, 0xaf, 0x29, 0x73, 0xc9, 0x68, 0xea, 0x0f, 0x34, 0x37, 0x8d, 0x96, 0x8f, 0x3a, 0x3d, 0x73, 0x1e, 0x6d, 0x9f, 0xcf, 0x8d, 0x83, 0xb5, 0x71, 0xb9, 0xe1, 0x4b, 0x67, 0x71, 0xea, 0xcf, 0x56, 0xe5, 0xeb, 0x72, 0x15, 0x2f, 0x9e, 0xa8, 0xaa},
- subYX: fp.Elt{0xf4, 0x3e, 0x85, 0x1c, 0x1a, 0xef, 0x50, 0xd1, 0xb4, 0x20, 0xb2, 0x60, 0x05, 0x98, 0xfe, 0x47, 0x3b, 0xc1, 0x76, 0xca, 0x2c, 0x4e, 0x5a, 0x42, 0xa3, 0xf7, 0x20, 0xaa, 0x57, 0x39, 0xee, 0x34, 0x1f, 0xe1, 0x68, 0xd3, 0x7e, 0x06, 0xc4, 0x6c, 0xc7, 0x76, 0x2b, 0xe4, 0x1c, 0x48, 0x44, 0xe6, 0xe5, 0x44, 0x24, 0x8d, 0xb3, 0xb6, 0x88, 0x32},
- dt2: fp.Elt{0x18, 0xa7, 0xba, 0xd0, 0x44, 0x6f, 0x33, 0x31, 0x00, 0xf8, 0xf6, 0x12, 0xe3, 0xc5, 0xc7, 0xb5, 0x91, 0x9c, 0x91, 0xb5, 0x75, 0x18, 0x18, 0x8a, 0xab, 0xed, 0x24, 0x11, 0x2e, 0xce, 0x5a, 0x0f, 0x94, 0x5f, 0x2e, 0xca, 0xd3, 0x80, 0xea, 0xe5, 0x34, 0x96, 0x67, 0x8b, 0x6a, 0x26, 0x5e, 0xc8, 0x9d, 0x2c, 0x5e, 0x6c, 0xa2, 0x0c, 0xbf, 0xf0},
- },
- { /* 45P*/
- addYX: fp.Elt{0xb3, 0xbf, 0xa3, 0x85, 0xee, 0xf6, 0x58, 0x02, 0x78, 0xc4, 0x30, 0xd6, 0x57, 0x59, 0x8c, 0x88, 0x08, 0x7c, 0xbc, 0xbe, 0x0a, 0x74, 0xa9, 0xde, 0x69, 0xe7, 0x41, 0xd8, 0xbf, 0x66, 0x8d, 0x3d, 0x28, 0x00, 0x8c, 0x47, 0x65, 0x34, 0xfe, 0x86, 0x9e, 0x6a, 0xf2, 0x41, 0x6a, 0x94, 0xc4, 0x88, 0x75, 0x23, 0x0d, 0x52, 0x69, 0xee, 0x07, 0x89},
- subYX: fp.Elt{0x22, 0x3c, 0xa1, 0x70, 0x58, 0x97, 0x93, 0xbe, 0x59, 0xa8, 0x0b, 0x8a, 0x46, 0x2a, 0x38, 0x1e, 0x08, 0x6b, 0x61, 0x9f, 0xf2, 0x4a, 0x8b, 0x80, 0x68, 0x6e, 0xc8, 0x92, 0x60, 0xf3, 0xc9, 0x89, 0xb2, 0x6d, 0x63, 0xb0, 0xeb, 0x83, 0x15, 0x63, 0x0e, 0x64, 0xbb, 0xb8, 0xfe, 0xb4, 0x81, 0x90, 0x01, 0x28, 0x10, 0xb9, 0x74, 0x6e, 0xde, 0xa4},
- dt2: fp.Elt{0x1a, 0x23, 0x45, 0xa8, 0x6f, 0x4e, 0xa7, 0x4a, 0x0c, 0xeb, 0xb0, 0x43, 0xf9, 0xef, 0x99, 0x60, 0x5b, 0xdb, 0x66, 0xc0, 0x86, 0x71, 0x43, 0xb1, 0x22, 0x7b, 0x1c, 0xe7, 0x8d, 0x09, 0x1d, 0x83, 0x76, 0x9c, 0xd3, 0x5a, 0xdd, 0x42, 0xd9, 0x2f, 0x2d, 0xba, 0x7a, 0xc2, 0xd9, 0x6b, 0xd4, 0x7a, 0xf1, 0xd5, 0x5f, 0x6b, 0x85, 0xbf, 0x0b, 0xf1},
- },
- { /* 47P*/
- addYX: fp.Elt{0xb2, 0x83, 0xfa, 0x1f, 0xd2, 0xce, 0xb6, 0xf2, 0x2d, 0xea, 0x1b, 0xe5, 0x29, 0xa5, 0x72, 0xf9, 0x25, 0x48, 0x4e, 0xf2, 0x50, 0x1b, 0x39, 0xda, 0x34, 0xc5, 0x16, 0x13, 0xb4, 0x0c, 0xa1, 0x00, 0x79, 0x7a, 0xf5, 0x8b, 0xf3, 0x70, 0x14, 0xb6, 0xfc, 0x9a, 0x47, 0x68, 0x1e, 0x42, 0x70, 0x64, 0x2a, 0x84, 0x3e, 0x3d, 0x20, 0x58, 0xf9, 0x6a},
- subYX: fp.Elt{0xd9, 0xee, 0xc0, 0xc4, 0xf5, 0xc2, 0x86, 0xaf, 0x45, 0xd2, 0xd2, 0x87, 0x1b, 0x64, 0xd5, 0xe0, 0x8c, 0x44, 0x00, 0x4f, 0x43, 0x89, 0x04, 0x48, 0x4a, 0x0b, 0xca, 0x94, 0x06, 0x2f, 0x23, 0x5b, 0x6c, 0x8d, 0x44, 0x66, 0x53, 0xf5, 0x5a, 0x20, 0x72, 0x28, 0x58, 0x84, 0xcc, 0x73, 0x22, 0x5e, 0xd1, 0x0b, 0x56, 0x5e, 0x6a, 0xa3, 0x11, 0x91},
- dt2: fp.Elt{0x6e, 0x9f, 0x88, 0xa8, 0x68, 0x2f, 0x12, 0x37, 0x88, 0xfc, 0x92, 0x8f, 0x24, 0xeb, 0x5b, 0x2a, 0x2a, 0xd0, 0x14, 0x40, 0x4c, 0xa9, 0xa4, 0x03, 0x0c, 0x45, 0x48, 0x13, 0xe8, 0xa6, 0x37, 0xab, 0xc0, 0x06, 0x38, 0x6c, 0x96, 0x73, 0x40, 0x6c, 0xc6, 0xea, 0x56, 0xc6, 0xe9, 0x1a, 0x69, 0xeb, 0x7a, 0xd1, 0x33, 0x69, 0x58, 0x2b, 0xea, 0x2f},
- },
- { /* 49P*/
- addYX: fp.Elt{0x58, 0xa8, 0x05, 0x41, 0x00, 0x9d, 0xaa, 0xd9, 0x98, 0xcf, 0xb9, 0x41, 0xb5, 0x4a, 0x8d, 0xe2, 0xe7, 0xc0, 0x72, 0xef, 0xc8, 0x28, 0x6b, 0x68, 0x9d, 0xc9, 0xdf, 0x05, 0x8b, 0xd0, 0x04, 0x74, 0x79, 0x45, 0x52, 0x05, 0xa3, 0x6e, 0x35, 0x3a, 0xe3, 0xef, 0xb2, 0xdc, 0x08, 0x6f, 0x4e, 0x76, 0x85, 0x67, 0xba, 0x23, 0x8f, 0xdd, 0xaf, 0x09},
- subYX: fp.Elt{0xb4, 0x38, 0xc8, 0xff, 0x4f, 0x65, 0x2a, 0x7e, 0xad, 0xb1, 0xc6, 0xb9, 0x3d, 0xd6, 0xf7, 0x14, 0xcf, 0xf6, 0x98, 0x75, 0xbb, 0x47, 0x83, 0x90, 0xe7, 0xe1, 0xf6, 0x14, 0x99, 0x7e, 0xfa, 0xe4, 0x77, 0x24, 0xe3, 0xe7, 0xf0, 0x1e, 0xdb, 0x27, 0x4e, 0x16, 0x04, 0xf2, 0x08, 0x52, 0xfc, 0xec, 0x55, 0xdb, 0x2e, 0x67, 0xe1, 0x94, 0x32, 0x89},
- dt2: fp.Elt{0x00, 0xad, 0x03, 0x35, 0x1a, 0xb1, 0x88, 0xf0, 0xc9, 0x11, 0xe4, 0x12, 0x52, 0x61, 0xfd, 0x8a, 0x1b, 0x6a, 0x0a, 0x4c, 0x42, 0x46, 0x22, 0x0e, 0xa5, 0xf9, 0xe2, 0x50, 0xf2, 0xb2, 0x1f, 0x20, 0x78, 0x10, 0xf6, 0xbf, 0x7f, 0x0c, 0x9c, 0xad, 0x40, 0x8b, 0x82, 0xd4, 0xba, 0x69, 0x09, 0xac, 0x4b, 0x6d, 0xc4, 0x49, 0x17, 0x81, 0x57, 0x3b},
- },
- { /* 51P*/
- addYX: fp.Elt{0x0d, 0xfe, 0xb4, 0x35, 0x11, 0xbd, 0x1d, 0x6b, 0xc2, 0xc5, 0x3b, 0xd2, 0x23, 0x2c, 0x72, 0xe3, 0x48, 0xb1, 0x48, 0x73, 0xfb, 0xa3, 0x21, 0x6e, 0xc0, 0x09, 0x69, 0xac, 0xe1, 0x60, 0xbc, 0x24, 0x03, 0x99, 0x63, 0x0a, 0x00, 0xf0, 0x75, 0xf6, 0x92, 0xc5, 0xd6, 0xdb, 0x51, 0xd4, 0x7d, 0xe6, 0xf4, 0x11, 0x79, 0xd7, 0xc3, 0xaf, 0x48, 0xd0},
- subYX: fp.Elt{0xf4, 0x4f, 0xaf, 0x31, 0xe3, 0x10, 0x89, 0x95, 0xf0, 0x8a, 0xf6, 0x31, 0x9f, 0x48, 0x02, 0xba, 0x42, 0x2b, 0x3c, 0x22, 0x8b, 0xcc, 0x12, 0x98, 0x6e, 0x7a, 0x64, 0x3a, 0xc4, 0xca, 0x32, 0x2a, 0x72, 0xf8, 0x2c, 0xcf, 0x78, 0x5e, 0x7a, 0x75, 0x6e, 0x72, 0x46, 0x48, 0x62, 0x28, 0xac, 0x58, 0x1a, 0xc6, 0x59, 0x88, 0x2a, 0x44, 0x9e, 0x83},
- dt2: fp.Elt{0xb3, 0xde, 0x36, 0xfd, 0xeb, 0x1b, 0xd4, 0x24, 0x1b, 0x08, 0x8c, 0xfe, 0xa9, 0x41, 0xa1, 0x64, 0xf2, 0x6d, 0xdb, 0xf9, 0x94, 0xae, 0x86, 0x71, 0xab, 0x10, 0xbf, 0xa3, 0xb2, 0xa0, 0xdf, 0x10, 0x8c, 0x74, 0xce, 0xb3, 0xfc, 0xdb, 0xba, 0x15, 0xf6, 0x91, 0x7a, 0x9c, 0x36, 0x1e, 0x45, 0x07, 0x3c, 0xec, 0x1a, 0x61, 0x26, 0x93, 0xe3, 0x50},
- },
- { /* 53P*/
- addYX: fp.Elt{0xc5, 0x50, 0xc5, 0x83, 0xb0, 0xbd, 0xd9, 0xf6, 0x6d, 0x15, 0x5e, 0xc1, 0x1a, 0x33, 0xa0, 0xce, 0x13, 0x70, 0x3b, 0xe1, 0x31, 0xc6, 0xc4, 0x02, 0xec, 0x8c, 0xd5, 0x9c, 0x97, 0xd3, 0x12, 0xc4, 0xa2, 0xf9, 0xd5, 0xfb, 0x22, 0x69, 0x94, 0x09, 0x2f, 0x59, 0xce, 0xdb, 0xf2, 0xf2, 0x00, 0xe0, 0xa9, 0x08, 0x44, 0x2e, 0x8b, 0x6b, 0xf5, 0xb3},
- subYX: fp.Elt{0x90, 0xdd, 0xec, 0xa2, 0x65, 0xb7, 0x61, 0xbc, 0xaa, 0x70, 0xa2, 0x15, 0xd8, 0xb0, 0xf8, 0x8e, 0x23, 0x3d, 0x9f, 0x46, 0xa3, 0x29, 0x20, 0xd1, 0xa1, 0x15, 0x81, 0xc6, 0xb6, 0xde, 0xbe, 0x60, 0x63, 0x24, 0xac, 0x15, 0xfb, 0xeb, 0xd3, 0xea, 0x57, 0x13, 0x86, 0x38, 0x1e, 0x22, 0xf4, 0x8c, 0x5d, 0xaf, 0x1b, 0x27, 0x21, 0x4f, 0xa3, 0x63},
- dt2: fp.Elt{0x07, 0x15, 0x87, 0xc4, 0xfd, 0xa1, 0x97, 0x7a, 0x07, 0x1f, 0x56, 0xcc, 0xe3, 0x6a, 0x01, 0x90, 0xce, 0xf9, 0xfa, 0x50, 0xb2, 0xe0, 0x87, 0x8b, 0x6c, 0x63, 0x6c, 0xf6, 0x2a, 0x09, 0xef, 0xef, 0xd2, 0x31, 0x40, 0x25, 0xf6, 0x84, 0xcb, 0xe0, 0xc4, 0x23, 0xc1, 0xcb, 0xe2, 0x02, 0x83, 0x2d, 0xed, 0x74, 0x74, 0x8b, 0xf8, 0x7c, 0x81, 0x18},
- },
- { /* 55P*/
- addYX: fp.Elt{0x9e, 0xe5, 0x59, 0x95, 0x63, 0x2e, 0xac, 0x8b, 0x03, 0x3c, 0xc1, 0x8e, 0xe1, 0x5b, 0x56, 0x3c, 0x16, 0x41, 0xe4, 0xc2, 0x60, 0x0c, 0x6d, 0x65, 0x9f, 0xfc, 0x27, 0x68, 0x43, 0x44, 0x05, 0x12, 0x6c, 0xda, 0x04, 0xef, 0xcf, 0xcf, 0xdc, 0x0a, 0x1a, 0x7f, 0x12, 0xd3, 0xeb, 0x02, 0xb6, 0x04, 0xca, 0xd6, 0xcb, 0xf0, 0x22, 0xba, 0x35, 0x6d},
- subYX: fp.Elt{0x09, 0x6d, 0xf9, 0x64, 0x4c, 0xe6, 0x41, 0xff, 0x01, 0x4d, 0xce, 0x1e, 0xfa, 0x38, 0xa2, 0x25, 0x62, 0xff, 0x03, 0x39, 0x18, 0x91, 0xbb, 0x9d, 0xce, 0x02, 0xf0, 0xf1, 0x3c, 0x55, 0x18, 0xa9, 0xab, 0x4d, 0xd2, 0x35, 0xfd, 0x8d, 0xa9, 0xb2, 0xad, 0xb7, 0x06, 0x6e, 0xc6, 0x69, 0x49, 0xd6, 0x98, 0x98, 0x0b, 0x22, 0x81, 0x6b, 0xbd, 0xa0},
- dt2: fp.Elt{0x22, 0xf4, 0x85, 0x5d, 0x2b, 0xf1, 0x55, 0xa5, 0xd6, 0x27, 0x86, 0x57, 0x12, 0x1f, 0x16, 0x0a, 0x5a, 0x9b, 0xf2, 0x38, 0xb6, 0x28, 0xd8, 0x99, 0x0c, 0x89, 0x1d, 0x7f, 0xca, 0x21, 0x17, 0x1a, 0x0b, 0x02, 0x5f, 0x77, 0x2f, 0x73, 0x30, 0x7c, 0xc8, 0xd7, 0x2b, 0xcc, 0xe7, 0xf3, 0x21, 0xac, 0x53, 0xa7, 0x11, 0x5d, 0xd8, 0x1d, 0x9b, 0xf5},
- },
- { /* 57P*/
- addYX: fp.Elt{0x94, 0x63, 0x5d, 0xef, 0xfd, 0x6d, 0x25, 0x4e, 0x6d, 0x29, 0x03, 0xed, 0x24, 0x28, 0x27, 0x57, 0x47, 0x3e, 0x6a, 0x1a, 0xfe, 0x37, 0xee, 0x5f, 0x83, 0x29, 0x14, 0xfd, 0x78, 0x25, 0x8a, 0xe1, 0x02, 0x38, 0xd8, 0xca, 0x65, 0x55, 0x40, 0x7d, 0x48, 0x2c, 0x7c, 0x7e, 0x60, 0xb6, 0x0c, 0x6d, 0xf7, 0xe8, 0xb3, 0x62, 0x53, 0xd6, 0x9c, 0x2b},
- subYX: fp.Elt{0x47, 0x25, 0x70, 0x62, 0xf5, 0x65, 0x93, 0x62, 0x08, 0xac, 0x59, 0x66, 0xdb, 0x08, 0xd9, 0x1a, 0x19, 0xaf, 0xf4, 0xef, 0x02, 0xa2, 0x78, 0xa9, 0x55, 0x1c, 0xfa, 0x08, 0x11, 0xcb, 0xa3, 0x71, 0x74, 0xb1, 0x62, 0xe7, 0xc7, 0xf3, 0x5a, 0xb5, 0x8b, 0xd4, 0xf6, 0x10, 0x57, 0x79, 0x72, 0x2f, 0x13, 0x86, 0x7b, 0x44, 0x5f, 0x48, 0xfd, 0x88},
- dt2: fp.Elt{0x10, 0x02, 0xcd, 0x05, 0x9a, 0xc3, 0x32, 0x6d, 0x10, 0x3a, 0x74, 0xba, 0x06, 0xc4, 0x3b, 0x34, 0xbc, 0x36, 0xed, 0xa3, 0xba, 0x9a, 0xdb, 0x6d, 0xd4, 0x69, 0x99, 0x97, 0xd0, 0xe4, 0xdd, 0xf5, 0xd4, 0x7c, 0xd3, 0x4e, 0xab, 0xd1, 0x3b, 0xbb, 0xe9, 0xc7, 0x6a, 0x94, 0x25, 0x61, 0xf0, 0x06, 0xc5, 0x12, 0xa8, 0x86, 0xe5, 0x35, 0x46, 0xeb},
- },
- { /* 59P*/
- addYX: fp.Elt{0x9e, 0x95, 0x11, 0xc6, 0xc7, 0xe8, 0xee, 0x5a, 0x26, 0xa0, 0x72, 0x72, 0x59, 0x91, 0x59, 0x16, 0x49, 0x99, 0x7e, 0xbb, 0xd7, 0x15, 0xb4, 0xf2, 0x40, 0xf9, 0x5a, 0x4d, 0xc8, 0xa0, 0xe2, 0x34, 0x7b, 0x34, 0xf3, 0x99, 0xbf, 0xa9, 0xf3, 0x79, 0xc1, 0x1a, 0x0c, 0xf4, 0x86, 0x74, 0x4e, 0xcb, 0xbc, 0x90, 0xad, 0xb6, 0x51, 0x6d, 0xaa, 0x33},
- subYX: fp.Elt{0x9f, 0xd1, 0xc5, 0xa2, 0x6c, 0x24, 0x88, 0x15, 0x71, 0x68, 0xf6, 0x07, 0x45, 0x02, 0xc4, 0x73, 0x7e, 0x75, 0x87, 0xca, 0x7c, 0xf0, 0x92, 0x00, 0x75, 0xd6, 0x5a, 0xdd, 0xe0, 0x64, 0x16, 0x9d, 0x62, 0x80, 0x33, 0x9f, 0xf4, 0x8e, 0x1a, 0x15, 0x1c, 0xd3, 0x0f, 0x4d, 0x4f, 0x62, 0x2d, 0xd7, 0xa5, 0x77, 0xe3, 0xea, 0xf0, 0xfb, 0x1a, 0xdb},
- dt2: fp.Elt{0x6a, 0xa2, 0xb1, 0xaa, 0xfb, 0x5a, 0x32, 0x4e, 0xff, 0x47, 0x06, 0xd5, 0x9a, 0x4f, 0xce, 0x83, 0x5b, 0x82, 0x34, 0x3e, 0x47, 0xb8, 0xf8, 0xe9, 0x7c, 0x67, 0x69, 0x8d, 0x9c, 0xb7, 0xde, 0x57, 0xf4, 0x88, 0x41, 0x56, 0x0c, 0x87, 0x1e, 0xc9, 0x2f, 0x54, 0xbf, 0x5c, 0x68, 0x2c, 0xd9, 0xc4, 0xef, 0x53, 0x73, 0x1e, 0xa6, 0x38, 0x02, 0x10},
- },
- { /* 61P*/
- addYX: fp.Elt{0x08, 0x80, 0x4a, 0xc9, 0xb7, 0xa8, 0x88, 0xd9, 0xfc, 0x6a, 0xc0, 0x3e, 0xc2, 0x33, 0x4d, 0x2b, 0x2a, 0xa3, 0x6d, 0x72, 0x3e, 0xdc, 0x34, 0x68, 0x08, 0xbf, 0x27, 0xef, 0xf4, 0xff, 0xe2, 0x0c, 0x31, 0x0c, 0xa2, 0x0a, 0x1f, 0x65, 0xc1, 0x4c, 0x61, 0xd3, 0x1b, 0xbc, 0x25, 0xb1, 0xd0, 0xd4, 0x89, 0xb2, 0x53, 0xfb, 0x43, 0xa5, 0xaf, 0x04},
- subYX: fp.Elt{0xe3, 0xe1, 0x37, 0xad, 0x58, 0xa9, 0x55, 0x81, 0xee, 0x64, 0x21, 0xb9, 0xf5, 0x4c, 0x35, 0xea, 0x4a, 0xd3, 0x26, 0xaa, 0x90, 0xd4, 0x60, 0x46, 0x09, 0x4b, 0x4a, 0x62, 0xf9, 0xcd, 0xe1, 0xee, 0xbb, 0xc2, 0x09, 0x0b, 0xb0, 0x96, 0x8e, 0x43, 0x77, 0xaf, 0x25, 0x20, 0x5e, 0x47, 0xe4, 0x1d, 0x50, 0x69, 0x74, 0x08, 0xd7, 0xb9, 0x90, 0x13},
- dt2: fp.Elt{0x51, 0x91, 0x95, 0x64, 0x03, 0x16, 0xfd, 0x6e, 0x26, 0x94, 0x6b, 0x61, 0xe7, 0xd9, 0xe0, 0x4a, 0x6d, 0x7c, 0xfa, 0xc0, 0xe2, 0x43, 0x23, 0x53, 0x70, 0xf5, 0x6f, 0x73, 0x8b, 0x81, 0xb0, 0x0c, 0xee, 0x2e, 0x46, 0xf2, 0x8d, 0xa6, 0xfb, 0xb5, 0x1c, 0x33, 0xbf, 0x90, 0x59, 0xc9, 0x7c, 0xb8, 0x6f, 0xad, 0x75, 0x02, 0x90, 0x8e, 0x59, 0x75},
- },
- { /* 63P*/
- addYX: fp.Elt{0x36, 0x4d, 0x77, 0x04, 0xb8, 0x7d, 0x4a, 0xd1, 0xc5, 0xbb, 0x7b, 0x50, 0x5f, 0x8d, 0x9d, 0x62, 0x0f, 0x66, 0x71, 0xec, 0x87, 0xc5, 0x80, 0x82, 0xc8, 0xf4, 0x6a, 0x94, 0x92, 0x5b, 0xb0, 0x16, 0x9b, 0xb2, 0xc9, 0x6f, 0x2b, 0x2d, 0xee, 0x95, 0x73, 0x2e, 0xc2, 0x1b, 0xc5, 0x55, 0x36, 0x86, 0x24, 0xf8, 0x20, 0x05, 0x0d, 0x93, 0xd7, 0x76},
- subYX: fp.Elt{0x7f, 0x01, 0xeb, 0x2e, 0x48, 0x4d, 0x1d, 0xf1, 0x06, 0x7e, 0x7c, 0x2a, 0x43, 0xbf, 0x28, 0xac, 0xe9, 0x58, 0x13, 0xc8, 0xbf, 0x8e, 0xc0, 0xef, 0xe8, 0x4f, 0x46, 0x8a, 0xe7, 0xc0, 0xf6, 0x0f, 0x0a, 0x03, 0x48, 0x91, 0x55, 0x39, 0x2a, 0xe3, 0xdc, 0xf6, 0x22, 0x9d, 0x4d, 0x71, 0x55, 0x68, 0x25, 0x6e, 0x95, 0x52, 0xee, 0x4c, 0xd9, 0x01},
- dt2: fp.Elt{0xac, 0x33, 0x3f, 0x7c, 0x27, 0x35, 0x15, 0x91, 0x33, 0x8d, 0xf9, 0xc4, 0xf4, 0xf3, 0x90, 0x09, 0x75, 0x69, 0x62, 0x9f, 0x61, 0x35, 0x83, 0x92, 0x04, 0xef, 0x96, 0x38, 0x80, 0x9e, 0x88, 0xb3, 0x67, 0x95, 0xbe, 0x79, 0x3c, 0x35, 0xd8, 0xdc, 0xb2, 0x3e, 0x2d, 0xe6, 0x46, 0xbe, 0x81, 0xf3, 0x32, 0x0e, 0x37, 0x23, 0x75, 0x2a, 0x3d, 0xa0},
- },
-}
diff --git a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go b/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go
deleted file mode 100644
index f6ac5edb..00000000
--- a/vendor/github.com/cloudflare/circl/ecc/goldilocks/twist_basemult.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package goldilocks
-
-import (
- "crypto/subtle"
-
- mlsb "github.com/cloudflare/circl/math/mlsbset"
-)
-
-const (
- // MLSBRecoding parameters
- fxT = 448
- fxV = 2
- fxW = 3
- fx2w1 = 1 << (uint(fxW) - 1)
-)
-
-// ScalarBaseMult returns kG where G is the generator point.
-func (e twistCurve) ScalarBaseMult(k *Scalar) *twistPoint {
- m, err := mlsb.New(fxT, fxV, fxW)
- if err != nil {
- panic(err)
- }
- if m.IsExtended() {
- panic("not extended")
- }
-
- var isZero int
- if k.IsZero() {
- isZero = 1
- }
- subtle.ConstantTimeCopy(isZero, k[:], order[:])
-
- minusK := *k
- isEven := 1 - int(k[0]&0x1)
- minusK.Neg()
- subtle.ConstantTimeCopy(isEven, k[:], minusK[:])
- c, err := m.Encode(k[:])
- if err != nil {
- panic(err)
- }
-
- gP := c.Exp(groupMLSB{})
- P := gP.(*twistPoint)
- P.cneg(uint(isEven))
- return P
-}
-
-type groupMLSB struct{}
-
-func (e groupMLSB) ExtendedEltP() mlsb.EltP { return nil }
-func (e groupMLSB) Sqr(x mlsb.EltG) { x.(*twistPoint).Double() }
-func (e groupMLSB) Mul(x mlsb.EltG, y mlsb.EltP) { x.(*twistPoint).mixAddZ1(y.(*preTwistPointAffine)) }
-func (e groupMLSB) Identity() mlsb.EltG { return twistCurve{}.Identity() }
-func (e groupMLSB) NewEltP() mlsb.EltP { return &preTwistPointAffine{} }
-func (e groupMLSB) Lookup(a mlsb.EltP, v uint, s, u int32) {
- Tabj := &tabFixMult[v]
- P := a.(*preTwistPointAffine)
- for k := range Tabj {
- P.cmov(&Tabj[k], uint(subtle.ConstantTimeEq(int32(k), u)))
- }
- P.cneg(int(s >> 31))
-}
diff --git a/vendor/github.com/cloudflare/circl/internal/conv/conv.go b/vendor/github.com/cloudflare/circl/internal/conv/conv.go
deleted file mode 100644
index 649a8e93..00000000
--- a/vendor/github.com/cloudflare/circl/internal/conv/conv.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package conv
-
-import (
- "encoding/binary"
- "fmt"
- "math/big"
- "strings"
-)
-
-// BytesLe2Hex returns an hexadecimal string of a number stored in a
-// little-endian order slice x.
-func BytesLe2Hex(x []byte) string {
- b := &strings.Builder{}
- b.Grow(2*len(x) + 2)
- fmt.Fprint(b, "0x")
- if len(x) == 0 {
- fmt.Fprint(b, "00")
- }
- for i := len(x) - 1; i >= 0; i-- {
- fmt.Fprintf(b, "%02x", x[i])
- }
- return b.String()
-}
-
-// BytesLe2BigInt converts a little-endian slice x into a big-endian
-// math/big.Int.
-func BytesLe2BigInt(x []byte) *big.Int {
- n := len(x)
- b := new(big.Int)
- if len(x) > 0 {
- y := make([]byte, n)
- for i := 0; i < n; i++ {
- y[n-1-i] = x[i]
- }
- b.SetBytes(y)
- }
- return b
-}
-
-// BytesBe2Uint64Le converts a big-endian slice x to a little-endian slice of uint64.
-func BytesBe2Uint64Le(x []byte) []uint64 {
- l := len(x)
- z := make([]uint64, (l+7)/8)
- blocks := l / 8
- for i := 0; i < blocks; i++ {
- z[i] = binary.BigEndian.Uint64(x[l-8*(i+1):])
- }
- remBytes := l % 8
- for i := 0; i < remBytes; i++ {
- z[blocks] |= uint64(x[l-1-8*blocks-i]) << uint(8*i)
- }
- return z
-}
-
-// BigInt2BytesLe stores a positive big.Int number x into a little-endian slice z.
-// The slice is modified if the bitlength of x <= 8*len(z) (padding with zeros).
-// If x does not fit in the slice or is negative, z is not modified.
-func BigInt2BytesLe(z []byte, x *big.Int) {
- xLen := (x.BitLen() + 7) >> 3
- zLen := len(z)
- if zLen >= xLen && x.Sign() >= 0 {
- y := x.Bytes()
- for i := 0; i < xLen; i++ {
- z[i] = y[xLen-1-i]
- }
- for i := xLen; i < zLen; i++ {
- z[i] = 0
- }
- }
-}
-
-// Uint64Le2BigInt converts a little-endian slice x into a big number.
-func Uint64Le2BigInt(x []uint64) *big.Int {
- n := len(x)
- b := new(big.Int)
- var bi big.Int
- for i := n - 1; i >= 0; i-- {
- bi.SetUint64(x[i])
- b.Lsh(b, 64)
- b.Add(b, &bi)
- }
- return b
-}
-
-// Uint64Le2BytesLe converts a little-endian slice x to a little-endian slice of bytes.
-func Uint64Le2BytesLe(x []uint64) []byte {
- b := make([]byte, 8*len(x))
- n := len(x)
- for i := 0; i < n; i++ {
- binary.LittleEndian.PutUint64(b[i*8:], x[i])
- }
- return b
-}
-
-// Uint64Le2BytesBe converts a little-endian slice x to a big-endian slice of bytes.
-func Uint64Le2BytesBe(x []uint64) []byte {
- b := make([]byte, 8*len(x))
- n := len(x)
- for i := 0; i < n; i++ {
- binary.BigEndian.PutUint64(b[i*8:], x[n-1-i])
- }
- return b
-}
-
-// Uint64Le2Hex returns an hexadecimal string of a number stored in a
-// little-endian order slice x.
-func Uint64Le2Hex(x []uint64) string {
- b := new(strings.Builder)
- b.Grow(16*len(x) + 2)
- fmt.Fprint(b, "0x")
- if len(x) == 0 {
- fmt.Fprint(b, "00")
- }
- for i := len(x) - 1; i >= 0; i-- {
- fmt.Fprintf(b, "%016x", x[i])
- }
- return b.String()
-}
-
-// BigInt2Uint64Le stores a positive big.Int number x into a little-endian slice z.
-// The slice is modified if the bitlength of x <= 8*len(z) (padding with zeros).
-// If x does not fit in the slice or is negative, z is not modified.
-func BigInt2Uint64Le(z []uint64, x *big.Int) {
- xLen := (x.BitLen() + 63) >> 6 // number of 64-bit words
- zLen := len(z)
- if zLen >= xLen && x.Sign() > 0 {
- var y, yi big.Int
- y.Set(x)
- two64 := big.NewInt(1)
- two64.Lsh(two64, 64).Sub(two64, big.NewInt(1))
- for i := 0; i < xLen; i++ {
- yi.And(&y, two64)
- z[i] = yi.Uint64()
- y.Rsh(&y, 64)
- }
- }
- for i := xLen; i < zLen; i++ {
- z[i] = 0
- }
-}
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/doc.go b/vendor/github.com/cloudflare/circl/internal/sha3/doc.go
deleted file mode 100644
index 7e023090..00000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/doc.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package sha3 implements the SHA-3 fixed-output-length hash functions and
-// the SHAKE variable-output-length hash functions defined by FIPS-202.
-//
-// Both types of hash function use the "sponge" construction and the Keccak
-// permutation. For a detailed specification see http://keccak.noekeon.org/
-//
-// # Guidance
-//
-// If you aren't sure what function you need, use SHAKE256 with at least 64
-// bytes of output. The SHAKE instances are faster than the SHA3 instances;
-// the latter have to allocate memory to conform to the hash.Hash interface.
-//
-// If you need a secret-key MAC (message authentication code), prepend the
-// secret key to the input, hash with SHAKE256 and read at least 32 bytes of
-// output.
-//
-// # Security strengths
-//
-// The SHA3-x (x equals 224, 256, 384, or 512) functions have a security
-// strength against preimage attacks of x bits. Since they only produce "x"
-// bits of output, their collision-resistance is only "x/2" bits.
-//
-// The SHAKE-256 and -128 functions have a generic security strength of 256 and
-// 128 bits against all attacks, provided that at least 2x bits of their output
-// is used. Requesting more than 64 or 32 bytes of output, respectively, does
-// not increase the collision-resistance of the SHAKE functions.
-//
-// # The sponge construction
-//
-// A sponge builds a pseudo-random function from a public pseudo-random
-// permutation, by applying the permutation to a state of "rate + capacity"
-// bytes, but hiding "capacity" of the bytes.
-//
-// A sponge starts out with a zero state. To hash an input using a sponge, up
-// to "rate" bytes of the input are XORed into the sponge's state. The sponge
-// is then "full" and the permutation is applied to "empty" it. This process is
-// repeated until all the input has been "absorbed". The input is then padded.
-// The digest is "squeezed" from the sponge in the same way, except that output
-// is copied out instead of input being XORed in.
-//
-// A sponge is parameterized by its generic security strength, which is equal
-// to half its capacity; capacity + rate is equal to the permutation's width.
-// Since the KeccakF-1600 permutation is 1600 bits (200 bytes) wide, this means
-// that the security strength of a sponge instance is equal to (1600 - bitrate) / 2.
-//
-// # Recommendations
-//
-// The SHAKE functions are recommended for most new uses. They can produce
-// output of arbitrary length. SHAKE256, with an output length of at least
-// 64 bytes, provides 256-bit security against all attacks. The Keccak team
-// recommends it for most applications upgrading from SHA2-512. (NIST chose a
-// much stronger, but much slower, sponge instance for SHA3-512.)
-//
-// The SHA-3 functions are "drop-in" replacements for the SHA-2 functions.
-// They produce output of the same length, with the same security strengths
-// against all attacks. This means, in particular, that SHA3-256 only has
-// 128-bit collision resistance, because its output length is 32 bytes.
-package sha3
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go b/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go
deleted file mode 100644
index 7d2365a7..00000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/hashes.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// This file provides functions for creating instances of the SHA-3
-// and SHAKE hash functions, as well as utility functions for hashing
-// bytes.
-
-// New224 creates a new SHA3-224 hash.
-// Its generic security strength is 224 bits against preimage attacks,
-// and 112 bits against collision attacks.
-func New224() State {
- return State{rate: 144, outputLen: 28, dsbyte: 0x06}
-}
-
-// New256 creates a new SHA3-256 hash.
-// Its generic security strength is 256 bits against preimage attacks,
-// and 128 bits against collision attacks.
-func New256() State {
- return State{rate: 136, outputLen: 32, dsbyte: 0x06}
-}
-
-// New384 creates a new SHA3-384 hash.
-// Its generic security strength is 384 bits against preimage attacks,
-// and 192 bits against collision attacks.
-func New384() State {
- return State{rate: 104, outputLen: 48, dsbyte: 0x06}
-}
-
-// New512 creates a new SHA3-512 hash.
-// Its generic security strength is 512 bits against preimage attacks,
-// and 256 bits against collision attacks.
-func New512() State {
- return State{rate: 72, outputLen: 64, dsbyte: 0x06}
-}
-
-// Sum224 returns the SHA3-224 digest of the data.
-func Sum224(data []byte) (digest [28]byte) {
- h := New224()
- _, _ = h.Write(data)
- h.Sum(digest[:0])
- return
-}
-
-// Sum256 returns the SHA3-256 digest of the data.
-func Sum256(data []byte) (digest [32]byte) {
- h := New256()
- _, _ = h.Write(data)
- h.Sum(digest[:0])
- return
-}
-
-// Sum384 returns the SHA3-384 digest of the data.
-func Sum384(data []byte) (digest [48]byte) {
- h := New384()
- _, _ = h.Write(data)
- h.Sum(digest[:0])
- return
-}
-
-// Sum512 returns the SHA3-512 digest of the data.
-func Sum512(data []byte) (digest [64]byte) {
- h := New512()
- _, _ = h.Write(data)
- h.Sum(digest[:0])
- return
-}
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go b/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go
deleted file mode 100644
index 1755fd1e..00000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/keccakf.go
+++ /dev/null
@@ -1,391 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// KeccakF1600 applies the Keccak permutation to a 1600b-wide
-// state represented as a slice of 25 uint64s.
-// If turbo is true, applies the 12-round variant instead of the
-// regular 24-round variant.
-// nolint:funlen
-func KeccakF1600(a *[25]uint64, turbo bool) {
- // Implementation translated from Keccak-inplace.c
- // in the keccak reference code.
- var t, bc0, bc1, bc2, bc3, bc4, d0, d1, d2, d3, d4 uint64
-
- i := 0
-
- if turbo {
- i = 12
- }
-
- for ; i < 24; i += 4 {
- // Combines the 5 steps in each round into 2 steps.
- // Unrolls 4 rounds per loop and spreads some steps across rounds.
-
- // Round 1
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[6] ^ d1
- bc1 = t<<44 | t>>(64-44)
- t = a[12] ^ d2
- bc2 = t<<43 | t>>(64-43)
- t = a[18] ^ d3
- bc3 = t<<21 | t>>(64-21)
- t = a[24] ^ d4
- bc4 = t<<14 | t>>(64-14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i]
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc2 = t<<3 | t>>(64-3)
- t = a[16] ^ d1
- bc3 = t<<45 | t>>(64-45)
- t = a[22] ^ d2
- bc4 = t<<61 | t>>(64-61)
- t = a[3] ^ d3
- bc0 = t<<28 | t>>(64-28)
- t = a[9] ^ d4
- bc1 = t<<20 | t>>(64-20)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc4 = t<<18 | t>>(64-18)
- t = a[1] ^ d1
- bc0 = t<<1 | t>>(64-1)
- t = a[7] ^ d2
- bc1 = t<<6 | t>>(64-6)
- t = a[13] ^ d3
- bc2 = t<<25 | t>>(64-25)
- t = a[19] ^ d4
- bc3 = t<<8 | t>>(64-8)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc1 = t<<36 | t>>(64-36)
- t = a[11] ^ d1
- bc2 = t<<10 | t>>(64-10)
- t = a[17] ^ d2
- bc3 = t<<15 | t>>(64-15)
- t = a[23] ^ d3
- bc4 = t<<56 | t>>(64-56)
- t = a[4] ^ d4
- bc0 = t<<27 | t>>(64-27)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc3 = t<<41 | t>>(64-41)
- t = a[21] ^ d1
- bc4 = t<<2 | t>>(64-2)
- t = a[2] ^ d2
- bc0 = t<<62 | t>>(64-62)
- t = a[8] ^ d3
- bc1 = t<<55 | t>>(64-55)
- t = a[14] ^ d4
- bc2 = t<<39 | t>>(64-39)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- // Round 2
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[16] ^ d1
- bc1 = t<<44 | t>>(64-44)
- t = a[7] ^ d2
- bc2 = t<<43 | t>>(64-43)
- t = a[23] ^ d3
- bc3 = t<<21 | t>>(64-21)
- t = a[14] ^ d4
- bc4 = t<<14 | t>>(64-14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+1]
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc2 = t<<3 | t>>(64-3)
- t = a[11] ^ d1
- bc3 = t<<45 | t>>(64-45)
- t = a[2] ^ d2
- bc4 = t<<61 | t>>(64-61)
- t = a[18] ^ d3
- bc0 = t<<28 | t>>(64-28)
- t = a[9] ^ d4
- bc1 = t<<20 | t>>(64-20)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc4 = t<<18 | t>>(64-18)
- t = a[6] ^ d1
- bc0 = t<<1 | t>>(64-1)
- t = a[22] ^ d2
- bc1 = t<<6 | t>>(64-6)
- t = a[13] ^ d3
- bc2 = t<<25 | t>>(64-25)
- t = a[4] ^ d4
- bc3 = t<<8 | t>>(64-8)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc1 = t<<36 | t>>(64-36)
- t = a[1] ^ d1
- bc2 = t<<10 | t>>(64-10)
- t = a[17] ^ d2
- bc3 = t<<15 | t>>(64-15)
- t = a[8] ^ d3
- bc4 = t<<56 | t>>(64-56)
- t = a[24] ^ d4
- bc0 = t<<27 | t>>(64-27)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc3 = t<<41 | t>>(64-41)
- t = a[21] ^ d1
- bc4 = t<<2 | t>>(64-2)
- t = a[12] ^ d2
- bc0 = t<<62 | t>>(64-62)
- t = a[3] ^ d3
- bc1 = t<<55 | t>>(64-55)
- t = a[19] ^ d4
- bc2 = t<<39 | t>>(64-39)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- // Round 3
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[11] ^ d1
- bc1 = t<<44 | t>>(64-44)
- t = a[22] ^ d2
- bc2 = t<<43 | t>>(64-43)
- t = a[8] ^ d3
- bc3 = t<<21 | t>>(64-21)
- t = a[19] ^ d4
- bc4 = t<<14 | t>>(64-14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+2]
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc2 = t<<3 | t>>(64-3)
- t = a[1] ^ d1
- bc3 = t<<45 | t>>(64-45)
- t = a[12] ^ d2
- bc4 = t<<61 | t>>(64-61)
- t = a[23] ^ d3
- bc0 = t<<28 | t>>(64-28)
- t = a[9] ^ d4
- bc1 = t<<20 | t>>(64-20)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc4 = t<<18 | t>>(64-18)
- t = a[16] ^ d1
- bc0 = t<<1 | t>>(64-1)
- t = a[2] ^ d2
- bc1 = t<<6 | t>>(64-6)
- t = a[13] ^ d3
- bc2 = t<<25 | t>>(64-25)
- t = a[24] ^ d4
- bc3 = t<<8 | t>>(64-8)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc1 = t<<36 | t>>(64-36)
- t = a[6] ^ d1
- bc2 = t<<10 | t>>(64-10)
- t = a[17] ^ d2
- bc3 = t<<15 | t>>(64-15)
- t = a[3] ^ d3
- bc4 = t<<56 | t>>(64-56)
- t = a[14] ^ d4
- bc0 = t<<27 | t>>(64-27)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc3 = t<<41 | t>>(64-41)
- t = a[21] ^ d1
- bc4 = t<<2 | t>>(64-2)
- t = a[7] ^ d2
- bc0 = t<<62 | t>>(64-62)
- t = a[18] ^ d3
- bc1 = t<<55 | t>>(64-55)
- t = a[4] ^ d4
- bc2 = t<<39 | t>>(64-39)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- // Round 4
- bc0 = a[0] ^ a[5] ^ a[10] ^ a[15] ^ a[20]
- bc1 = a[1] ^ a[6] ^ a[11] ^ a[16] ^ a[21]
- bc2 = a[2] ^ a[7] ^ a[12] ^ a[17] ^ a[22]
- bc3 = a[3] ^ a[8] ^ a[13] ^ a[18] ^ a[23]
- bc4 = a[4] ^ a[9] ^ a[14] ^ a[19] ^ a[24]
- d0 = bc4 ^ (bc1<<1 | bc1>>63)
- d1 = bc0 ^ (bc2<<1 | bc2>>63)
- d2 = bc1 ^ (bc3<<1 | bc3>>63)
- d3 = bc2 ^ (bc4<<1 | bc4>>63)
- d4 = bc3 ^ (bc0<<1 | bc0>>63)
-
- bc0 = a[0] ^ d0
- t = a[1] ^ d1
- bc1 = t<<44 | t>>(64-44)
- t = a[2] ^ d2
- bc2 = t<<43 | t>>(64-43)
- t = a[3] ^ d3
- bc3 = t<<21 | t>>(64-21)
- t = a[4] ^ d4
- bc4 = t<<14 | t>>(64-14)
- a[0] = bc0 ^ (bc2 &^ bc1) ^ RC[i+3]
- a[1] = bc1 ^ (bc3 &^ bc2)
- a[2] = bc2 ^ (bc4 &^ bc3)
- a[3] = bc3 ^ (bc0 &^ bc4)
- a[4] = bc4 ^ (bc1 &^ bc0)
-
- t = a[5] ^ d0
- bc2 = t<<3 | t>>(64-3)
- t = a[6] ^ d1
- bc3 = t<<45 | t>>(64-45)
- t = a[7] ^ d2
- bc4 = t<<61 | t>>(64-61)
- t = a[8] ^ d3
- bc0 = t<<28 | t>>(64-28)
- t = a[9] ^ d4
- bc1 = t<<20 | t>>(64-20)
- a[5] = bc0 ^ (bc2 &^ bc1)
- a[6] = bc1 ^ (bc3 &^ bc2)
- a[7] = bc2 ^ (bc4 &^ bc3)
- a[8] = bc3 ^ (bc0 &^ bc4)
- a[9] = bc4 ^ (bc1 &^ bc0)
-
- t = a[10] ^ d0
- bc4 = t<<18 | t>>(64-18)
- t = a[11] ^ d1
- bc0 = t<<1 | t>>(64-1)
- t = a[12] ^ d2
- bc1 = t<<6 | t>>(64-6)
- t = a[13] ^ d3
- bc2 = t<<25 | t>>(64-25)
- t = a[14] ^ d4
- bc3 = t<<8 | t>>(64-8)
- a[10] = bc0 ^ (bc2 &^ bc1)
- a[11] = bc1 ^ (bc3 &^ bc2)
- a[12] = bc2 ^ (bc4 &^ bc3)
- a[13] = bc3 ^ (bc0 &^ bc4)
- a[14] = bc4 ^ (bc1 &^ bc0)
-
- t = a[15] ^ d0
- bc1 = t<<36 | t>>(64-36)
- t = a[16] ^ d1
- bc2 = t<<10 | t>>(64-10)
- t = a[17] ^ d2
- bc3 = t<<15 | t>>(64-15)
- t = a[18] ^ d3
- bc4 = t<<56 | t>>(64-56)
- t = a[19] ^ d4
- bc0 = t<<27 | t>>(64-27)
- a[15] = bc0 ^ (bc2 &^ bc1)
- a[16] = bc1 ^ (bc3 &^ bc2)
- a[17] = bc2 ^ (bc4 &^ bc3)
- a[18] = bc3 ^ (bc0 &^ bc4)
- a[19] = bc4 ^ (bc1 &^ bc0)
-
- t = a[20] ^ d0
- bc3 = t<<41 | t>>(64-41)
- t = a[21] ^ d1
- bc4 = t<<2 | t>>(64-2)
- t = a[22] ^ d2
- bc0 = t<<62 | t>>(64-62)
- t = a[23] ^ d3
- bc1 = t<<55 | t>>(64-55)
- t = a[24] ^ d4
- bc2 = t<<39 | t>>(64-39)
- a[20] = bc0 ^ (bc2 &^ bc1)
- a[21] = bc1 ^ (bc3 &^ bc2)
- a[22] = bc2 ^ (bc4 &^ bc3)
- a[23] = bc3 ^ (bc0 &^ bc4)
- a[24] = bc4 ^ (bc1 &^ bc0)
- }
-}
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/rc.go b/vendor/github.com/cloudflare/circl/internal/sha3/rc.go
deleted file mode 100644
index 6a3df42f..00000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/rc.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package sha3
-
-// RC stores the round constants for use in the ι step.
-var RC = [24]uint64{
- 0x0000000000000001,
- 0x0000000000008082,
- 0x800000000000808A,
- 0x8000000080008000,
- 0x000000000000808B,
- 0x0000000080000001,
- 0x8000000080008081,
- 0x8000000000008009,
- 0x000000000000008A,
- 0x0000000000000088,
- 0x0000000080008009,
- 0x000000008000000A,
- 0x000000008000808B,
- 0x800000000000008B,
- 0x8000000000008089,
- 0x8000000000008003,
- 0x8000000000008002,
- 0x8000000000000080,
- 0x000000000000800A,
- 0x800000008000000A,
- 0x8000000080008081,
- 0x8000000000008080,
- 0x0000000080000001,
- 0x8000000080008008,
-}
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go b/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go
deleted file mode 100644
index a0df5aa6..00000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/sha3.go
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// spongeDirection indicates the direction bytes are flowing through the sponge.
-type spongeDirection int
-
-const (
- // spongeAbsorbing indicates that the sponge is absorbing input.
- spongeAbsorbing spongeDirection = iota
- // spongeSqueezing indicates that the sponge is being squeezed.
- spongeSqueezing
-)
-
-const (
- // maxRate is the maximum size of the internal buffer. SHAKE-256
- // currently needs the largest buffer.
- maxRate = 168
-)
-
-func (d *State) buf() []byte {
- return d.storage.asBytes()[d.bufo:d.bufe]
-}
-
-type State struct {
- // Generic sponge components.
- a [25]uint64 // main state of the hash
- rate int // the number of bytes of state to use
-
- bufo int // offset of buffer in storage
- bufe int // end of buffer in storage
-
- // dsbyte contains the "domain separation" bits and the first bit of
- // the padding. Sections 6.1 and 6.2 of [1] separate the outputs of the
- // SHA-3 and SHAKE functions by appending bitstrings to the message.
- // Using a little-endian bit-ordering convention, these are "01" for SHA-3
- // and "1111" for SHAKE, or 00000010b and 00001111b, respectively. Then the
- // padding rule from section 5.1 is applied to pad the message to a multiple
- // of the rate, which involves adding a "1" bit, zero or more "0" bits, and
- // a final "1" bit. We merge the first "1" bit from the padding into dsbyte,
- // giving 00000110b (0x06) and 00011111b (0x1f).
- // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf
- // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and
- // Extendable-Output Functions (May 2014)"
- dsbyte byte
-
- storage storageBuf
-
- // Specific to SHA-3 and SHAKE.
- outputLen int // the default output size in bytes
- state spongeDirection // whether the sponge is absorbing or squeezing
- turbo bool // Whether we're using 12 rounds instead of 24
-}
-
-// BlockSize returns the rate of sponge underlying this hash function.
-func (d *State) BlockSize() int { return d.rate }
-
-// Size returns the output size of the hash function in bytes.
-func (d *State) Size() int { return d.outputLen }
-
-// Reset clears the internal state by zeroing the sponge state and
-// the byte buffer, and setting Sponge.state to absorbing.
-func (d *State) Reset() {
- // Zero the permutation's state.
- for i := range d.a {
- d.a[i] = 0
- }
- d.state = spongeAbsorbing
- d.bufo = 0
- d.bufe = 0
-}
-
-func (d *State) clone() *State {
- ret := *d
- return &ret
-}
-
-// permute applies the KeccakF-1600 permutation. It handles
-// any input-output buffering.
-func (d *State) permute() {
- switch d.state {
- case spongeAbsorbing:
- // If we're absorbing, we need to xor the input into the state
- // before applying the permutation.
- xorIn(d, d.buf())
- d.bufe = 0
- d.bufo = 0
- KeccakF1600(&d.a, d.turbo)
- case spongeSqueezing:
- // If we're squeezing, we need to apply the permutation before
- // copying more output.
- KeccakF1600(&d.a, d.turbo)
- d.bufe = d.rate
- d.bufo = 0
- copyOut(d, d.buf())
- }
-}
-
-// pads appends the domain separation bits in dsbyte, applies
-// the multi-bitrate 10..1 padding rule, and permutes the state.
-func (d *State) padAndPermute(dsbyte byte) {
- // Pad with this instance's domain-separator bits. We know that there's
- // at least one byte of space in d.buf() because, if it were full,
- // permute would have been called to empty it. dsbyte also contains the
- // first one bit for the padding. See the comment in the state struct.
- zerosStart := d.bufe + 1
- d.bufe = d.rate
- buf := d.buf()
- buf[zerosStart-1] = dsbyte
- for i := zerosStart; i < d.rate; i++ {
- buf[i] = 0
- }
- // This adds the final one bit for the padding. Because of the way that
- // bits are numbered from the LSB upwards, the final bit is the MSB of
- // the last byte.
- buf[d.rate-1] ^= 0x80
- // Apply the permutation
- d.permute()
- d.state = spongeSqueezing
- d.bufe = d.rate
- copyOut(d, buf)
-}
-
-// Write absorbs more data into the hash's state. It produces an error
-// if more data is written to the ShakeHash after writing
-func (d *State) Write(p []byte) (written int, err error) {
- if d.state != spongeAbsorbing {
- panic("sha3: write to sponge after read")
- }
- written = len(p)
-
- for len(p) > 0 {
- bufl := d.bufe - d.bufo
- if bufl == 0 && len(p) >= d.rate {
- // The fast path; absorb a full "rate" bytes of input and apply the permutation.
- xorIn(d, p[:d.rate])
- p = p[d.rate:]
- KeccakF1600(&d.a, d.turbo)
- } else {
- // The slow path; buffer the input until we can fill the sponge, and then xor it in.
- todo := d.rate - bufl
- if todo > len(p) {
- todo = len(p)
- }
- d.bufe += todo
- buf := d.buf()
- copy(buf[bufl:], p[:todo])
- p = p[todo:]
-
- // If the sponge is full, apply the permutation.
- if d.bufe == d.rate {
- d.permute()
- }
- }
- }
-
- return written, nil
-}
-
-// Read squeezes an arbitrary number of bytes from the sponge.
-func (d *State) Read(out []byte) (n int, err error) {
- // If we're still absorbing, pad and apply the permutation.
- if d.state == spongeAbsorbing {
- d.padAndPermute(d.dsbyte)
- }
-
- n = len(out)
-
- // Now, do the squeezing.
- for len(out) > 0 {
- buf := d.buf()
- n := copy(out, buf)
- d.bufo += n
- out = out[n:]
-
- // Apply the permutation if we've squeezed the sponge dry.
- if d.bufo == d.bufe {
- d.permute()
- }
- }
-
- return
-}
-
-// Sum applies padding to the hash state and then squeezes out the desired
-// number of output bytes.
-func (d *State) Sum(in []byte) []byte {
- // Make a copy of the original hash so that caller can keep writing
- // and summing.
- dup := d.clone()
- hash := make([]byte, dup.outputLen)
- _, _ = dup.Read(hash)
- return append(in, hash...)
-}
-
-func (d *State) IsAbsorbing() bool {
- return d.state == spongeAbsorbing
-}
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s b/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s
deleted file mode 100644
index 8a4458f6..00000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/sha3_s390x.s
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !gccgo,!appengine
-
-#include "textflag.h"
-
-// func kimd(function code, chain *[200]byte, src []byte)
-TEXT ·kimd(SB), NOFRAME|NOSPLIT, $0-40
- MOVD function+0(FP), R0
- MOVD chain+8(FP), R1
- LMG src+16(FP), R2, R3 // R2=base, R3=len
-
-continue:
- WORD $0xB93E0002 // KIMD --, R2
- BVS continue // continue if interrupted
- MOVD $0, R0 // reset R0 for pre-go1.8 compilers
- RET
-
-// func klmd(function code, chain *[200]byte, dst, src []byte)
-TEXT ·klmd(SB), NOFRAME|NOSPLIT, $0-64
- // TODO: SHAKE support
- MOVD function+0(FP), R0
- MOVD chain+8(FP), R1
- LMG dst+16(FP), R2, R3 // R2=base, R3=len
- LMG src+40(FP), R4, R5 // R4=base, R5=len
-
-continue:
- WORD $0xB93F0024 // KLMD R2, R4
- BVS continue // continue if interrupted
- MOVD $0, R0 // reset R0 for pre-go1.8 compilers
- RET
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/shake.go b/vendor/github.com/cloudflare/circl/internal/sha3/shake.go
deleted file mode 100644
index 77817f75..00000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/shake.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package sha3
-
-// This file defines the ShakeHash interface, and provides
-// functions for creating SHAKE and cSHAKE instances, as well as utility
-// functions for hashing bytes to arbitrary-length output.
-//
-//
-// SHAKE implementation is based on FIPS PUB 202 [1]
-// cSHAKE implementations is based on NIST SP 800-185 [2]
-//
-// [1] https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.202.pdf
-// [2] https://doi.org/10.6028/NIST.SP.800-185
-
-import (
- "io"
-)
-
-// ShakeHash defines the interface to hash functions that
-// support arbitrary-length output.
-type ShakeHash interface {
- // Write absorbs more data into the hash's state. It panics if input is
- // written to it after output has been read from it.
- io.Writer
-
- // Read reads more output from the hash; reading affects the hash's
- // state. (ShakeHash.Read is thus very different from Hash.Sum)
- // It never returns an error.
- io.Reader
-
- // Clone returns a copy of the ShakeHash in its current state.
- Clone() ShakeHash
-
- // Reset resets the ShakeHash to its initial state.
- Reset()
-}
-
-// Consts for configuring initial SHA-3 state
-const (
- dsbyteShake = 0x1f
- rate128 = 168
- rate256 = 136
-)
-
-// Clone returns copy of SHAKE context within its current state.
-func (d *State) Clone() ShakeHash {
- return d.clone()
-}
-
-// NewShake128 creates a new SHAKE128 variable-output-length ShakeHash.
-// Its generic security strength is 128 bits against all attacks if at
-// least 32 bytes of its output are used.
-func NewShake128() State {
- return State{rate: rate128, dsbyte: dsbyteShake}
-}
-
-// NewTurboShake128 creates a new TurboSHAKE128 variable-output-length ShakeHash.
-// Its generic security strength is 128 bits against all attacks if at
-// least 32 bytes of its output are used.
-// D is the domain separation byte and must be between 0x01 and 0x7f inclusive.
-func NewTurboShake128(D byte) State {
- if D == 0 || D > 0x7f {
- panic("turboshake: D out of range")
- }
- return State{rate: rate128, dsbyte: D, turbo: true}
-}
-
-// NewShake256 creates a new SHAKE256 variable-output-length ShakeHash.
-// Its generic security strength is 256 bits against all attacks if
-// at least 64 bytes of its output are used.
-func NewShake256() State {
- return State{rate: rate256, dsbyte: dsbyteShake}
-}
-
-// NewTurboShake256 creates a new TurboSHAKE256 variable-output-length ShakeHash.
-// Its generic security strength is 256 bits against all attacks if
-// at least 64 bytes of its output are used.
-// D is the domain separation byte and must be between 0x01 and 0x7f inclusive.
-func NewTurboShake256(D byte) State {
- if D == 0 || D > 0x7f {
- panic("turboshake: D out of range")
- }
- return State{rate: rate256, dsbyte: D, turbo: true}
-}
-
-// ShakeSum128 writes an arbitrary-length digest of data into hash.
-func ShakeSum128(hash, data []byte) {
- h := NewShake128()
- _, _ = h.Write(data)
- _, _ = h.Read(hash)
-}
-
-// ShakeSum256 writes an arbitrary-length digest of data into hash.
-func ShakeSum256(hash, data []byte) {
- h := NewShake256()
- _, _ = h.Write(data)
- _, _ = h.Read(hash)
-}
-
-// TurboShakeSum128 writes an arbitrary-length digest of data into hash.
-func TurboShakeSum128(hash, data []byte, D byte) {
- h := NewTurboShake128(D)
- _, _ = h.Write(data)
- _, _ = h.Read(hash)
-}
-
-// TurboShakeSum256 writes an arbitrary-length digest of data into hash.
-func TurboShakeSum256(hash, data []byte, D byte) {
- h := NewTurboShake256(D)
- _, _ = h.Write(data)
- _, _ = h.Read(hash)
-}
-
-func (d *State) SwitchDS(D byte) {
- d.dsbyte = D
-}
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/xor.go b/vendor/github.com/cloudflare/circl/internal/sha3/xor.go
deleted file mode 100644
index 1e213374..00000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/xor.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (!amd64 && !386 && !ppc64le) || appengine
-// +build !amd64,!386,!ppc64le appengine
-
-package sha3
-
-// A storageBuf is an aligned array of maxRate bytes.
-type storageBuf [maxRate]byte
-
-func (b *storageBuf) asBytes() *[maxRate]byte {
- return (*[maxRate]byte)(b)
-}
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go b/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go
deleted file mode 100644
index 2b0c6617..00000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/xor_generic.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (!amd64 || appengine) && (!386 || appengine) && (!ppc64le || appengine)
-// +build !amd64 appengine
-// +build !386 appengine
-// +build !ppc64le appengine
-
-package sha3
-
-import "encoding/binary"
-
-// xorIn xors the bytes in buf into the state; it
-// makes no non-portable assumptions about memory layout
-// or alignment.
-func xorIn(d *State, buf []byte) {
- n := len(buf) / 8
-
- for i := 0; i < n; i++ {
- a := binary.LittleEndian.Uint64(buf)
- d.a[i] ^= a
- buf = buf[8:]
- }
-}
-
-// copyOut copies ulint64s to a byte buffer.
-func copyOut(d *State, b []byte) {
- for i := 0; len(b) >= 8; i++ {
- binary.LittleEndian.PutUint64(b, d.a[i])
- b = b[8:]
- }
-}
diff --git a/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go b/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go
deleted file mode 100644
index 052fc8d3..00000000
--- a/vendor/github.com/cloudflare/circl/internal/sha3/xor_unaligned.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build (amd64 || 386 || ppc64le) && !appengine
-// +build amd64 386 ppc64le
-// +build !appengine
-
-package sha3
-
-import "unsafe"
-
-// A storageBuf is an aligned array of maxRate bytes.
-type storageBuf [maxRate / 8]uint64
-
-func (b *storageBuf) asBytes() *[maxRate]byte {
- return (*[maxRate]byte)(unsafe.Pointer(b))
-}
-
-// xorInuses unaligned reads and writes to update d.a to contain d.a
-// XOR buf.
-func xorIn(d *State, buf []byte) {
- n := len(buf)
- bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8]
- if n >= 72 {
- d.a[0] ^= bw[0]
- d.a[1] ^= bw[1]
- d.a[2] ^= bw[2]
- d.a[3] ^= bw[3]
- d.a[4] ^= bw[4]
- d.a[5] ^= bw[5]
- d.a[6] ^= bw[6]
- d.a[7] ^= bw[7]
- d.a[8] ^= bw[8]
- }
- if n >= 104 {
- d.a[9] ^= bw[9]
- d.a[10] ^= bw[10]
- d.a[11] ^= bw[11]
- d.a[12] ^= bw[12]
- }
- if n >= 136 {
- d.a[13] ^= bw[13]
- d.a[14] ^= bw[14]
- d.a[15] ^= bw[15]
- d.a[16] ^= bw[16]
- }
- if n >= 144 {
- d.a[17] ^= bw[17]
- }
- if n >= 168 {
- d.a[18] ^= bw[18]
- d.a[19] ^= bw[19]
- d.a[20] ^= bw[20]
- }
-}
-
-func copyOut(d *State, buf []byte) {
- ab := (*[maxRate]uint8)(unsafe.Pointer(&d.a[0]))
- copy(buf, ab[:])
-}
diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp.go b/vendor/github.com/cloudflare/circl/math/fp25519/fp.go
deleted file mode 100644
index 57a50ff5..00000000
--- a/vendor/github.com/cloudflare/circl/math/fp25519/fp.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Package fp25519 provides prime field arithmetic over GF(2^255-19).
-package fp25519
-
-import (
- "errors"
-
- "github.com/cloudflare/circl/internal/conv"
-)
-
-// Size in bytes of an element.
-const Size = 32
-
-// Elt is a prime field element.
-type Elt [Size]byte
-
-func (e Elt) String() string { return conv.BytesLe2Hex(e[:]) }
-
-// p is the prime modulus 2^255-19.
-var p = Elt{
- 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f,
-}
-
-// P returns the prime modulus 2^255-19.
-func P() Elt { return p }
-
-// ToBytes stores in b the little-endian byte representation of x.
-func ToBytes(b []byte, x *Elt) error {
- if len(b) != Size {
- return errors.New("wrong size")
- }
- Modp(x)
- copy(b, x[:])
- return nil
-}
-
-// IsZero returns true if x is equal to 0.
-func IsZero(x *Elt) bool { Modp(x); return *x == Elt{} }
-
-// SetOne assigns x=1.
-func SetOne(x *Elt) { *x = Elt{}; x[0] = 1 }
-
-// Neg calculates z = -x.
-func Neg(z, x *Elt) { Sub(z, &p, x) }
-
-// InvSqrt calculates z = sqrt(x/y) iff x/y is a quadratic-residue, which is
-// indicated by returning isQR = true. Otherwise, when x/y is a quadratic
-// non-residue, z will have an undetermined value and isQR = false.
-func InvSqrt(z, x, y *Elt) (isQR bool) {
- sqrtMinusOne := &Elt{
- 0xb0, 0xa0, 0x0e, 0x4a, 0x27, 0x1b, 0xee, 0xc4,
- 0x78, 0xe4, 0x2f, 0xad, 0x06, 0x18, 0x43, 0x2f,
- 0xa7, 0xd7, 0xfb, 0x3d, 0x99, 0x00, 0x4d, 0x2b,
- 0x0b, 0xdf, 0xc1, 0x4f, 0x80, 0x24, 0x83, 0x2b,
- }
- t0, t1, t2, t3 := &Elt{}, &Elt{}, &Elt{}, &Elt{}
-
- Mul(t0, x, y) // t0 = u*v
- Sqr(t1, y) // t1 = v^2
- Mul(t2, t0, t1) // t2 = u*v^3
- Sqr(t0, t1) // t0 = v^4
- Mul(t1, t0, t2) // t1 = u*v^7
-
- var Tab [4]*Elt
- Tab[0] = &Elt{}
- Tab[1] = &Elt{}
- Tab[2] = t3
- Tab[3] = t1
-
- *Tab[0] = *t1
- Sqr(Tab[0], Tab[0])
- Sqr(Tab[1], Tab[0])
- Sqr(Tab[1], Tab[1])
- Mul(Tab[1], Tab[1], Tab[3])
- Mul(Tab[0], Tab[0], Tab[1])
- Sqr(Tab[0], Tab[0])
- Mul(Tab[0], Tab[0], Tab[1])
- Sqr(Tab[1], Tab[0])
- for i := 0; i < 4; i++ {
- Sqr(Tab[1], Tab[1])
- }
- Mul(Tab[1], Tab[1], Tab[0])
- Sqr(Tab[2], Tab[1])
- for i := 0; i < 4; i++ {
- Sqr(Tab[2], Tab[2])
- }
- Mul(Tab[2], Tab[2], Tab[0])
- Sqr(Tab[1], Tab[2])
- for i := 0; i < 14; i++ {
- Sqr(Tab[1], Tab[1])
- }
- Mul(Tab[1], Tab[1], Tab[2])
- Sqr(Tab[2], Tab[1])
- for i := 0; i < 29; i++ {
- Sqr(Tab[2], Tab[2])
- }
- Mul(Tab[2], Tab[2], Tab[1])
- Sqr(Tab[1], Tab[2])
- for i := 0; i < 59; i++ {
- Sqr(Tab[1], Tab[1])
- }
- Mul(Tab[1], Tab[1], Tab[2])
- for i := 0; i < 5; i++ {
- Sqr(Tab[1], Tab[1])
- }
- Mul(Tab[1], Tab[1], Tab[0])
- Sqr(Tab[2], Tab[1])
- for i := 0; i < 124; i++ {
- Sqr(Tab[2], Tab[2])
- }
- Mul(Tab[2], Tab[2], Tab[1])
- Sqr(Tab[2], Tab[2])
- Sqr(Tab[2], Tab[2])
- Mul(Tab[2], Tab[2], Tab[3])
-
- Mul(z, t3, t2) // z = xy^(p+3)/8 = xy^3*(xy^7)^(p-5)/8
- // Checking whether y z^2 == x
- Sqr(t0, z) // t0 = z^2
- Mul(t0, t0, y) // t0 = yz^2
- Sub(t1, t0, x) // t1 = t0-u
- Add(t2, t0, x) // t2 = t0+u
- if IsZero(t1) {
- return true
- } else if IsZero(t2) {
- Mul(z, z, sqrtMinusOne) // z = z*sqrt(-1)
- return true
- } else {
- return false
- }
-}
-
-// Inv calculates z = 1/x mod p.
-func Inv(z, x *Elt) {
- x0, x1, x2 := &Elt{}, &Elt{}, &Elt{}
- Sqr(x1, x)
- Sqr(x0, x1)
- Sqr(x0, x0)
- Mul(x0, x0, x)
- Mul(z, x0, x1)
- Sqr(x1, z)
- Mul(x0, x0, x1)
- Sqr(x1, x0)
- for i := 0; i < 4; i++ {
- Sqr(x1, x1)
- }
- Mul(x0, x0, x1)
- Sqr(x1, x0)
- for i := 0; i < 9; i++ {
- Sqr(x1, x1)
- }
- Mul(x1, x1, x0)
- Sqr(x2, x1)
- for i := 0; i < 19; i++ {
- Sqr(x2, x2)
- }
- Mul(x2, x2, x1)
- for i := 0; i < 10; i++ {
- Sqr(x2, x2)
- }
- Mul(x2, x2, x0)
- Sqr(x0, x2)
- for i := 0; i < 49; i++ {
- Sqr(x0, x0)
- }
- Mul(x0, x0, x2)
- Sqr(x1, x0)
- for i := 0; i < 99; i++ {
- Sqr(x1, x1)
- }
- Mul(x1, x1, x0)
- for i := 0; i < 50; i++ {
- Sqr(x1, x1)
- }
- Mul(x1, x1, x2)
- for i := 0; i < 5; i++ {
- Sqr(x1, x1)
- }
- Mul(z, z, x1)
-}
-
-// Cmov assigns y to x if n is 1.
-func Cmov(x, y *Elt, n uint) { cmov(x, y, n) }
-
-// Cswap interchanges x and y if n is 1.
-func Cswap(x, y *Elt, n uint) { cswap(x, y, n) }
-
-// Add calculates z = x+y mod p.
-func Add(z, x, y *Elt) { add(z, x, y) }
-
-// Sub calculates z = x-y mod p.
-func Sub(z, x, y *Elt) { sub(z, x, y) }
-
-// AddSub calculates (x,y) = (x+y mod p, x-y mod p).
-func AddSub(x, y *Elt) { addsub(x, y) }
-
-// Mul calculates z = x*y mod p.
-func Mul(z, x, y *Elt) { mul(z, x, y) }
-
-// Sqr calculates z = x^2 mod p.
-func Sqr(z, x *Elt) { sqr(z, x) }
-
-// Modp ensures that z is between [0,p-1].
-func Modp(z *Elt) { modp(z) }
diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go
deleted file mode 100644
index 057f0d28..00000000
--- a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.go
+++ /dev/null
@@ -1,45 +0,0 @@
-//go:build amd64 && !purego
-// +build amd64,!purego
-
-package fp25519
-
-import (
- "golang.org/x/sys/cpu"
-)
-
-var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX
-
-var _ = hasBmi2Adx
-
-func cmov(x, y *Elt, n uint) { cmovAmd64(x, y, n) }
-func cswap(x, y *Elt, n uint) { cswapAmd64(x, y, n) }
-func add(z, x, y *Elt) { addAmd64(z, x, y) }
-func sub(z, x, y *Elt) { subAmd64(z, x, y) }
-func addsub(x, y *Elt) { addsubAmd64(x, y) }
-func mul(z, x, y *Elt) { mulAmd64(z, x, y) }
-func sqr(z, x *Elt) { sqrAmd64(z, x) }
-func modp(z *Elt) { modpAmd64(z) }
-
-//go:noescape
-func cmovAmd64(x, y *Elt, n uint)
-
-//go:noescape
-func cswapAmd64(x, y *Elt, n uint)
-
-//go:noescape
-func addAmd64(z, x, y *Elt)
-
-//go:noescape
-func subAmd64(z, x, y *Elt)
-
-//go:noescape
-func addsubAmd64(x, y *Elt)
-
-//go:noescape
-func mulAmd64(z, x, y *Elt)
-
-//go:noescape
-func sqrAmd64(z, x *Elt)
-
-//go:noescape
-func modpAmd64(z *Elt)
diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h b/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h
deleted file mode 100644
index b884b584..00000000
--- a/vendor/github.com/cloudflare/circl/math/fp25519/fp_amd64.h
+++ /dev/null
@@ -1,351 +0,0 @@
-// This code was imported from https://github.com/armfazh/rfc7748_precomputed
-
-// CHECK_BMI2ADX triggers bmi2adx if supported,
-// otherwise it fallbacks to legacy code.
-#define CHECK_BMI2ADX(label, legacy, bmi2adx) \
- CMPB ·hasBmi2Adx(SB), $0 \
- JE label \
- bmi2adx \
- RET \
- label: \
- legacy \
- RET
-
-// cselect is a conditional move
-// if b=1: it copies y into x;
-// if b=0: x remains with the same value;
-// if b<> 0,1: undefined.
-// Uses: AX, DX, FLAGS
-// Instr: x86_64, cmov
-#define cselect(x,y,b) \
- TESTQ b, b \
- MOVQ 0+x, AX; MOVQ 0+y, DX; CMOVQNE DX, AX; MOVQ AX, 0+x; \
- MOVQ 8+x, AX; MOVQ 8+y, DX; CMOVQNE DX, AX; MOVQ AX, 8+x; \
- MOVQ 16+x, AX; MOVQ 16+y, DX; CMOVQNE DX, AX; MOVQ AX, 16+x; \
- MOVQ 24+x, AX; MOVQ 24+y, DX; CMOVQNE DX, AX; MOVQ AX, 24+x;
-
-// cswap is a conditional swap
-// if b=1: x,y <- y,x;
-// if b=0: x,y remain with the same values;
-// if b<> 0,1: undefined.
-// Uses: AX, DX, R8, FLAGS
-// Instr: x86_64, cmov
-#define cswap(x,y,b) \
- TESTQ b, b \
- MOVQ 0+x, AX; MOVQ AX, R8; MOVQ 0+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 0+x; MOVQ DX, 0+y; \
- MOVQ 8+x, AX; MOVQ AX, R8; MOVQ 8+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 8+x; MOVQ DX, 8+y; \
- MOVQ 16+x, AX; MOVQ AX, R8; MOVQ 16+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 16+x; MOVQ DX, 16+y; \
- MOVQ 24+x, AX; MOVQ AX, R8; MOVQ 24+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 24+x; MOVQ DX, 24+y;
-
-// additionLeg adds x and y and stores in z
-// Uses: AX, DX, R8-R11, FLAGS
-// Instr: x86_64, cmov
-#define additionLeg(z,x,y) \
- MOVL $38, AX; \
- MOVL $0, DX; \
- MOVQ 0+x, R8; ADDQ 0+y, R8; \
- MOVQ 8+x, R9; ADCQ 8+y, R9; \
- MOVQ 16+x, R10; ADCQ 16+y, R10; \
- MOVQ 24+x, R11; ADCQ 24+y, R11; \
- CMOVQCS AX, DX; \
- ADDQ DX, R8; \
- ADCQ $0, R9; MOVQ R9, 8+z; \
- ADCQ $0, R10; MOVQ R10, 16+z; \
- ADCQ $0, R11; MOVQ R11, 24+z; \
- MOVL $0, DX; \
- CMOVQCS AX, DX; \
- ADDQ DX, R8; MOVQ R8, 0+z;
-
-// additionAdx adds x and y and stores in z
-// Uses: AX, DX, R8-R11, FLAGS
-// Instr: x86_64, cmov, adx
-#define additionAdx(z,x,y) \
- MOVL $38, AX; \
- XORL DX, DX; \
- MOVQ 0+x, R8; ADCXQ 0+y, R8; \
- MOVQ 8+x, R9; ADCXQ 8+y, R9; \
- MOVQ 16+x, R10; ADCXQ 16+y, R10; \
- MOVQ 24+x, R11; ADCXQ 24+y, R11; \
- CMOVQCS AX, DX ; \
- XORL AX, AX; \
- ADCXQ DX, R8; \
- ADCXQ AX, R9; MOVQ R9, 8+z; \
- ADCXQ AX, R10; MOVQ R10, 16+z; \
- ADCXQ AX, R11; MOVQ R11, 24+z; \
- MOVL $38, DX; \
- CMOVQCS DX, AX; \
- ADDQ AX, R8; MOVQ R8, 0+z;
-
-// subtraction subtracts y from x and stores in z
-// Uses: AX, DX, R8-R11, FLAGS
-// Instr: x86_64, cmov
-#define subtraction(z,x,y) \
- MOVL $38, AX; \
- MOVQ 0+x, R8; SUBQ 0+y, R8; \
- MOVQ 8+x, R9; SBBQ 8+y, R9; \
- MOVQ 16+x, R10; SBBQ 16+y, R10; \
- MOVQ 24+x, R11; SBBQ 24+y, R11; \
- MOVL $0, DX; \
- CMOVQCS AX, DX; \
- SUBQ DX, R8; \
- SBBQ $0, R9; MOVQ R9, 8+z; \
- SBBQ $0, R10; MOVQ R10, 16+z; \
- SBBQ $0, R11; MOVQ R11, 24+z; \
- MOVL $0, DX; \
- CMOVQCS AX, DX; \
- SUBQ DX, R8; MOVQ R8, 0+z;
-
-// integerMulAdx multiplies x and y and stores in z
-// Uses: AX, DX, R8-R15, FLAGS
-// Instr: x86_64, bmi2, adx
-#define integerMulAdx(z,x,y) \
- MOVL $0,R15; \
- MOVQ 0+y, DX; XORL AX, AX; \
- MULXQ 0+x, AX, R8; MOVQ AX, 0+z; \
- MULXQ 8+x, AX, R9; ADCXQ AX, R8; \
- MULXQ 16+x, AX, R10; ADCXQ AX, R9; \
- MULXQ 24+x, AX, R11; ADCXQ AX, R10; \
- MOVL $0, AX;;;;;;;;; ADCXQ AX, R11; \
- MOVQ 8+y, DX; XORL AX, AX; \
- MULXQ 0+x, AX, R12; ADCXQ R8, AX; MOVQ AX, 8+z; \
- MULXQ 8+x, AX, R13; ADCXQ R9, R12; ADOXQ AX, R12; \
- MULXQ 16+x, AX, R14; ADCXQ R10, R13; ADOXQ AX, R13; \
- MULXQ 24+x, AX, R15; ADCXQ R11, R14; ADOXQ AX, R14; \
- MOVL $0, AX;;;;;;;;; ADCXQ AX, R15; ADOXQ AX, R15; \
- MOVQ 16+y, DX; XORL AX, AX; \
- MULXQ 0+x, AX, R8; ADCXQ R12, AX; MOVQ AX, 16+z; \
- MULXQ 8+x, AX, R9; ADCXQ R13, R8; ADOXQ AX, R8; \
- MULXQ 16+x, AX, R10; ADCXQ R14, R9; ADOXQ AX, R9; \
- MULXQ 24+x, AX, R11; ADCXQ R15, R10; ADOXQ AX, R10; \
- MOVL $0, AX;;;;;;;;; ADCXQ AX, R11; ADOXQ AX, R11; \
- MOVQ 24+y, DX; XORL AX, AX; \
- MULXQ 0+x, AX, R12; ADCXQ R8, AX; MOVQ AX, 24+z; \
- MULXQ 8+x, AX, R13; ADCXQ R9, R12; ADOXQ AX, R12; MOVQ R12, 32+z; \
- MULXQ 16+x, AX, R14; ADCXQ R10, R13; ADOXQ AX, R13; MOVQ R13, 40+z; \
- MULXQ 24+x, AX, R15; ADCXQ R11, R14; ADOXQ AX, R14; MOVQ R14, 48+z; \
- MOVL $0, AX;;;;;;;;; ADCXQ AX, R15; ADOXQ AX, R15; MOVQ R15, 56+z;
-
-// integerMulLeg multiplies x and y and stores in z
-// Uses: AX, DX, R8-R15, FLAGS
-// Instr: x86_64
-#define integerMulLeg(z,x,y) \
- MOVQ 0+y, R8; \
- MOVQ 0+x, AX; MULQ R8; MOVQ AX, 0+z; MOVQ DX, R15; \
- MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \
- MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \
- MOVQ 24+x, AX; MULQ R8; \
- ADDQ R13, R15; \
- ADCQ R14, R10; MOVQ R10, 16+z; \
- ADCQ AX, R11; MOVQ R11, 24+z; \
- ADCQ $0, DX; MOVQ DX, 32+z; \
- MOVQ 8+y, R8; \
- MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \
- MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \
- MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \
- MOVQ 24+x, AX; MULQ R8; \
- ADDQ R12, R15; MOVQ R15, 8+z; \
- ADCQ R13, R9; \
- ADCQ R14, R10; \
- ADCQ AX, R11; \
- ADCQ $0, DX; \
- ADCQ 16+z, R9; MOVQ R9, R15; \
- ADCQ 24+z, R10; MOVQ R10, 24+z; \
- ADCQ 32+z, R11; MOVQ R11, 32+z; \
- ADCQ $0, DX; MOVQ DX, 40+z; \
- MOVQ 16+y, R8; \
- MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \
- MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \
- MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \
- MOVQ 24+x, AX; MULQ R8; \
- ADDQ R12, R15; MOVQ R15, 16+z; \
- ADCQ R13, R9; \
- ADCQ R14, R10; \
- ADCQ AX, R11; \
- ADCQ $0, DX; \
- ADCQ 24+z, R9; MOVQ R9, R15; \
- ADCQ 32+z, R10; MOVQ R10, 32+z; \
- ADCQ 40+z, R11; MOVQ R11, 40+z; \
- ADCQ $0, DX; MOVQ DX, 48+z; \
- MOVQ 24+y, R8; \
- MOVQ 0+x, AX; MULQ R8; MOVQ AX, R12; MOVQ DX, R9; \
- MOVQ 8+x, AX; MULQ R8; MOVQ AX, R13; MOVQ DX, R10; \
- MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; \
- MOVQ 24+x, AX; MULQ R8; \
- ADDQ R12, R15; MOVQ R15, 24+z; \
- ADCQ R13, R9; \
- ADCQ R14, R10; \
- ADCQ AX, R11; \
- ADCQ $0, DX; \
- ADCQ 32+z, R9; MOVQ R9, 32+z; \
- ADCQ 40+z, R10; MOVQ R10, 40+z; \
- ADCQ 48+z, R11; MOVQ R11, 48+z; \
- ADCQ $0, DX; MOVQ DX, 56+z;
-
-// integerSqrLeg squares x and stores in z
-// Uses: AX, CX, DX, R8-R15, FLAGS
-// Instr: x86_64
-#define integerSqrLeg(z,x) \
- MOVQ 0+x, R8; \
- MOVQ 8+x, AX; MULQ R8; MOVQ AX, R9; MOVQ DX, R10; /* A[0]*A[1] */ \
- MOVQ 16+x, AX; MULQ R8; MOVQ AX, R14; MOVQ DX, R11; /* A[0]*A[2] */ \
- MOVQ 24+x, AX; MULQ R8; MOVQ AX, R15; MOVQ DX, R12; /* A[0]*A[3] */ \
- MOVQ 24+x, R8; \
- MOVQ 8+x, AX; MULQ R8; MOVQ AX, CX; MOVQ DX, R13; /* A[3]*A[1] */ \
- MOVQ 16+x, AX; MULQ R8; /* A[3]*A[2] */ \
- \
- ADDQ R14, R10;\
- ADCQ R15, R11; MOVL $0, R15;\
- ADCQ CX, R12;\
- ADCQ AX, R13;\
- ADCQ $0, DX; MOVQ DX, R14;\
- MOVQ 8+x, AX; MULQ 16+x;\
- \
- ADDQ AX, R11;\
- ADCQ DX, R12;\
- ADCQ $0, R13;\
- ADCQ $0, R14;\
- ADCQ $0, R15;\
- \
- SHLQ $1, R14, R15; MOVQ R15, 56+z;\
- SHLQ $1, R13, R14; MOVQ R14, 48+z;\
- SHLQ $1, R12, R13; MOVQ R13, 40+z;\
- SHLQ $1, R11, R12; MOVQ R12, 32+z;\
- SHLQ $1, R10, R11; MOVQ R11, 24+z;\
- SHLQ $1, R9, R10; MOVQ R10, 16+z;\
- SHLQ $1, R9; MOVQ R9, 8+z;\
- \
- MOVQ 0+x,AX; MULQ AX; MOVQ AX, 0+z; MOVQ DX, R9;\
- MOVQ 8+x,AX; MULQ AX; MOVQ AX, R10; MOVQ DX, R11;\
- MOVQ 16+x,AX; MULQ AX; MOVQ AX, R12; MOVQ DX, R13;\
- MOVQ 24+x,AX; MULQ AX; MOVQ AX, R14; MOVQ DX, R15;\
- \
- ADDQ 8+z, R9; MOVQ R9, 8+z;\
- ADCQ 16+z, R10; MOVQ R10, 16+z;\
- ADCQ 24+z, R11; MOVQ R11, 24+z;\
- ADCQ 32+z, R12; MOVQ R12, 32+z;\
- ADCQ 40+z, R13; MOVQ R13, 40+z;\
- ADCQ 48+z, R14; MOVQ R14, 48+z;\
- ADCQ 56+z, R15; MOVQ R15, 56+z;
-
-// integerSqrAdx squares x and stores in z
-// Uses: AX, CX, DX, R8-R15, FLAGS
-// Instr: x86_64, bmi2, adx
-#define integerSqrAdx(z,x) \
- MOVQ 0+x, DX; /* A[0] */ \
- MULXQ 8+x, R8, R14; /* A[1]*A[0] */ XORL R15, R15; \
- MULXQ 16+x, R9, R10; /* A[2]*A[0] */ ADCXQ R14, R9; \
- MULXQ 24+x, AX, CX; /* A[3]*A[0] */ ADCXQ AX, R10; \
- MOVQ 24+x, DX; /* A[3] */ \
- MULXQ 8+x, R11, R12; /* A[1]*A[3] */ ADCXQ CX, R11; \
- MULXQ 16+x, AX, R13; /* A[2]*A[3] */ ADCXQ AX, R12; \
- MOVQ 8+x, DX; /* A[1] */ ADCXQ R15, R13; \
- MULXQ 16+x, AX, CX; /* A[2]*A[1] */ MOVL $0, R14; \
- ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADCXQ R15, R14; \
- XORL R15, R15; \
- ADOXQ AX, R10; ADCXQ R8, R8; \
- ADOXQ CX, R11; ADCXQ R9, R9; \
- ADOXQ R15, R12; ADCXQ R10, R10; \
- ADOXQ R15, R13; ADCXQ R11, R11; \
- ADOXQ R15, R14; ADCXQ R12, R12; \
- ;;;;;;;;;;;;;;; ADCXQ R13, R13; \
- ;;;;;;;;;;;;;;; ADCXQ R14, R14; \
- MOVQ 0+x, DX; MULXQ DX, AX, CX; /* A[0]^2 */ \
- ;;;;;;;;;;;;;;; MOVQ AX, 0+z; \
- ADDQ CX, R8; MOVQ R8, 8+z; \
- MOVQ 8+x, DX; MULXQ DX, AX, CX; /* A[1]^2 */ \
- ADCQ AX, R9; MOVQ R9, 16+z; \
- ADCQ CX, R10; MOVQ R10, 24+z; \
- MOVQ 16+x, DX; MULXQ DX, AX, CX; /* A[2]^2 */ \
- ADCQ AX, R11; MOVQ R11, 32+z; \
- ADCQ CX, R12; MOVQ R12, 40+z; \
- MOVQ 24+x, DX; MULXQ DX, AX, CX; /* A[3]^2 */ \
- ADCQ AX, R13; MOVQ R13, 48+z; \
- ADCQ CX, R14; MOVQ R14, 56+z;
-
-// reduceFromDouble finds z congruent to x modulo p such that 0> 63)
- // PUT BIT 255 IN CARRY FLAG AND CLEAR
- x3 &^= 1 << 63
-
- x0, c0 := bits.Add64(x0, cx, 0)
- x1, c1 := bits.Add64(x1, 0, c0)
- x2, c2 := bits.Add64(x2, 0, c1)
- x3, _ = bits.Add64(x3, 0, c2)
-
- // TEST FOR BIT 255 AGAIN; ONLY TRIGGERED ON OVERFLOW MODULO 2^255-19
- // cx = C[255] ? 0 : 19
- cx = uint64(19) &^ (-(x3 >> 63))
- // CLEAR BIT 255
- x3 &^= 1 << 63
-
- x0, c0 = bits.Sub64(x0, cx, 0)
- x1, c1 = bits.Sub64(x1, 0, c0)
- x2, c2 = bits.Sub64(x2, 0, c1)
- x3, _ = bits.Sub64(x3, 0, c2)
-
- binary.LittleEndian.PutUint64(x[0*8:1*8], x0)
- binary.LittleEndian.PutUint64(x[1*8:2*8], x1)
- binary.LittleEndian.PutUint64(x[2*8:3*8], x2)
- binary.LittleEndian.PutUint64(x[3*8:4*8], x3)
-}
-
-func red64(z *Elt, x0, x1, x2, x3, x4, x5, x6, x7 uint64) {
- h0, l0 := bits.Mul64(x4, 38)
- h1, l1 := bits.Mul64(x5, 38)
- h2, l2 := bits.Mul64(x6, 38)
- h3, l3 := bits.Mul64(x7, 38)
-
- l1, c0 := bits.Add64(h0, l1, 0)
- l2, c1 := bits.Add64(h1, l2, c0)
- l3, c2 := bits.Add64(h2, l3, c1)
- l4, _ := bits.Add64(h3, 0, c2)
-
- l0, c0 = bits.Add64(l0, x0, 0)
- l1, c1 = bits.Add64(l1, x1, c0)
- l2, c2 = bits.Add64(l2, x2, c1)
- l3, c3 := bits.Add64(l3, x3, c2)
- l4, _ = bits.Add64(l4, 0, c3)
-
- _, l4 = bits.Mul64(l4, 38)
- l0, c0 = bits.Add64(l0, l4, 0)
- z1, c1 := bits.Add64(l1, 0, c0)
- z2, c2 := bits.Add64(l2, 0, c1)
- z3, c3 := bits.Add64(l3, 0, c2)
- z0, _ := bits.Add64(l0, (-c3)&38, 0)
-
- binary.LittleEndian.PutUint64(z[0*8:1*8], z0)
- binary.LittleEndian.PutUint64(z[1*8:2*8], z1)
- binary.LittleEndian.PutUint64(z[2*8:3*8], z2)
- binary.LittleEndian.PutUint64(z[3*8:4*8], z3)
-}
diff --git a/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go b/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go
deleted file mode 100644
index 26ca4d01..00000000
--- a/vendor/github.com/cloudflare/circl/math/fp25519/fp_noasm.go
+++ /dev/null
@@ -1,13 +0,0 @@
-//go:build !amd64 || purego
-// +build !amd64 purego
-
-package fp25519
-
-func cmov(x, y *Elt, n uint) { cmovGeneric(x, y, n) }
-func cswap(x, y *Elt, n uint) { cswapGeneric(x, y, n) }
-func add(z, x, y *Elt) { addGeneric(z, x, y) }
-func sub(z, x, y *Elt) { subGeneric(z, x, y) }
-func addsub(x, y *Elt) { addsubGeneric(x, y) }
-func mul(z, x, y *Elt) { mulGeneric(z, x, y) }
-func sqr(z, x *Elt) { sqrGeneric(z, x) }
-func modp(z *Elt) { modpGeneric(z) }
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp.go b/vendor/github.com/cloudflare/circl/math/fp448/fp.go
deleted file mode 100644
index a5e36600..00000000
--- a/vendor/github.com/cloudflare/circl/math/fp448/fp.go
+++ /dev/null
@@ -1,164 +0,0 @@
-// Package fp448 provides prime field arithmetic over GF(2^448-2^224-1).
-package fp448
-
-import (
- "errors"
-
- "github.com/cloudflare/circl/internal/conv"
-)
-
-// Size in bytes of an element.
-const Size = 56
-
-// Elt is a prime field element.
-type Elt [Size]byte
-
-func (e Elt) String() string { return conv.BytesLe2Hex(e[:]) }
-
-// p is the prime modulus 2^448-2^224-1.
-var p = Elt{
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
-}
-
-// P returns the prime modulus 2^448-2^224-1.
-func P() Elt { return p }
-
-// ToBytes stores in b the little-endian byte representation of x.
-func ToBytes(b []byte, x *Elt) error {
- if len(b) != Size {
- return errors.New("wrong size")
- }
- Modp(x)
- copy(b, x[:])
- return nil
-}
-
-// IsZero returns true if x is equal to 0.
-func IsZero(x *Elt) bool { Modp(x); return *x == Elt{} }
-
-// IsOne returns true if x is equal to 1.
-func IsOne(x *Elt) bool { Modp(x); return *x == Elt{1} }
-
-// SetOne assigns x=1.
-func SetOne(x *Elt) { *x = Elt{1} }
-
-// One returns the 1 element.
-func One() (x Elt) { x = Elt{1}; return }
-
-// Neg calculates z = -x.
-func Neg(z, x *Elt) { Sub(z, &p, x) }
-
-// Modp ensures that z is between [0,p-1].
-func Modp(z *Elt) { Sub(z, z, &p) }
-
-// InvSqrt calculates z = sqrt(x/y) iff x/y is a quadratic-residue. If so,
-// isQR = true; otherwise, isQR = false, since x/y is a quadratic non-residue,
-// and z = sqrt(-x/y).
-func InvSqrt(z, x, y *Elt) (isQR bool) {
- // First note that x^(2(k+1)) = x^(p-1)/2 * x = legendre(x) * x
- // so that's x if x is a quadratic residue and -x otherwise.
- // Next, y^(6k+3) = y^(4k+2) * y^(2k+1) = y^(p-1) * y^((p-1)/2) = legendre(y).
- // So the z we compute satisfies z^2 y = x^(2(k+1)) y^(6k+3) = legendre(x)*legendre(y).
- // Thus if x and y are quadratic residues, then z is indeed sqrt(x/y).
- t0, t1 := &Elt{}, &Elt{}
- Mul(t0, x, y) // x*y
- Sqr(t1, y) // y^2
- Mul(t1, t0, t1) // x*y^3
- powPminus3div4(z, t1) // (x*y^3)^k
- Mul(z, z, t0) // z = x*y*(x*y^3)^k = x^(k+1) * y^(3k+1)
-
- // Check if x/y is a quadratic residue
- Sqr(t0, z) // z^2
- Mul(t0, t0, y) // y*z^2
- Sub(t0, t0, x) // y*z^2-x
- return IsZero(t0)
-}
-
-// Inv calculates z = 1/x mod p.
-func Inv(z, x *Elt) {
- // Calculates z = x^(4k+1) = x^(p-3+1) = x^(p-2) = x^-1, where k = (p-3)/4.
- t := &Elt{}
- powPminus3div4(t, x) // t = x^k
- Sqr(t, t) // t = x^2k
- Sqr(t, t) // t = x^4k
- Mul(z, t, x) // z = x^(4k+1)
-}
-
-// powPminus3div4 calculates z = x^k mod p, where k = (p-3)/4.
-func powPminus3div4(z, x *Elt) {
- x0, x1 := &Elt{}, &Elt{}
- Sqr(z, x)
- Mul(z, z, x)
- Sqr(x0, z)
- Mul(x0, x0, x)
- Sqr(z, x0)
- Sqr(z, z)
- Sqr(z, z)
- Mul(z, z, x0)
- Sqr(x1, z)
- for i := 0; i < 5; i++ {
- Sqr(x1, x1)
- }
- Mul(x1, x1, z)
- Sqr(z, x1)
- for i := 0; i < 11; i++ {
- Sqr(z, z)
- }
- Mul(z, z, x1)
- Sqr(z, z)
- Sqr(z, z)
- Sqr(z, z)
- Mul(z, z, x0)
- Sqr(x1, z)
- for i := 0; i < 26; i++ {
- Sqr(x1, x1)
- }
- Mul(x1, x1, z)
- Sqr(z, x1)
- for i := 0; i < 53; i++ {
- Sqr(z, z)
- }
- Mul(z, z, x1)
- Sqr(z, z)
- Sqr(z, z)
- Sqr(z, z)
- Mul(z, z, x0)
- Sqr(x1, z)
- for i := 0; i < 110; i++ {
- Sqr(x1, x1)
- }
- Mul(x1, x1, z)
- Sqr(z, x1)
- Mul(z, z, x)
- for i := 0; i < 223; i++ {
- Sqr(z, z)
- }
- Mul(z, z, x1)
-}
-
-// Cmov assigns y to x if n is 1.
-func Cmov(x, y *Elt, n uint) { cmov(x, y, n) }
-
-// Cswap interchanges x and y if n is 1.
-func Cswap(x, y *Elt, n uint) { cswap(x, y, n) }
-
-// Add calculates z = x+y mod p.
-func Add(z, x, y *Elt) { add(z, x, y) }
-
-// Sub calculates z = x-y mod p.
-func Sub(z, x, y *Elt) { sub(z, x, y) }
-
-// AddSub calculates (x,y) = (x+y mod p, x-y mod p).
-func AddSub(x, y *Elt) { addsub(x, y) }
-
-// Mul calculates z = x*y mod p.
-func Mul(z, x, y *Elt) { mul(z, x, y) }
-
-// Sqr calculates z = x^2 mod p.
-func Sqr(z, x *Elt) { sqr(z, x) }
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go
deleted file mode 100644
index 6a12209a..00000000
--- a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.go
+++ /dev/null
@@ -1,43 +0,0 @@
-//go:build amd64 && !purego
-// +build amd64,!purego
-
-package fp448
-
-import (
- "golang.org/x/sys/cpu"
-)
-
-var hasBmi2Adx = cpu.X86.HasBMI2 && cpu.X86.HasADX
-
-var _ = hasBmi2Adx
-
-func cmov(x, y *Elt, n uint) { cmovAmd64(x, y, n) }
-func cswap(x, y *Elt, n uint) { cswapAmd64(x, y, n) }
-func add(z, x, y *Elt) { addAmd64(z, x, y) }
-func sub(z, x, y *Elt) { subAmd64(z, x, y) }
-func addsub(x, y *Elt) { addsubAmd64(x, y) }
-func mul(z, x, y *Elt) { mulAmd64(z, x, y) }
-func sqr(z, x *Elt) { sqrAmd64(z, x) }
-
-/* Functions defined in fp_amd64.s */
-
-//go:noescape
-func cmovAmd64(x, y *Elt, n uint)
-
-//go:noescape
-func cswapAmd64(x, y *Elt, n uint)
-
-//go:noescape
-func addAmd64(z, x, y *Elt)
-
-//go:noescape
-func subAmd64(z, x, y *Elt)
-
-//go:noescape
-func addsubAmd64(x, y *Elt)
-
-//go:noescape
-func mulAmd64(z, x, y *Elt)
-
-//go:noescape
-func sqrAmd64(z, x *Elt)
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h
deleted file mode 100644
index 536fe5bd..00000000
--- a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.h
+++ /dev/null
@@ -1,591 +0,0 @@
-// This code was imported from https://github.com/armfazh/rfc7748_precomputed
-
-// CHECK_BMI2ADX triggers bmi2adx if supported,
-// otherwise it fallbacks to legacy code.
-#define CHECK_BMI2ADX(label, legacy, bmi2adx) \
- CMPB ·hasBmi2Adx(SB), $0 \
- JE label \
- bmi2adx \
- RET \
- label: \
- legacy \
- RET
-
-// cselect is a conditional move
-// if b=1: it copies y into x;
-// if b=0: x remains with the same value;
-// if b<> 0,1: undefined.
-// Uses: AX, DX, FLAGS
-// Instr: x86_64, cmov
-#define cselect(x,y,b) \
- TESTQ b, b \
- MOVQ 0+x, AX; MOVQ 0+y, DX; CMOVQNE DX, AX; MOVQ AX, 0+x; \
- MOVQ 8+x, AX; MOVQ 8+y, DX; CMOVQNE DX, AX; MOVQ AX, 8+x; \
- MOVQ 16+x, AX; MOVQ 16+y, DX; CMOVQNE DX, AX; MOVQ AX, 16+x; \
- MOVQ 24+x, AX; MOVQ 24+y, DX; CMOVQNE DX, AX; MOVQ AX, 24+x; \
- MOVQ 32+x, AX; MOVQ 32+y, DX; CMOVQNE DX, AX; MOVQ AX, 32+x; \
- MOVQ 40+x, AX; MOVQ 40+y, DX; CMOVQNE DX, AX; MOVQ AX, 40+x; \
- MOVQ 48+x, AX; MOVQ 48+y, DX; CMOVQNE DX, AX; MOVQ AX, 48+x;
-
-// cswap is a conditional swap
-// if b=1: x,y <- y,x;
-// if b=0: x,y remain with the same values;
-// if b<> 0,1: undefined.
-// Uses: AX, DX, R8, FLAGS
-// Instr: x86_64, cmov
-#define cswap(x,y,b) \
- TESTQ b, b \
- MOVQ 0+x, AX; MOVQ AX, R8; MOVQ 0+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 0+x; MOVQ DX, 0+y; \
- MOVQ 8+x, AX; MOVQ AX, R8; MOVQ 8+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 8+x; MOVQ DX, 8+y; \
- MOVQ 16+x, AX; MOVQ AX, R8; MOVQ 16+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 16+x; MOVQ DX, 16+y; \
- MOVQ 24+x, AX; MOVQ AX, R8; MOVQ 24+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 24+x; MOVQ DX, 24+y; \
- MOVQ 32+x, AX; MOVQ AX, R8; MOVQ 32+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 32+x; MOVQ DX, 32+y; \
- MOVQ 40+x, AX; MOVQ AX, R8; MOVQ 40+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 40+x; MOVQ DX, 40+y; \
- MOVQ 48+x, AX; MOVQ AX, R8; MOVQ 48+y, DX; CMOVQNE DX, AX; CMOVQNE R8, DX; MOVQ AX, 48+x; MOVQ DX, 48+y;
-
-// additionLeg adds x and y and stores in z
-// Uses: AX, DX, R8-R14, FLAGS
-// Instr: x86_64
-#define additionLeg(z,x,y) \
- MOVQ 0+x, R8; ADDQ 0+y, R8; \
- MOVQ 8+x, R9; ADCQ 8+y, R9; \
- MOVQ 16+x, R10; ADCQ 16+y, R10; \
- MOVQ 24+x, R11; ADCQ 24+y, R11; \
- MOVQ 32+x, R12; ADCQ 32+y, R12; \
- MOVQ 40+x, R13; ADCQ 40+y, R13; \
- MOVQ 48+x, R14; ADCQ 48+y, R14; \
- MOVQ $0, AX; ADCQ $0, AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- ADDQ AX, R8; MOVQ $0, AX; \
- ADCQ $0, R9; \
- ADCQ $0, R10; \
- ADCQ DX, R11; \
- ADCQ $0, R12; \
- ADCQ $0, R13; \
- ADCQ $0, R14; \
- ADCQ $0, AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- ADDQ AX, R8; MOVQ R8, 0+z; \
- ADCQ $0, R9; MOVQ R9, 8+z; \
- ADCQ $0, R10; MOVQ R10, 16+z; \
- ADCQ DX, R11; MOVQ R11, 24+z; \
- ADCQ $0, R12; MOVQ R12, 32+z; \
- ADCQ $0, R13; MOVQ R13, 40+z; \
- ADCQ $0, R14; MOVQ R14, 48+z;
-
-
-// additionAdx adds x and y and stores in z
-// Uses: AX, DX, R8-R15, FLAGS
-// Instr: x86_64, adx
-#define additionAdx(z,x,y) \
- MOVL $32, R15; \
- XORL DX, DX; \
- MOVQ 0+x, R8; ADCXQ 0+y, R8; \
- MOVQ 8+x, R9; ADCXQ 8+y, R9; \
- MOVQ 16+x, R10; ADCXQ 16+y, R10; \
- MOVQ 24+x, R11; ADCXQ 24+y, R11; \
- MOVQ 32+x, R12; ADCXQ 32+y, R12; \
- MOVQ 40+x, R13; ADCXQ 40+y, R13; \
- MOVQ 48+x, R14; ADCXQ 48+y, R14; \
- ;;;;;;;;;;;;;;; ADCXQ DX, DX; \
- XORL AX, AX; \
- ADCXQ DX, R8; SHLXQ R15, DX, DX; \
- ADCXQ AX, R9; \
- ADCXQ AX, R10; \
- ADCXQ DX, R11; \
- ADCXQ AX, R12; \
- ADCXQ AX, R13; \
- ADCXQ AX, R14; \
- ADCXQ AX, AX; \
- XORL DX, DX; \
- ADCXQ AX, R8; MOVQ R8, 0+z; SHLXQ R15, AX, AX; \
- ADCXQ DX, R9; MOVQ R9, 8+z; \
- ADCXQ DX, R10; MOVQ R10, 16+z; \
- ADCXQ AX, R11; MOVQ R11, 24+z; \
- ADCXQ DX, R12; MOVQ R12, 32+z; \
- ADCXQ DX, R13; MOVQ R13, 40+z; \
- ADCXQ DX, R14; MOVQ R14, 48+z;
-
-// subtraction subtracts y from x and stores in z
-// Uses: AX, DX, R8-R14, FLAGS
-// Instr: x86_64
-#define subtraction(z,x,y) \
- MOVQ 0+x, R8; SUBQ 0+y, R8; \
- MOVQ 8+x, R9; SBBQ 8+y, R9; \
- MOVQ 16+x, R10; SBBQ 16+y, R10; \
- MOVQ 24+x, R11; SBBQ 24+y, R11; \
- MOVQ 32+x, R12; SBBQ 32+y, R12; \
- MOVQ 40+x, R13; SBBQ 40+y, R13; \
- MOVQ 48+x, R14; SBBQ 48+y, R14; \
- MOVQ $0, AX; SETCS AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- SUBQ AX, R8; MOVQ $0, AX; \
- SBBQ $0, R9; \
- SBBQ $0, R10; \
- SBBQ DX, R11; \
- SBBQ $0, R12; \
- SBBQ $0, R13; \
- SBBQ $0, R14; \
- SETCS AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- SUBQ AX, R8; MOVQ R8, 0+z; \
- SBBQ $0, R9; MOVQ R9, 8+z; \
- SBBQ $0, R10; MOVQ R10, 16+z; \
- SBBQ DX, R11; MOVQ R11, 24+z; \
- SBBQ $0, R12; MOVQ R12, 32+z; \
- SBBQ $0, R13; MOVQ R13, 40+z; \
- SBBQ $0, R14; MOVQ R14, 48+z;
-
-// maddBmi2Adx multiplies x and y and accumulates in z
-// Uses: AX, DX, R15, FLAGS
-// Instr: x86_64, bmi2, adx
-#define maddBmi2Adx(z,x,y,i,r0,r1,r2,r3,r4,r5,r6) \
- MOVQ i+y, DX; XORL AX, AX; \
- MULXQ 0+x, AX, R8; ADOXQ AX, r0; ADCXQ R8, r1; MOVQ r0,i+z; \
- MULXQ 8+x, AX, r0; ADOXQ AX, r1; ADCXQ r0, r2; MOVQ $0, R8; \
- MULXQ 16+x, AX, r0; ADOXQ AX, r2; ADCXQ r0, r3; \
- MULXQ 24+x, AX, r0; ADOXQ AX, r3; ADCXQ r0, r4; \
- MULXQ 32+x, AX, r0; ADOXQ AX, r4; ADCXQ r0, r5; \
- MULXQ 40+x, AX, r0; ADOXQ AX, r5; ADCXQ r0, r6; \
- MULXQ 48+x, AX, r0; ADOXQ AX, r6; ADCXQ R8, r0; \
- ;;;;;;;;;;;;;;;;;;; ADOXQ R8, r0;
-
-// integerMulAdx multiplies x and y and stores in z
-// Uses: AX, DX, R8-R15, FLAGS
-// Instr: x86_64, bmi2, adx
-#define integerMulAdx(z,x,y) \
- MOVL $0,R15; \
- MOVQ 0+y, DX; XORL AX, AX; MOVQ $0, R8; \
- MULXQ 0+x, AX, R9; MOVQ AX, 0+z; \
- MULXQ 8+x, AX, R10; ADCXQ AX, R9; \
- MULXQ 16+x, AX, R11; ADCXQ AX, R10; \
- MULXQ 24+x, AX, R12; ADCXQ AX, R11; \
- MULXQ 32+x, AX, R13; ADCXQ AX, R12; \
- MULXQ 40+x, AX, R14; ADCXQ AX, R13; \
- MULXQ 48+x, AX, R15; ADCXQ AX, R14; \
- ;;;;;;;;;;;;;;;;;;;; ADCXQ R8, R15; \
- maddBmi2Adx(z,x,y, 8, R9,R10,R11,R12,R13,R14,R15) \
- maddBmi2Adx(z,x,y,16,R10,R11,R12,R13,R14,R15, R9) \
- maddBmi2Adx(z,x,y,24,R11,R12,R13,R14,R15, R9,R10) \
- maddBmi2Adx(z,x,y,32,R12,R13,R14,R15, R9,R10,R11) \
- maddBmi2Adx(z,x,y,40,R13,R14,R15, R9,R10,R11,R12) \
- maddBmi2Adx(z,x,y,48,R14,R15, R9,R10,R11,R12,R13) \
- MOVQ R15, 56+z; \
- MOVQ R9, 64+z; \
- MOVQ R10, 72+z; \
- MOVQ R11, 80+z; \
- MOVQ R12, 88+z; \
- MOVQ R13, 96+z; \
- MOVQ R14, 104+z;
-
-// maddLegacy multiplies x and y and accumulates in z
-// Uses: AX, DX, R15, FLAGS
-// Instr: x86_64
-#define maddLegacy(z,x,y,i) \
- MOVQ i+y, R15; \
- MOVQ 0+x, AX; MULQ R15; MOVQ AX, R8; ;;;;;;;;;;;; MOVQ DX, R9; \
- MOVQ 8+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \
- MOVQ 16+x, AX; MULQ R15; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \
- MOVQ 24+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \
- MOVQ 32+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \
- MOVQ 40+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \
- MOVQ 48+x, AX; MULQ R15; ADDQ AX, R14; ADCQ $0, DX; \
- ADDQ 0+i+z, R8; MOVQ R8, 0+i+z; \
- ADCQ 8+i+z, R9; MOVQ R9, 8+i+z; \
- ADCQ 16+i+z, R10; MOVQ R10, 16+i+z; \
- ADCQ 24+i+z, R11; MOVQ R11, 24+i+z; \
- ADCQ 32+i+z, R12; MOVQ R12, 32+i+z; \
- ADCQ 40+i+z, R13; MOVQ R13, 40+i+z; \
- ADCQ 48+i+z, R14; MOVQ R14, 48+i+z; \
- ADCQ $0, DX; MOVQ DX, 56+i+z;
-
-// integerMulLeg multiplies x and y and stores in z
-// Uses: AX, DX, R8-R15, FLAGS
-// Instr: x86_64
-#define integerMulLeg(z,x,y) \
- MOVQ 0+y, R15; \
- MOVQ 0+x, AX; MULQ R15; MOVQ AX, 0+z; ;;;;;;;;;;;; MOVQ DX, R8; \
- MOVQ 8+x, AX; MULQ R15; ADDQ AX, R8; ADCQ $0, DX; MOVQ DX, R9; MOVQ R8, 8+z; \
- MOVQ 16+x, AX; MULQ R15; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; MOVQ R9, 16+z; \
- MOVQ 24+x, AX; MULQ R15; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; MOVQ R10, 24+z; \
- MOVQ 32+x, AX; MULQ R15; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; MOVQ R11, 32+z; \
- MOVQ 40+x, AX; MULQ R15; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; MOVQ R12, 40+z; \
- MOVQ 48+x, AX; MULQ R15; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX,56+z; MOVQ R13, 48+z; \
- maddLegacy(z,x,y, 8) \
- maddLegacy(z,x,y,16) \
- maddLegacy(z,x,y,24) \
- maddLegacy(z,x,y,32) \
- maddLegacy(z,x,y,40) \
- maddLegacy(z,x,y,48)
-
-// integerSqrLeg squares x and stores in z
-// Uses: AX, CX, DX, R8-R15, FLAGS
-// Instr: x86_64
-#define integerSqrLeg(z,x) \
- XORL R15, R15; \
- MOVQ 0+x, CX; \
- MOVQ CX, AX; MULQ CX; MOVQ AX, 0+z; MOVQ DX, R8; \
- ADDQ CX, CX; ADCQ $0, R15; \
- MOVQ 8+x, AX; MULQ CX; ADDQ AX, R8; ADCQ $0, DX; MOVQ DX, R9; MOVQ R8, 8+z; \
- MOVQ 16+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ DX, R10; \
- MOVQ 24+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; MOVQ DX, R11; \
- MOVQ 32+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ DX, R12; \
- MOVQ 40+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; MOVQ DX, R13; \
- MOVQ 48+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ DX, R14; \
- \
- MOVQ 8+x, CX; \
- MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
- ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ R9,16+z; \
- MOVQ R15, AX; NEGQ AX; ANDQ 8+x, AX; ADDQ AX, DX; ADCQ $0, R11; MOVQ DX, R8; \
- ADDQ 8+x, CX; ADCQ $0, R15; \
- MOVQ 16+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX, R8; MOVQ R10, 24+z; \
- MOVQ 24+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; ADDQ R8, R11; ADCQ $0, DX; MOVQ DX, R8; \
- MOVQ 32+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; \
- MOVQ 40+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; ADDQ R8, R13; ADCQ $0, DX; MOVQ DX, R8; \
- MOVQ 48+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R9; \
- \
- MOVQ 16+x, CX; \
- MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
- ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ R11, 32+z; \
- MOVQ R15, AX; NEGQ AX; ANDQ 16+x,AX; ADDQ AX, DX; ADCQ $0, R13; MOVQ DX, R8; \
- ADDQ 16+x, CX; ADCQ $0, R15; \
- MOVQ 24+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; MOVQ R12, 40+z; \
- MOVQ 32+x, AX; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; ADDQ R8, R13; ADCQ $0, DX; MOVQ DX, R8; \
- MOVQ 40+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R8; \
- MOVQ 48+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; ADDQ R8, R9; ADCQ $0, DX; MOVQ DX,R10; \
- \
- MOVQ 24+x, CX; \
- MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
- ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ R13, 48+z; \
- MOVQ R15, AX; NEGQ AX; ANDQ 24+x,AX; ADDQ AX, DX; ADCQ $0, R9; MOVQ DX, R8; \
- ADDQ 24+x, CX; ADCQ $0, R15; \
- MOVQ 32+x, AX; MULQ CX; ADDQ AX, R14; ADCQ $0, DX; ADDQ R8, R14; ADCQ $0, DX; MOVQ DX, R8; MOVQ R14, 56+z; \
- MOVQ 40+x, AX; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; ADDQ R8, R9; ADCQ $0, DX; MOVQ DX, R8; \
- MOVQ 48+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX,R11; \
- \
- MOVQ 32+x, CX; \
- MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
- ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R9; ADCQ $0, DX; MOVQ R9, 64+z; \
- MOVQ R15, AX; NEGQ AX; ANDQ 32+x,AX; ADDQ AX, DX; ADCQ $0, R11; MOVQ DX, R8; \
- ADDQ 32+x, CX; ADCQ $0, R15; \
- MOVQ 40+x, AX; MULQ CX; ADDQ AX, R10; ADCQ $0, DX; ADDQ R8, R10; ADCQ $0, DX; MOVQ DX, R8; MOVQ R10, 72+z; \
- MOVQ 48+x, AX; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; ADDQ R8, R11; ADCQ $0, DX; MOVQ DX,R12; \
- \
- XORL R13, R13; \
- XORL R14, R14; \
- MOVQ 40+x, CX; \
- MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
- ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R11; ADCQ $0, DX; MOVQ R11, 80+z; \
- MOVQ R15, AX; NEGQ AX; ANDQ 40+x,AX; ADDQ AX, DX; ADCQ $0, R13; MOVQ DX, R8; \
- ADDQ 40+x, CX; ADCQ $0, R15; \
- MOVQ 48+x, AX; MULQ CX; ADDQ AX, R12; ADCQ $0, DX; ADDQ R8, R12; ADCQ $0, DX; MOVQ DX, R8; MOVQ R12, 88+z; \
- ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADDQ R8, R13; ADCQ $0,R14; \
- \
- XORL R9, R9; \
- MOVQ 48+x, CX; \
- MOVQ CX, AX; ADDQ R15, CX; MOVQ $0, R15; ADCQ $0, R15; \
- ;;;;;;;;;;;;;; MULQ CX; ADDQ AX, R13; ADCQ $0, DX; MOVQ R13, 96+z; \
- MOVQ R15, AX; NEGQ AX; ANDQ 48+x,AX; ADDQ AX, DX; ADCQ $0, R9; MOVQ DX, R8; \
- ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ADDQ R8,R14; ADCQ $0, R9; MOVQ R14, 104+z;
-
-
-// integerSqrAdx squares x and stores in z
-// Uses: AX, CX, DX, R8-R15, FLAGS
-// Instr: x86_64, bmi2, adx
-#define integerSqrAdx(z,x) \
- XORL R15, R15; \
- MOVQ 0+x, DX; \
- ;;;;;;;;;;;;;; MULXQ DX, AX, R8; MOVQ AX, 0+z; \
- ADDQ DX, DX; ADCQ $0, R15; CLC; \
- MULXQ 8+x, AX, R9; ADCXQ AX, R8; MOVQ R8, 8+z; \
- MULXQ 16+x, AX, R10; ADCXQ AX, R9; MOVQ $0, R8;\
- MULXQ 24+x, AX, R11; ADCXQ AX, R10; \
- MULXQ 32+x, AX, R12; ADCXQ AX, R11; \
- MULXQ 40+x, AX, R13; ADCXQ AX, R12; \
- MULXQ 48+x, AX, R14; ADCXQ AX, R13; \
- ;;;;;;;;;;;;;;;;;;;; ADCXQ R8, R14; \
- \
- MOVQ 8+x, DX; \
- MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
- MULXQ AX, AX, CX; \
- MOVQ R15, R8; NEGQ R8; ANDQ 8+x, R8; \
- ADDQ AX, R9; MOVQ R9, 16+z; \
- ADCQ CX, R8; \
- ADCQ $0, R11; \
- ADDQ 8+x, DX; \
- ADCQ $0, R15; \
- XORL R9, R9; ;;;;;;;;;;;;;;;;;;;;; ADOXQ R8, R10; \
- MULXQ 16+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; MOVQ R10, 24+z; \
- MULXQ 24+x, AX, CX; ADCXQ AX, R11; ADOXQ CX, R12; MOVQ $0, R10; \
- MULXQ 32+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; \
- MULXQ 40+x, AX, CX; ADCXQ AX, R13; ADOXQ CX, R14; \
- MULXQ 48+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; \
- ;;;;;;;;;;;;;;;;;;; ADCXQ R10, R9; \
- \
- MOVQ 16+x, DX; \
- MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
- MULXQ AX, AX, CX; \
- MOVQ R15, R8; NEGQ R8; ANDQ 16+x, R8; \
- ADDQ AX, R11; MOVQ R11, 32+z; \
- ADCQ CX, R8; \
- ADCQ $0, R13; \
- ADDQ 16+x, DX; \
- ADCQ $0, R15; \
- XORL R11, R11; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R12; \
- MULXQ 24+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; MOVQ R12, 40+z; \
- MULXQ 32+x, AX, CX; ADCXQ AX, R13; ADOXQ CX, R14; MOVQ $0, R12; \
- MULXQ 40+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; \
- MULXQ 48+x, AX, CX; ADCXQ AX, R9; ADOXQ CX, R10; \
- ;;;;;;;;;;;;;;;;;;; ADCXQ R11,R10; \
- \
- MOVQ 24+x, DX; \
- MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
- MULXQ AX, AX, CX; \
- MOVQ R15, R8; NEGQ R8; ANDQ 24+x, R8; \
- ADDQ AX, R13; MOVQ R13, 48+z; \
- ADCQ CX, R8; \
- ADCQ $0, R9; \
- ADDQ 24+x, DX; \
- ADCQ $0, R15; \
- XORL R13, R13; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R14; \
- MULXQ 32+x, AX, CX; ADCXQ AX, R14; ADOXQ CX, R9; MOVQ R14, 56+z; \
- MULXQ 40+x, AX, CX; ADCXQ AX, R9; ADOXQ CX, R10; MOVQ $0, R14; \
- MULXQ 48+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; \
- ;;;;;;;;;;;;;;;;;;; ADCXQ R12,R11; \
- \
- MOVQ 32+x, DX; \
- MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
- MULXQ AX, AX, CX; \
- MOVQ R15, R8; NEGQ R8; ANDQ 32+x, R8; \
- ADDQ AX, R9; MOVQ R9, 64+z; \
- ADCQ CX, R8; \
- ADCQ $0, R11; \
- ADDQ 32+x, DX; \
- ADCQ $0, R15; \
- XORL R9, R9; ;;;;;;;;;;;;;;;;;;;;; ADOXQ R8, R10; \
- MULXQ 40+x, AX, CX; ADCXQ AX, R10; ADOXQ CX, R11; MOVQ R10, 72+z; \
- MULXQ 48+x, AX, CX; ADCXQ AX, R11; ADOXQ CX, R12; \
- ;;;;;;;;;;;;;;;;;;; ADCXQ R13,R12; \
- \
- MOVQ 40+x, DX; \
- MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
- MULXQ AX, AX, CX; \
- MOVQ R15, R8; NEGQ R8; ANDQ 40+x, R8; \
- ADDQ AX, R11; MOVQ R11, 80+z; \
- ADCQ CX, R8; \
- ADCQ $0, R13; \
- ADDQ 40+x, DX; \
- ADCQ $0, R15; \
- XORL R11, R11; ;;;;;;;;;;;;;;;;;;; ADOXQ R8, R12; \
- MULXQ 48+x, AX, CX; ADCXQ AX, R12; ADOXQ CX, R13; MOVQ R12, 88+z; \
- ;;;;;;;;;;;;;;;;;;; ADCXQ R14,R13; \
- \
- MOVQ 48+x, DX; \
- MOVQ DX, AX; ADDQ R15, DX; MOVQ $0, R15; ADCQ $0, R15; \
- MULXQ AX, AX, CX; \
- MOVQ R15, R8; NEGQ R8; ANDQ 48+x, R8; \
- XORL R10, R10; ;;;;;;;;;;;;;; ADOXQ CX, R14; \
- ;;;;;;;;;;;;;; ADCXQ AX, R13; ;;;;;;;;;;;;;; MOVQ R13, 96+z; \
- ;;;;;;;;;;;;;; ADCXQ R8, R14; MOVQ R14, 104+z;
-
-// reduceFromDoubleLeg finds a z=x modulo p such that z<2^448 and stores in z
-// Uses: AX, R8-R15, FLAGS
-// Instr: x86_64
-#define reduceFromDoubleLeg(z,x) \
- /* ( ,2C13,2C12,2C11,2C10|C10,C9,C8, C7) + (C6,...,C0) */ \
- /* (r14, r13, r12, r11, r10,r9,r8,r15) */ \
- MOVQ 80+x,AX; MOVQ AX,R10; \
- MOVQ $0xFFFFFFFF00000000, R8; \
- ANDQ R8,R10; \
- \
- MOVQ $0,R14; \
- MOVQ 104+x,R13; SHLQ $1,R13,R14; \
- MOVQ 96+x,R12; SHLQ $1,R12,R13; \
- MOVQ 88+x,R11; SHLQ $1,R11,R12; \
- MOVQ 72+x, R9; SHLQ $1,R10,R11; \
- MOVQ 64+x, R8; SHLQ $1,R10; \
- MOVQ $0xFFFFFFFF,R15; ANDQ R15,AX; ORQ AX,R10; \
- MOVQ 56+x,R15; \
- \
- ADDQ 0+x,R15; MOVQ R15, 0+z; MOVQ 56+x,R15; \
- ADCQ 8+x, R8; MOVQ R8, 8+z; MOVQ 64+x, R8; \
- ADCQ 16+x, R9; MOVQ R9,16+z; MOVQ 72+x, R9; \
- ADCQ 24+x,R10; MOVQ R10,24+z; MOVQ 80+x,R10; \
- ADCQ 32+x,R11; MOVQ R11,32+z; MOVQ 88+x,R11; \
- ADCQ 40+x,R12; MOVQ R12,40+z; MOVQ 96+x,R12; \
- ADCQ 48+x,R13; MOVQ R13,48+z; MOVQ 104+x,R13; \
- ADCQ $0,R14; \
- /* (c10c9,c9c8,c8c7,c7c13,c13c12,c12c11,c11c10) + (c6,...,c0) */ \
- /* ( r9, r8, r15, r13, r12, r11, r10) */ \
- MOVQ R10, AX; \
- SHRQ $32,R11,R10; \
- SHRQ $32,R12,R11; \
- SHRQ $32,R13,R12; \
- SHRQ $32,R15,R13; \
- SHRQ $32, R8,R15; \
- SHRQ $32, R9, R8; \
- SHRQ $32, AX, R9; \
- \
- ADDQ 0+z,R10; \
- ADCQ 8+z,R11; \
- ADCQ 16+z,R12; \
- ADCQ 24+z,R13; \
- ADCQ 32+z,R15; \
- ADCQ 40+z, R8; \
- ADCQ 48+z, R9; \
- ADCQ $0,R14; \
- /* ( c7) + (c6,...,c0) */ \
- /* (r14) */ \
- MOVQ R14, AX; SHLQ $32, AX; \
- ADDQ R14,R10; MOVQ $0,R14; \
- ADCQ $0,R11; \
- ADCQ $0,R12; \
- ADCQ AX,R13; \
- ADCQ $0,R15; \
- ADCQ $0, R8; \
- ADCQ $0, R9; \
- ADCQ $0,R14; \
- /* ( c7) + (c6,...,c0) */ \
- /* (r14) */ \
- MOVQ R14, AX; SHLQ $32,AX; \
- ADDQ R14,R10; MOVQ R10, 0+z; \
- ADCQ $0,R11; MOVQ R11, 8+z; \
- ADCQ $0,R12; MOVQ R12,16+z; \
- ADCQ AX,R13; MOVQ R13,24+z; \
- ADCQ $0,R15; MOVQ R15,32+z; \
- ADCQ $0, R8; MOVQ R8,40+z; \
- ADCQ $0, R9; MOVQ R9,48+z;
-
-// reduceFromDoubleAdx finds a z=x modulo p such that z<2^448 and stores in z
-// Uses: AX, R8-R15, FLAGS
-// Instr: x86_64, adx
-#define reduceFromDoubleAdx(z,x) \
- /* ( ,2C13,2C12,2C11,2C10|C10,C9,C8, C7) + (C6,...,C0) */ \
- /* (r14, r13, r12, r11, r10,r9,r8,r15) */ \
- MOVQ 80+x,AX; MOVQ AX,R10; \
- MOVQ $0xFFFFFFFF00000000, R8; \
- ANDQ R8,R10; \
- \
- MOVQ $0,R14; \
- MOVQ 104+x,R13; SHLQ $1,R13,R14; \
- MOVQ 96+x,R12; SHLQ $1,R12,R13; \
- MOVQ 88+x,R11; SHLQ $1,R11,R12; \
- MOVQ 72+x, R9; SHLQ $1,R10,R11; \
- MOVQ 64+x, R8; SHLQ $1,R10; \
- MOVQ $0xFFFFFFFF,R15; ANDQ R15,AX; ORQ AX,R10; \
- MOVQ 56+x,R15; \
- \
- XORL AX,AX; \
- ADCXQ 0+x,R15; MOVQ R15, 0+z; MOVQ 56+x,R15; \
- ADCXQ 8+x, R8; MOVQ R8, 8+z; MOVQ 64+x, R8; \
- ADCXQ 16+x, R9; MOVQ R9,16+z; MOVQ 72+x, R9; \
- ADCXQ 24+x,R10; MOVQ R10,24+z; MOVQ 80+x,R10; \
- ADCXQ 32+x,R11; MOVQ R11,32+z; MOVQ 88+x,R11; \
- ADCXQ 40+x,R12; MOVQ R12,40+z; MOVQ 96+x,R12; \
- ADCXQ 48+x,R13; MOVQ R13,48+z; MOVQ 104+x,R13; \
- ADCXQ AX,R14; \
- /* (c10c9,c9c8,c8c7,c7c13,c13c12,c12c11,c11c10) + (c6,...,c0) */ \
- /* ( r9, r8, r15, r13, r12, r11, r10) */ \
- MOVQ R10, AX; \
- SHRQ $32,R11,R10; \
- SHRQ $32,R12,R11; \
- SHRQ $32,R13,R12; \
- SHRQ $32,R15,R13; \
- SHRQ $32, R8,R15; \
- SHRQ $32, R9, R8; \
- SHRQ $32, AX, R9; \
- \
- XORL AX,AX; \
- ADCXQ 0+z,R10; \
- ADCXQ 8+z,R11; \
- ADCXQ 16+z,R12; \
- ADCXQ 24+z,R13; \
- ADCXQ 32+z,R15; \
- ADCXQ 40+z, R8; \
- ADCXQ 48+z, R9; \
- ADCXQ AX,R14; \
- /* ( c7) + (c6,...,c0) */ \
- /* (r14) */ \
- MOVQ R14, AX; SHLQ $32, AX; \
- CLC; \
- ADCXQ R14,R10; MOVQ $0,R14; \
- ADCXQ R14,R11; \
- ADCXQ R14,R12; \
- ADCXQ AX,R13; \
- ADCXQ R14,R15; \
- ADCXQ R14, R8; \
- ADCXQ R14, R9; \
- ADCXQ R14,R14; \
- /* ( c7) + (c6,...,c0) */ \
- /* (r14) */ \
- MOVQ R14, AX; SHLQ $32, AX; \
- CLC; \
- ADCXQ R14,R10; MOVQ R10, 0+z; MOVQ $0,R14; \
- ADCXQ R14,R11; MOVQ R11, 8+z; \
- ADCXQ R14,R12; MOVQ R12,16+z; \
- ADCXQ AX,R13; MOVQ R13,24+z; \
- ADCXQ R14,R15; MOVQ R15,32+z; \
- ADCXQ R14, R8; MOVQ R8,40+z; \
- ADCXQ R14, R9; MOVQ R9,48+z;
-
-// addSub calculates two operations: x,y = x+y,x-y
-// Uses: AX, DX, R8-R15, FLAGS
-#define addSub(x,y) \
- MOVQ 0+x, R8; ADDQ 0+y, R8; \
- MOVQ 8+x, R9; ADCQ 8+y, R9; \
- MOVQ 16+x, R10; ADCQ 16+y, R10; \
- MOVQ 24+x, R11; ADCQ 24+y, R11; \
- MOVQ 32+x, R12; ADCQ 32+y, R12; \
- MOVQ 40+x, R13; ADCQ 40+y, R13; \
- MOVQ 48+x, R14; ADCQ 48+y, R14; \
- MOVQ $0, AX; ADCQ $0, AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- ADDQ AX, R8; MOVQ $0, AX; \
- ADCQ $0, R9; \
- ADCQ $0, R10; \
- ADCQ DX, R11; \
- ADCQ $0, R12; \
- ADCQ $0, R13; \
- ADCQ $0, R14; \
- ADCQ $0, AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- ADDQ AX, R8; MOVQ 0+x,AX; MOVQ R8, 0+x; MOVQ AX, R8; \
- ADCQ $0, R9; MOVQ 8+x,AX; MOVQ R9, 8+x; MOVQ AX, R9; \
- ADCQ $0, R10; MOVQ 16+x,AX; MOVQ R10, 16+x; MOVQ AX, R10; \
- ADCQ DX, R11; MOVQ 24+x,AX; MOVQ R11, 24+x; MOVQ AX, R11; \
- ADCQ $0, R12; MOVQ 32+x,AX; MOVQ R12, 32+x; MOVQ AX, R12; \
- ADCQ $0, R13; MOVQ 40+x,AX; MOVQ R13, 40+x; MOVQ AX, R13; \
- ADCQ $0, R14; MOVQ 48+x,AX; MOVQ R14, 48+x; MOVQ AX, R14; \
- SUBQ 0+y, R8; \
- SBBQ 8+y, R9; \
- SBBQ 16+y, R10; \
- SBBQ 24+y, R11; \
- SBBQ 32+y, R12; \
- SBBQ 40+y, R13; \
- SBBQ 48+y, R14; \
- MOVQ $0, AX; SETCS AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- SUBQ AX, R8; MOVQ $0, AX; \
- SBBQ $0, R9; \
- SBBQ $0, R10; \
- SBBQ DX, R11; \
- SBBQ $0, R12; \
- SBBQ $0, R13; \
- SBBQ $0, R14; \
- SETCS AX; \
- MOVQ AX, DX; \
- SHLQ $32, DX; \
- SUBQ AX, R8; MOVQ R8, 0+y; \
- SBBQ $0, R9; MOVQ R9, 8+y; \
- SBBQ $0, R10; MOVQ R10, 16+y; \
- SBBQ DX, R11; MOVQ R11, 24+y; \
- SBBQ $0, R12; MOVQ R12, 32+y; \
- SBBQ $0, R13; MOVQ R13, 40+y; \
- SBBQ $0, R14; MOVQ R14, 48+y;
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s b/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s
deleted file mode 100644
index 3f1f07c9..00000000
--- a/vendor/github.com/cloudflare/circl/math/fp448/fp_amd64.s
+++ /dev/null
@@ -1,75 +0,0 @@
-//go:build amd64 && !purego
-// +build amd64,!purego
-
-#include "textflag.h"
-#include "fp_amd64.h"
-
-// func cmovAmd64(x, y *Elt, n uint)
-TEXT ·cmovAmd64(SB),NOSPLIT,$0-24
- MOVQ x+0(FP), DI
- MOVQ y+8(FP), SI
- MOVQ n+16(FP), BX
- cselect(0(DI),0(SI),BX)
- RET
-
-// func cswapAmd64(x, y *Elt, n uint)
-TEXT ·cswapAmd64(SB),NOSPLIT,$0-24
- MOVQ x+0(FP), DI
- MOVQ y+8(FP), SI
- MOVQ n+16(FP), BX
- cswap(0(DI),0(SI),BX)
- RET
-
-// func subAmd64(z, x, y *Elt)
-TEXT ·subAmd64(SB),NOSPLIT,$0-24
- MOVQ z+0(FP), DI
- MOVQ x+8(FP), SI
- MOVQ y+16(FP), BX
- subtraction(0(DI),0(SI),0(BX))
- RET
-
-// func addsubAmd64(x, y *Elt)
-TEXT ·addsubAmd64(SB),NOSPLIT,$0-16
- MOVQ x+0(FP), DI
- MOVQ y+8(FP), SI
- addSub(0(DI),0(SI))
- RET
-
-#define addLegacy \
- additionLeg(0(DI),0(SI),0(BX))
-#define addBmi2Adx \
- additionAdx(0(DI),0(SI),0(BX))
-
-#define mulLegacy \
- integerMulLeg(0(SP),0(SI),0(BX)) \
- reduceFromDoubleLeg(0(DI),0(SP))
-#define mulBmi2Adx \
- integerMulAdx(0(SP),0(SI),0(BX)) \
- reduceFromDoubleAdx(0(DI),0(SP))
-
-#define sqrLegacy \
- integerSqrLeg(0(SP),0(SI)) \
- reduceFromDoubleLeg(0(DI),0(SP))
-#define sqrBmi2Adx \
- integerSqrAdx(0(SP),0(SI)) \
- reduceFromDoubleAdx(0(DI),0(SP))
-
-// func addAmd64(z, x, y *Elt)
-TEXT ·addAmd64(SB),NOSPLIT,$0-24
- MOVQ z+0(FP), DI
- MOVQ x+8(FP), SI
- MOVQ y+16(FP), BX
- CHECK_BMI2ADX(LADD, addLegacy, addBmi2Adx)
-
-// func mulAmd64(z, x, y *Elt)
-TEXT ·mulAmd64(SB),NOSPLIT,$112-24
- MOVQ z+0(FP), DI
- MOVQ x+8(FP), SI
- MOVQ y+16(FP), BX
- CHECK_BMI2ADX(LMUL, mulLegacy, mulBmi2Adx)
-
-// func sqrAmd64(z, x *Elt)
-TEXT ·sqrAmd64(SB),NOSPLIT,$112-16
- MOVQ z+0(FP), DI
- MOVQ x+8(FP), SI
- CHECK_BMI2ADX(LSQR, sqrLegacy, sqrBmi2Adx)
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go b/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go
deleted file mode 100644
index 47a0b632..00000000
--- a/vendor/github.com/cloudflare/circl/math/fp448/fp_generic.go
+++ /dev/null
@@ -1,339 +0,0 @@
-package fp448
-
-import (
- "encoding/binary"
- "math/bits"
-)
-
-func cmovGeneric(x, y *Elt, n uint) {
- m := -uint64(n & 0x1)
- x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
- x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
- x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
- x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
- x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8])
- x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8])
- x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8])
-
- y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
- y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
- y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
- y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
- y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8])
- y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8])
- y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8])
-
- x0 = (x0 &^ m) | (y0 & m)
- x1 = (x1 &^ m) | (y1 & m)
- x2 = (x2 &^ m) | (y2 & m)
- x3 = (x3 &^ m) | (y3 & m)
- x4 = (x4 &^ m) | (y4 & m)
- x5 = (x5 &^ m) | (y5 & m)
- x6 = (x6 &^ m) | (y6 & m)
-
- binary.LittleEndian.PutUint64(x[0*8:1*8], x0)
- binary.LittleEndian.PutUint64(x[1*8:2*8], x1)
- binary.LittleEndian.PutUint64(x[2*8:3*8], x2)
- binary.LittleEndian.PutUint64(x[3*8:4*8], x3)
- binary.LittleEndian.PutUint64(x[4*8:5*8], x4)
- binary.LittleEndian.PutUint64(x[5*8:6*8], x5)
- binary.LittleEndian.PutUint64(x[6*8:7*8], x6)
-}
-
-func cswapGeneric(x, y *Elt, n uint) {
- m := -uint64(n & 0x1)
- x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
- x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
- x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
- x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
- x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8])
- x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8])
- x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8])
-
- y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
- y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
- y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
- y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
- y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8])
- y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8])
- y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8])
-
- t0 := m & (x0 ^ y0)
- t1 := m & (x1 ^ y1)
- t2 := m & (x2 ^ y2)
- t3 := m & (x3 ^ y3)
- t4 := m & (x4 ^ y4)
- t5 := m & (x5 ^ y5)
- t6 := m & (x6 ^ y6)
- x0 ^= t0
- x1 ^= t1
- x2 ^= t2
- x3 ^= t3
- x4 ^= t4
- x5 ^= t5
- x6 ^= t6
- y0 ^= t0
- y1 ^= t1
- y2 ^= t2
- y3 ^= t3
- y4 ^= t4
- y5 ^= t5
- y6 ^= t6
-
- binary.LittleEndian.PutUint64(x[0*8:1*8], x0)
- binary.LittleEndian.PutUint64(x[1*8:2*8], x1)
- binary.LittleEndian.PutUint64(x[2*8:3*8], x2)
- binary.LittleEndian.PutUint64(x[3*8:4*8], x3)
- binary.LittleEndian.PutUint64(x[4*8:5*8], x4)
- binary.LittleEndian.PutUint64(x[5*8:6*8], x5)
- binary.LittleEndian.PutUint64(x[6*8:7*8], x6)
-
- binary.LittleEndian.PutUint64(y[0*8:1*8], y0)
- binary.LittleEndian.PutUint64(y[1*8:2*8], y1)
- binary.LittleEndian.PutUint64(y[2*8:3*8], y2)
- binary.LittleEndian.PutUint64(y[3*8:4*8], y3)
- binary.LittleEndian.PutUint64(y[4*8:5*8], y4)
- binary.LittleEndian.PutUint64(y[5*8:6*8], y5)
- binary.LittleEndian.PutUint64(y[6*8:7*8], y6)
-}
-
-func addGeneric(z, x, y *Elt) {
- x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
- x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
- x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
- x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
- x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8])
- x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8])
- x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8])
-
- y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
- y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
- y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
- y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
- y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8])
- y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8])
- y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8])
-
- z0, c0 := bits.Add64(x0, y0, 0)
- z1, c1 := bits.Add64(x1, y1, c0)
- z2, c2 := bits.Add64(x2, y2, c1)
- z3, c3 := bits.Add64(x3, y3, c2)
- z4, c4 := bits.Add64(x4, y4, c3)
- z5, c5 := bits.Add64(x5, y5, c4)
- z6, z7 := bits.Add64(x6, y6, c5)
-
- z0, c0 = bits.Add64(z0, z7, 0)
- z1, c1 = bits.Add64(z1, 0, c0)
- z2, c2 = bits.Add64(z2, 0, c1)
- z3, c3 = bits.Add64(z3, z7<<32, c2)
- z4, c4 = bits.Add64(z4, 0, c3)
- z5, c5 = bits.Add64(z5, 0, c4)
- z6, z7 = bits.Add64(z6, 0, c5)
-
- z0, c0 = bits.Add64(z0, z7, 0)
- z1, c1 = bits.Add64(z1, 0, c0)
- z2, c2 = bits.Add64(z2, 0, c1)
- z3, c3 = bits.Add64(z3, z7<<32, c2)
- z4, c4 = bits.Add64(z4, 0, c3)
- z5, c5 = bits.Add64(z5, 0, c4)
- z6, _ = bits.Add64(z6, 0, c5)
-
- binary.LittleEndian.PutUint64(z[0*8:1*8], z0)
- binary.LittleEndian.PutUint64(z[1*8:2*8], z1)
- binary.LittleEndian.PutUint64(z[2*8:3*8], z2)
- binary.LittleEndian.PutUint64(z[3*8:4*8], z3)
- binary.LittleEndian.PutUint64(z[4*8:5*8], z4)
- binary.LittleEndian.PutUint64(z[5*8:6*8], z5)
- binary.LittleEndian.PutUint64(z[6*8:7*8], z6)
-}
-
-func subGeneric(z, x, y *Elt) {
- x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
- x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
- x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
- x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
- x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8])
- x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8])
- x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8])
-
- y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
- y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
- y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
- y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
- y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8])
- y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8])
- y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8])
-
- z0, c0 := bits.Sub64(x0, y0, 0)
- z1, c1 := bits.Sub64(x1, y1, c0)
- z2, c2 := bits.Sub64(x2, y2, c1)
- z3, c3 := bits.Sub64(x3, y3, c2)
- z4, c4 := bits.Sub64(x4, y4, c3)
- z5, c5 := bits.Sub64(x5, y5, c4)
- z6, z7 := bits.Sub64(x6, y6, c5)
-
- z0, c0 = bits.Sub64(z0, z7, 0)
- z1, c1 = bits.Sub64(z1, 0, c0)
- z2, c2 = bits.Sub64(z2, 0, c1)
- z3, c3 = bits.Sub64(z3, z7<<32, c2)
- z4, c4 = bits.Sub64(z4, 0, c3)
- z5, c5 = bits.Sub64(z5, 0, c4)
- z6, z7 = bits.Sub64(z6, 0, c5)
-
- z0, c0 = bits.Sub64(z0, z7, 0)
- z1, c1 = bits.Sub64(z1, 0, c0)
- z2, c2 = bits.Sub64(z2, 0, c1)
- z3, c3 = bits.Sub64(z3, z7<<32, c2)
- z4, c4 = bits.Sub64(z4, 0, c3)
- z5, c5 = bits.Sub64(z5, 0, c4)
- z6, _ = bits.Sub64(z6, 0, c5)
-
- binary.LittleEndian.PutUint64(z[0*8:1*8], z0)
- binary.LittleEndian.PutUint64(z[1*8:2*8], z1)
- binary.LittleEndian.PutUint64(z[2*8:3*8], z2)
- binary.LittleEndian.PutUint64(z[3*8:4*8], z3)
- binary.LittleEndian.PutUint64(z[4*8:5*8], z4)
- binary.LittleEndian.PutUint64(z[5*8:6*8], z5)
- binary.LittleEndian.PutUint64(z[6*8:7*8], z6)
-}
-
-func addsubGeneric(x, y *Elt) {
- z := &Elt{}
- addGeneric(z, x, y)
- subGeneric(y, x, y)
- *x = *z
-}
-
-func mulGeneric(z, x, y *Elt) {
- x0 := binary.LittleEndian.Uint64(x[0*8 : 1*8])
- x1 := binary.LittleEndian.Uint64(x[1*8 : 2*8])
- x2 := binary.LittleEndian.Uint64(x[2*8 : 3*8])
- x3 := binary.LittleEndian.Uint64(x[3*8 : 4*8])
- x4 := binary.LittleEndian.Uint64(x[4*8 : 5*8])
- x5 := binary.LittleEndian.Uint64(x[5*8 : 6*8])
- x6 := binary.LittleEndian.Uint64(x[6*8 : 7*8])
-
- y0 := binary.LittleEndian.Uint64(y[0*8 : 1*8])
- y1 := binary.LittleEndian.Uint64(y[1*8 : 2*8])
- y2 := binary.LittleEndian.Uint64(y[2*8 : 3*8])
- y3 := binary.LittleEndian.Uint64(y[3*8 : 4*8])
- y4 := binary.LittleEndian.Uint64(y[4*8 : 5*8])
- y5 := binary.LittleEndian.Uint64(y[5*8 : 6*8])
- y6 := binary.LittleEndian.Uint64(y[6*8 : 7*8])
-
- yy := [7]uint64{y0, y1, y2, y3, y4, y5, y6}
- zz := [7]uint64{}
-
- yi := yy[0]
- h0, l0 := bits.Mul64(x0, yi)
- h1, l1 := bits.Mul64(x1, yi)
- h2, l2 := bits.Mul64(x2, yi)
- h3, l3 := bits.Mul64(x3, yi)
- h4, l4 := bits.Mul64(x4, yi)
- h5, l5 := bits.Mul64(x5, yi)
- h6, l6 := bits.Mul64(x6, yi)
-
- zz[0] = l0
- a0, c0 := bits.Add64(h0, l1, 0)
- a1, c1 := bits.Add64(h1, l2, c0)
- a2, c2 := bits.Add64(h2, l3, c1)
- a3, c3 := bits.Add64(h3, l4, c2)
- a4, c4 := bits.Add64(h4, l5, c3)
- a5, c5 := bits.Add64(h5, l6, c4)
- a6, _ := bits.Add64(h6, 0, c5)
-
- for i := 1; i < 7; i++ {
- yi = yy[i]
- h0, l0 = bits.Mul64(x0, yi)
- h1, l1 = bits.Mul64(x1, yi)
- h2, l2 = bits.Mul64(x2, yi)
- h3, l3 = bits.Mul64(x3, yi)
- h4, l4 = bits.Mul64(x4, yi)
- h5, l5 = bits.Mul64(x5, yi)
- h6, l6 = bits.Mul64(x6, yi)
-
- zz[i], c0 = bits.Add64(a0, l0, 0)
- a0, c1 = bits.Add64(a1, l1, c0)
- a1, c2 = bits.Add64(a2, l2, c1)
- a2, c3 = bits.Add64(a3, l3, c2)
- a3, c4 = bits.Add64(a4, l4, c3)
- a4, c5 = bits.Add64(a5, l5, c4)
- a5, a6 = bits.Add64(a6, l6, c5)
-
- a0, c0 = bits.Add64(a0, h0, 0)
- a1, c1 = bits.Add64(a1, h1, c0)
- a2, c2 = bits.Add64(a2, h2, c1)
- a3, c3 = bits.Add64(a3, h3, c2)
- a4, c4 = bits.Add64(a4, h4, c3)
- a5, c5 = bits.Add64(a5, h5, c4)
- a6, _ = bits.Add64(a6, h6, c5)
- }
- red64(z, &zz, &[7]uint64{a0, a1, a2, a3, a4, a5, a6})
-}
-
-func sqrGeneric(z, x *Elt) { mulGeneric(z, x, x) }
-
-func red64(z *Elt, l, h *[7]uint64) {
- /* (2C13, 2C12, 2C11, 2C10|C10, C9, C8, C7) + (C6,...,C0) */
- h0 := h[0]
- h1 := h[1]
- h2 := h[2]
- h3 := ((h[3] & (0xFFFFFFFF << 32)) << 1) | (h[3] & 0xFFFFFFFF)
- h4 := (h[3] >> 63) | (h[4] << 1)
- h5 := (h[4] >> 63) | (h[5] << 1)
- h6 := (h[5] >> 63) | (h[6] << 1)
- h7 := (h[6] >> 63)
-
- l0, c0 := bits.Add64(h0, l[0], 0)
- l1, c1 := bits.Add64(h1, l[1], c0)
- l2, c2 := bits.Add64(h2, l[2], c1)
- l3, c3 := bits.Add64(h3, l[3], c2)
- l4, c4 := bits.Add64(h4, l[4], c3)
- l5, c5 := bits.Add64(h5, l[5], c4)
- l6, c6 := bits.Add64(h6, l[6], c5)
- l7, _ := bits.Add64(h7, 0, c6)
-
- /* (C10C9, C9C8,C8C7,C7C13,C13C12,C12C11,C11C10) + (C6,...,C0) */
- h0 = (h[3] >> 32) | (h[4] << 32)
- h1 = (h[4] >> 32) | (h[5] << 32)
- h2 = (h[5] >> 32) | (h[6] << 32)
- h3 = (h[6] >> 32) | (h[0] << 32)
- h4 = (h[0] >> 32) | (h[1] << 32)
- h5 = (h[1] >> 32) | (h[2] << 32)
- h6 = (h[2] >> 32) | (h[3] << 32)
-
- l0, c0 = bits.Add64(l0, h0, 0)
- l1, c1 = bits.Add64(l1, h1, c0)
- l2, c2 = bits.Add64(l2, h2, c1)
- l3, c3 = bits.Add64(l3, h3, c2)
- l4, c4 = bits.Add64(l4, h4, c3)
- l5, c5 = bits.Add64(l5, h5, c4)
- l6, c6 = bits.Add64(l6, h6, c5)
- l7, _ = bits.Add64(l7, 0, c6)
-
- /* (C7) + (C6,...,C0) */
- l0, c0 = bits.Add64(l0, l7, 0)
- l1, c1 = bits.Add64(l1, 0, c0)
- l2, c2 = bits.Add64(l2, 0, c1)
- l3, c3 = bits.Add64(l3, l7<<32, c2)
- l4, c4 = bits.Add64(l4, 0, c3)
- l5, c5 = bits.Add64(l5, 0, c4)
- l6, l7 = bits.Add64(l6, 0, c5)
-
- /* (C7) + (C6,...,C0) */
- l0, c0 = bits.Add64(l0, l7, 0)
- l1, c1 = bits.Add64(l1, 0, c0)
- l2, c2 = bits.Add64(l2, 0, c1)
- l3, c3 = bits.Add64(l3, l7<<32, c2)
- l4, c4 = bits.Add64(l4, 0, c3)
- l5, c5 = bits.Add64(l5, 0, c4)
- l6, _ = bits.Add64(l6, 0, c5)
-
- binary.LittleEndian.PutUint64(z[0*8:1*8], l0)
- binary.LittleEndian.PutUint64(z[1*8:2*8], l1)
- binary.LittleEndian.PutUint64(z[2*8:3*8], l2)
- binary.LittleEndian.PutUint64(z[3*8:4*8], l3)
- binary.LittleEndian.PutUint64(z[4*8:5*8], l4)
- binary.LittleEndian.PutUint64(z[5*8:6*8], l5)
- binary.LittleEndian.PutUint64(z[6*8:7*8], l6)
-}
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go b/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go
deleted file mode 100644
index a62225d2..00000000
--- a/vendor/github.com/cloudflare/circl/math/fp448/fp_noasm.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build !amd64 || purego
-// +build !amd64 purego
-
-package fp448
-
-func cmov(x, y *Elt, n uint) { cmovGeneric(x, y, n) }
-func cswap(x, y *Elt, n uint) { cswapGeneric(x, y, n) }
-func add(z, x, y *Elt) { addGeneric(z, x, y) }
-func sub(z, x, y *Elt) { subGeneric(z, x, y) }
-func addsub(x, y *Elt) { addsubGeneric(x, y) }
-func mul(z, x, y *Elt) { mulGeneric(z, x, y) }
-func sqr(z, x *Elt) { sqrGeneric(z, x) }
diff --git a/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go b/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go
deleted file mode 100644
index 2d7afc80..00000000
--- a/vendor/github.com/cloudflare/circl/math/fp448/fuzzer.go
+++ /dev/null
@@ -1,75 +0,0 @@
-//go:build gofuzz
-// +build gofuzz
-
-// How to run the fuzzer:
-//
-// $ go get -u github.com/dvyukov/go-fuzz/go-fuzz
-// $ go get -u github.com/dvyukov/go-fuzz/go-fuzz-build
-// $ go-fuzz-build -libfuzzer -func FuzzReduction -o lib.a
-// $ clang -fsanitize=fuzzer lib.a -o fu.exe
-// $ ./fu.exe
-package fp448
-
-import (
- "encoding/binary"
- "fmt"
- "math/big"
-
- "github.com/cloudflare/circl/internal/conv"
-)
-
-// FuzzReduction is a fuzzer target for red64 function, which reduces t
-// (112 bits) to a number t' (56 bits) congruent modulo p448.
-func FuzzReduction(data []byte) int {
- if len(data) != 2*Size {
- return -1
- }
- var got, want Elt
- var lo, hi [7]uint64
- a := data[:Size]
- b := data[Size:]
- lo[0] = binary.LittleEndian.Uint64(a[0*8 : 1*8])
- lo[1] = binary.LittleEndian.Uint64(a[1*8 : 2*8])
- lo[2] = binary.LittleEndian.Uint64(a[2*8 : 3*8])
- lo[3] = binary.LittleEndian.Uint64(a[3*8 : 4*8])
- lo[4] = binary.LittleEndian.Uint64(a[4*8 : 5*8])
- lo[5] = binary.LittleEndian.Uint64(a[5*8 : 6*8])
- lo[6] = binary.LittleEndian.Uint64(a[6*8 : 7*8])
-
- hi[0] = binary.LittleEndian.Uint64(b[0*8 : 1*8])
- hi[1] = binary.LittleEndian.Uint64(b[1*8 : 2*8])
- hi[2] = binary.LittleEndian.Uint64(b[2*8 : 3*8])
- hi[3] = binary.LittleEndian.Uint64(b[3*8 : 4*8])
- hi[4] = binary.LittleEndian.Uint64(b[4*8 : 5*8])
- hi[5] = binary.LittleEndian.Uint64(b[5*8 : 6*8])
- hi[6] = binary.LittleEndian.Uint64(b[6*8 : 7*8])
-
- red64(&got, &lo, &hi)
-
- t := conv.BytesLe2BigInt(data[:2*Size])
-
- two448 := big.NewInt(1)
- two448.Lsh(two448, 448) // 2^448
- mask448 := big.NewInt(1)
- mask448.Sub(two448, mask448) // 2^448-1
- two224plus1 := big.NewInt(1)
- two224plus1.Lsh(two224plus1, 224)
- two224plus1.Add(two224plus1, big.NewInt(1)) // 2^224+1
-
- var loBig, hiBig big.Int
- for t.Cmp(two448) >= 0 {
- loBig.And(t, mask448)
- hiBig.Rsh(t, 448)
- t.Mul(&hiBig, two224plus1)
- t.Add(t, &loBig)
- }
- conv.BigInt2BytesLe(want[:], t)
-
- if got != want {
- fmt.Printf("in: %v\n", conv.BytesLe2BigInt(data[:2*Size]))
- fmt.Printf("got: %v\n", got)
- fmt.Printf("want: %v\n", want)
- panic("error found")
- }
- return 1
-}
diff --git a/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go b/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go
deleted file mode 100644
index a43851b8..00000000
--- a/vendor/github.com/cloudflare/circl/math/mlsbset/mlsbset.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Package mlsbset provides a constant-time exponentiation method with precomputation.
-//
-// References: "Efficient and secure algorithms for GLV-based scalar
-// multiplication and their implementation on GLV–GLS curves" by (Faz-Hernandez et al.)
-// - https://doi.org/10.1007/s13389-014-0085-7
-// - https://eprint.iacr.org/2013/158
-package mlsbset
-
-import (
- "errors"
- "fmt"
- "math/big"
-
- "github.com/cloudflare/circl/internal/conv"
-)
-
-// EltG is a group element.
-type EltG interface{}
-
-// EltP is a precomputed group element.
-type EltP interface{}
-
-// Group defines the operations required by MLSBSet exponentiation method.
-type Group interface {
- Identity() EltG // Returns the identity of the group.
- Sqr(x EltG) // Calculates x = x^2.
- Mul(x EltG, y EltP) // Calculates x = x*y.
- NewEltP() EltP // Returns an arbitrary precomputed element.
- ExtendedEltP() EltP // Returns the precomputed element x^(2^(w*d)).
- Lookup(a EltP, v uint, s, u int32) // Sets a = s*T[v][u].
-}
-
-// Params contains the parameters of the encoding.
-type Params struct {
- T uint // T is the maximum size (in bits) of exponents.
- V uint // V is the number of tables.
- W uint // W is the window size.
- E uint // E is the number of digits per table.
- D uint // D is the number of digits in total.
- L uint // L is the length of the code.
-}
-
-// Encoder allows to convert integers into valid powers.
-type Encoder struct{ p Params }
-
-// New produces an encoder of the MLSBSet algorithm.
-func New(t, v, w uint) (Encoder, error) {
- if !(t > 1 && v >= 1 && w >= 2) {
- return Encoder{}, errors.New("t>1, v>=1, w>=2")
- }
- e := (t + w*v - 1) / (w * v)
- d := e * v
- l := d * w
- return Encoder{Params{t, v, w, e, d, l}}, nil
-}
-
-// Encode converts an odd integer k into a valid power for exponentiation.
-func (m Encoder) Encode(k []byte) (*Power, error) {
- if len(k) == 0 {
- return nil, errors.New("empty slice")
- }
- if !(len(k) <= int(m.p.L+7)>>3) {
- return nil, errors.New("k too big")
- }
- if k[0]%2 == 0 {
- return nil, errors.New("k must be odd")
- }
- ap := int((m.p.L+7)/8) - len(k)
- k = append(k, make([]byte, ap)...)
- s := m.signs(k)
- b := make([]int32, m.p.L-m.p.D)
- c := conv.BytesLe2BigInt(k)
- c.Rsh(c, m.p.D)
- var bi big.Int
- for i := m.p.D; i < m.p.L; i++ {
- c0 := int32(c.Bit(0))
- b[i-m.p.D] = s[i%m.p.D] * c0
- bi.SetInt64(int64(b[i-m.p.D] >> 1))
- c.Rsh(c, 1)
- c.Sub(c, &bi)
- }
- carry := int(c.Int64())
- return &Power{m, s, b, carry}, nil
-}
-
-// signs calculates the set of signs.
-func (m Encoder) signs(k []byte) []int32 {
- s := make([]int32, m.p.D)
- s[m.p.D-1] = 1
- for i := uint(1); i < m.p.D; i++ {
- ki := int32((k[i>>3] >> (i & 0x7)) & 0x1)
- s[i-1] = 2*ki - 1
- }
- return s
-}
-
-// GetParams returns the complementary parameters of the encoding.
-func (m Encoder) GetParams() Params { return m.p }
-
-// tableSize returns the size of each table.
-func (m Encoder) tableSize() uint { return 1 << (m.p.W - 1) }
-
-// Elts returns the total number of elements that must be precomputed.
-func (m Encoder) Elts() uint { return m.p.V * m.tableSize() }
-
-// IsExtended returns true if the element x^(2^(wd)) must be calculated.
-func (m Encoder) IsExtended() bool { q := m.p.T / (m.p.V * m.p.W); return m.p.T == q*m.p.V*m.p.W }
-
-// Ops returns the number of squares and multiplications executed during an exponentiation.
-func (m Encoder) Ops() (S uint, M uint) {
- S = m.p.E
- M = m.p.E * m.p.V
- if m.IsExtended() {
- M++
- }
- return
-}
-
-func (m Encoder) String() string {
- return fmt.Sprintf("T: %v W: %v V: %v e: %v d: %v l: %v wv|t: %v",
- m.p.T, m.p.W, m.p.V, m.p.E, m.p.D, m.p.L, m.IsExtended())
-}
diff --git a/vendor/github.com/cloudflare/circl/math/mlsbset/power.go b/vendor/github.com/cloudflare/circl/math/mlsbset/power.go
deleted file mode 100644
index 3f214c30..00000000
--- a/vendor/github.com/cloudflare/circl/math/mlsbset/power.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package mlsbset
-
-import "fmt"
-
-// Power is a valid exponent produced by the MLSBSet encoding algorithm.
-type Power struct {
- set Encoder // parameters of code.
- s []int32 // set of signs.
- b []int32 // set of digits.
- c int // carry is {0,1}.
-}
-
-// Exp is calculates x^k, where x is a predetermined element of a group G.
-func (p *Power) Exp(G Group) EltG {
- a, b := G.Identity(), G.NewEltP()
- for e := int(p.set.p.E - 1); e >= 0; e-- {
- G.Sqr(a)
- for v := uint(0); v < p.set.p.V; v++ {
- sgnElt, idElt := p.Digit(v, uint(e))
- G.Lookup(b, v, sgnElt, idElt)
- G.Mul(a, b)
- }
- }
- if p.set.IsExtended() && p.c == 1 {
- G.Mul(a, G.ExtendedEltP())
- }
- return a
-}
-
-// Digit returns the (v,e)-th digit and its sign.
-func (p *Power) Digit(v, e uint) (sgn, dig int32) {
- sgn = p.bit(0, v, e)
- dig = 0
- for i := p.set.p.W - 1; i > 0; i-- {
- dig = 2*dig + p.bit(i, v, e)
- }
- mask := dig >> 31
- dig = (dig + mask) ^ mask
- return sgn, dig
-}
-
-// bit returns the (w,v,e)-th bit of the code.
-func (p *Power) bit(w, v, e uint) int32 {
- if !(w < p.set.p.W &&
- v < p.set.p.V &&
- e < p.set.p.E) {
- panic(fmt.Errorf("indexes outside (%v,%v,%v)", w, v, e))
- }
- if w == 0 {
- return p.s[p.set.p.E*v+e]
- }
- return p.b[p.set.p.D*(w-1)+p.set.p.E*v+e]
-}
-
-func (p *Power) String() string {
- dig := ""
- for j := uint(0); j < p.set.p.V; j++ {
- for i := uint(0); i < p.set.p.E; i++ {
- s, d := p.Digit(j, i)
- dig += fmt.Sprintf("(%2v,%2v) = %+2v %+2v\n", j, i, s, d)
- }
- }
- return fmt.Sprintf("len: %v\ncarry: %v\ndigits:\n%v", len(p.b)+len(p.s), p.c, dig)
-}
diff --git a/vendor/github.com/cloudflare/circl/math/primes.go b/vendor/github.com/cloudflare/circl/math/primes.go
deleted file mode 100644
index 158fd83a..00000000
--- a/vendor/github.com/cloudflare/circl/math/primes.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package math
-
-import (
- "crypto/rand"
- "io"
- "math/big"
-)
-
-// IsSafePrime reports whether p is (probably) a safe prime.
-// The prime p=2*q+1 is safe prime if both p and q are primes.
-// Note that ProbablyPrime is not suitable for judging primes
-// that an adversary may have crafted to fool the test.
-func IsSafePrime(p *big.Int) bool {
- pdiv2 := new(big.Int).Rsh(p, 1)
- return p.ProbablyPrime(20) && pdiv2.ProbablyPrime(20)
-}
-
-// SafePrime returns a number of the given bit length that is a safe prime with high probability.
-// The number returned p=2*q+1 is a safe prime if both p and q are primes.
-// SafePrime will return error for any error returned by rand.Read or if bits < 2.
-func SafePrime(random io.Reader, bits int) (*big.Int, error) {
- one := big.NewInt(1)
- p := new(big.Int)
- for {
- q, err := rand.Prime(random, bits-1)
- if err != nil {
- return nil, err
- }
- p.Lsh(q, 1).Add(p, one)
- if p.ProbablyPrime(20) {
- return p, nil
- }
- }
-}
diff --git a/vendor/github.com/cloudflare/circl/math/wnaf.go b/vendor/github.com/cloudflare/circl/math/wnaf.go
deleted file mode 100644
index 94a1ec50..00000000
--- a/vendor/github.com/cloudflare/circl/math/wnaf.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Package math provides some utility functions for big integers.
-package math
-
-import "math/big"
-
-// SignedDigit obtains the signed-digit recoding of n and returns a list L of
-// digits such that n = sum( L[i]*2^(i*(w-1)) ), and each L[i] is an odd number
-// in the set {±1, ±3, ..., ±2^(w-1)-1}. The third parameter ensures that the
-// output has ceil(l/(w-1)) digits.
-//
-// Restrictions:
-// - n is odd and n > 0.
-// - 1 < w < 32.
-// - l >= bit length of n.
-//
-// References:
-// - Alg.6 in "Exponent Recoding and Regular Exponentiation Algorithms"
-// by Joye-Tunstall. http://doi.org/10.1007/978-3-642-02384-2_21
-// - Alg.6 in "Selecting Elliptic Curves for Cryptography: An Efficiency and
-// Security Analysis" by Bos et al. http://doi.org/10.1007/s13389-015-0097-y
-func SignedDigit(n *big.Int, w, l uint) []int32 {
- if n.Sign() <= 0 || n.Bit(0) == 0 {
- panic("n must be non-zero, odd, and positive")
- }
- if w <= 1 || w >= 32 {
- panic("Verify that 1 < w < 32")
- }
- if uint(n.BitLen()) > l {
- panic("n is too big to fit in l digits")
- }
- lenN := (l + (w - 1) - 1) / (w - 1) // ceil(l/(w-1))
- L := make([]int32, lenN+1)
- var k, v big.Int
- k.Set(n)
-
- var i uint
- for i = 0; i < lenN; i++ {
- words := k.Bits()
- value := int32(words[0] & ((1 << w) - 1))
- value -= int32(1) << (w - 1)
- L[i] = value
- v.SetInt64(int64(value))
- k.Sub(&k, &v)
- k.Rsh(&k, w-1)
- }
- L[i] = int32(k.Int64())
- return L
-}
-
-// OmegaNAF obtains the window-w Non-Adjacent Form of a positive number n and
-// 1 < w < 32. The returned slice L holds n = sum( L[i]*2^i ).
-//
-// Reference:
-// - Alg.9 "Efficient arithmetic on Koblitz curves" by Solinas.
-// http://doi.org/10.1023/A:1008306223194
-func OmegaNAF(n *big.Int, w uint) (L []int32) {
- if n.Sign() < 0 {
- panic("n must be positive")
- }
- if w <= 1 || w >= 32 {
- panic("Verify that 1 < w < 32")
- }
-
- L = make([]int32, n.BitLen()+1)
- var k, v big.Int
- k.Set(n)
-
- i := 0
- for ; k.Sign() > 0; i++ {
- value := int32(0)
- if k.Bit(0) == 1 {
- words := k.Bits()
- value = int32(words[0] & ((1 << w) - 1))
- if value >= (int32(1) << (w - 1)) {
- value -= int32(1) << w
- }
- v.SetInt64(int64(value))
- k.Sub(&k, &v)
- }
- L[i] = value
- k.Rsh(&k, 1)
- }
- return L[:i]
-}
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go b/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go
deleted file mode 100644
index 2c73c26f..00000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/ed25519.go
+++ /dev/null
@@ -1,453 +0,0 @@
-// Package ed25519 implements Ed25519 signature scheme as described in RFC-8032.
-//
-// This package provides optimized implementations of the three signature
-// variants and maintaining closer compatibility with crypto/ed25519.
-//
-// | Scheme Name | Sign Function | Verification | Context |
-// |-------------|-------------------|---------------|-------------------|
-// | Ed25519 | Sign | Verify | None |
-// | Ed25519Ph | SignPh | VerifyPh | Yes, can be empty |
-// | Ed25519Ctx | SignWithCtx | VerifyWithCtx | Yes, non-empty |
-// | All above | (PrivateKey).Sign | VerifyAny | As above |
-//
-// Specific functions for sign and verify are defined. A generic signing
-// function for all schemes is available through the crypto.Signer interface,
-// which is implemented by the PrivateKey type. A correspond all-in-one
-// verification method is provided by the VerifyAny function.
-//
-// Signing with Ed25519Ph or Ed25519Ctx requires a context string for domain
-// separation. This parameter is passed using a SignerOptions struct defined
-// in this package. While Ed25519Ph accepts an empty context, Ed25519Ctx
-// enforces non-empty context strings.
-//
-// # Compatibility with crypto.ed25519
-//
-// These functions are compatible with the “Ed25519” function defined in
-// RFC-8032. However, unlike RFC 8032's formulation, this package's private
-// key representation includes a public key suffix to make multiple signing
-// operations with the same key more efficient. This package refers to the
-// RFC-8032 private key as the “seed”.
-//
-// References
-//
-// - RFC-8032: https://rfc-editor.org/rfc/rfc8032.txt
-// - Ed25519: https://ed25519.cr.yp.to/
-// - EdDSA: High-speed high-security signatures. https://doi.org/10.1007/s13389-012-0027-1
-package ed25519
-
-import (
- "bytes"
- "crypto"
- cryptoRand "crypto/rand"
- "crypto/sha512"
- "crypto/subtle"
- "errors"
- "fmt"
- "io"
- "strconv"
-
- "github.com/cloudflare/circl/sign"
-)
-
-const (
- // ContextMaxSize is the maximum length (in bytes) allowed for context.
- ContextMaxSize = 255
- // PublicKeySize is the size, in bytes, of public keys as used in this package.
- PublicKeySize = 32
- // PrivateKeySize is the size, in bytes, of private keys as used in this package.
- PrivateKeySize = 64
- // SignatureSize is the size, in bytes, of signatures generated and verified by this package.
- SignatureSize = 64
- // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
- SeedSize = 32
-)
-
-const (
- paramB = 256 / 8 // Size of keys in bytes.
-)
-
-// SignerOptions implements crypto.SignerOpts and augments with parameters
-// that are specific to the Ed25519 signature schemes.
-type SignerOptions struct {
- // Hash must be crypto.Hash(0) for Ed25519/Ed25519ctx, or crypto.SHA512
- // for Ed25519ph.
- crypto.Hash
-
- // Context is an optional domain separation string for Ed25519ph and a
- // must for Ed25519ctx. Its length must be less or equal than 255 bytes.
- Context string
-
- // Scheme is an identifier for choosing a signature scheme. The zero value
- // is ED25519.
- Scheme SchemeID
-}
-
-// SchemeID is an identifier for each signature scheme.
-type SchemeID uint
-
-const (
- ED25519 SchemeID = iota
- ED25519Ph
- ED25519Ctx
-)
-
-// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
-type PrivateKey []byte
-
-// Equal reports whether priv and x have the same value.
-func (priv PrivateKey) Equal(x crypto.PrivateKey) bool {
- xx, ok := x.(PrivateKey)
- return ok && subtle.ConstantTimeCompare(priv, xx) == 1
-}
-
-// Public returns the PublicKey corresponding to priv.
-func (priv PrivateKey) Public() crypto.PublicKey {
- publicKey := make(PublicKey, PublicKeySize)
- copy(publicKey, priv[SeedSize:])
- return publicKey
-}
-
-// Seed returns the private key seed corresponding to priv. It is provided for
-// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
-// in this package.
-func (priv PrivateKey) Seed() []byte {
- seed := make([]byte, SeedSize)
- copy(seed, priv[:SeedSize])
- return seed
-}
-
-func (priv PrivateKey) Scheme() sign.Scheme { return sch }
-
-func (pub PublicKey) Scheme() sign.Scheme { return sch }
-
-func (priv PrivateKey) MarshalBinary() (data []byte, err error) {
- privateKey := make(PrivateKey, PrivateKeySize)
- copy(privateKey, priv)
- return privateKey, nil
-}
-
-func (pub PublicKey) MarshalBinary() (data []byte, err error) {
- publicKey := make(PublicKey, PublicKeySize)
- copy(publicKey, pub)
- return publicKey, nil
-}
-
-// Equal reports whether pub and x have the same value.
-func (pub PublicKey) Equal(x crypto.PublicKey) bool {
- xx, ok := x.(PublicKey)
- return ok && bytes.Equal(pub, xx)
-}
-
-// Sign creates a signature of a message with priv key.
-// This function is compatible with crypto.ed25519 and also supports the
-// three signature variants defined in RFC-8032, namely Ed25519 (or pure
-// EdDSA), Ed25519Ph, and Ed25519Ctx.
-// The opts.HashFunc() must return zero to specify either Ed25519 or Ed25519Ctx
-// variant. This can be achieved by passing crypto.Hash(0) as the value for
-// opts.
-// The opts.HashFunc() must return SHA512 to specify the Ed25519Ph variant.
-// This can be achieved by passing crypto.SHA512 as the value for opts.
-// Use a SignerOptions struct (defined in this package) to pass a context
-// string for signing.
-func (priv PrivateKey) Sign(
- rand io.Reader,
- message []byte,
- opts crypto.SignerOpts,
-) (signature []byte, err error) {
- var ctx string
- var scheme SchemeID
- if o, ok := opts.(SignerOptions); ok {
- ctx = o.Context
- scheme = o.Scheme
- }
-
- switch true {
- case scheme == ED25519 && opts.HashFunc() == crypto.Hash(0):
- return Sign(priv, message), nil
- case scheme == ED25519Ph && opts.HashFunc() == crypto.SHA512:
- return SignPh(priv, message, ctx), nil
- case scheme == ED25519Ctx && opts.HashFunc() == crypto.Hash(0) && len(ctx) > 0:
- return SignWithCtx(priv, message, ctx), nil
- default:
- return nil, errors.New("ed25519: bad hash algorithm")
- }
-}
-
-// GenerateKey generates a public/private key pair using entropy from rand.
-// If rand is nil, crypto/rand.Reader will be used.
-func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
- if rand == nil {
- rand = cryptoRand.Reader
- }
-
- seed := make([]byte, SeedSize)
- if _, err := io.ReadFull(rand, seed); err != nil {
- return nil, nil, err
- }
-
- privateKey := NewKeyFromSeed(seed)
- publicKey := make(PublicKey, PublicKeySize)
- copy(publicKey, privateKey[SeedSize:])
-
- return publicKey, privateKey, nil
-}
-
-// NewKeyFromSeed calculates a private key from a seed. It will panic if
-// len(seed) is not SeedSize. This function is provided for interoperability
-// with RFC 8032. RFC 8032's private keys correspond to seeds in this
-// package.
-func NewKeyFromSeed(seed []byte) PrivateKey {
- privateKey := make(PrivateKey, PrivateKeySize)
- newKeyFromSeed(privateKey, seed)
- return privateKey
-}
-
-func newKeyFromSeed(privateKey, seed []byte) {
- if l := len(seed); l != SeedSize {
- panic("ed25519: bad seed length: " + strconv.Itoa(l))
- }
- var P pointR1
- k := sha512.Sum512(seed)
- clamp(k[:])
- reduceModOrder(k[:paramB], false)
- P.fixedMult(k[:paramB])
- copy(privateKey[:SeedSize], seed)
- _ = P.ToBytes(privateKey[SeedSize:])
-}
-
-func signAll(signature []byte, privateKey PrivateKey, message, ctx []byte, preHash bool) {
- if l := len(privateKey); l != PrivateKeySize {
- panic("ed25519: bad private key length: " + strconv.Itoa(l))
- }
-
- H := sha512.New()
- var PHM []byte
-
- if preHash {
- _, _ = H.Write(message)
- PHM = H.Sum(nil)
- H.Reset()
- } else {
- PHM = message
- }
-
- // 1. Hash the 32-byte private key using SHA-512.
- _, _ = H.Write(privateKey[:SeedSize])
- h := H.Sum(nil)
- clamp(h[:])
- prefix, s := h[paramB:], h[:paramB]
-
- // 2. Compute SHA-512(dom2(F, C) || prefix || PH(M))
- H.Reset()
-
- writeDom(H, ctx, preHash)
-
- _, _ = H.Write(prefix)
- _, _ = H.Write(PHM)
- r := H.Sum(nil)
- reduceModOrder(r[:], true)
-
- // 3. Compute the point [r]B.
- var P pointR1
- P.fixedMult(r[:paramB])
- R := (&[paramB]byte{})[:]
- if err := P.ToBytes(R); err != nil {
- panic(err)
- }
-
- // 4. Compute SHA512(dom2(F, C) || R || A || PH(M)).
- H.Reset()
-
- writeDom(H, ctx, preHash)
-
- _, _ = H.Write(R)
- _, _ = H.Write(privateKey[SeedSize:])
- _, _ = H.Write(PHM)
- hRAM := H.Sum(nil)
-
- reduceModOrder(hRAM[:], true)
-
- // 5. Compute S = (r + k * s) mod order.
- S := (&[paramB]byte{})[:]
- calculateS(S, r[:paramB], hRAM[:paramB], s)
-
- // 6. The signature is the concatenation of R and S.
- copy(signature[:paramB], R[:])
- copy(signature[paramB:], S[:])
-}
-
-// Sign signs the message with privateKey and returns a signature.
-// This function supports the signature variant defined in RFC-8032: Ed25519,
-// also known as the pure version of EdDSA.
-// It will panic if len(privateKey) is not PrivateKeySize.
-func Sign(privateKey PrivateKey, message []byte) []byte {
- signature := make([]byte, SignatureSize)
- signAll(signature, privateKey, message, []byte(""), false)
- return signature
-}
-
-// SignPh creates a signature of a message with private key and context.
-// This function supports the signature variant defined in RFC-8032: Ed25519ph,
-// meaning it internally hashes the message using SHA-512, and optionally
-// accepts a context string.
-// It will panic if len(privateKey) is not PrivateKeySize.
-// Context could be passed to this function, which length should be no more than
-// ContextMaxSize=255. It can be empty.
-func SignPh(privateKey PrivateKey, message []byte, ctx string) []byte {
- if len(ctx) > ContextMaxSize {
- panic(fmt.Errorf("ed25519: bad context length: %v", len(ctx)))
- }
-
- signature := make([]byte, SignatureSize)
- signAll(signature, privateKey, message, []byte(ctx), true)
- return signature
-}
-
-// SignWithCtx creates a signature of a message with private key and context.
-// This function supports the signature variant defined in RFC-8032: Ed25519ctx,
-// meaning it accepts a non-empty context string.
-// It will panic if len(privateKey) is not PrivateKeySize.
-// Context must be passed to this function, which length should be no more than
-// ContextMaxSize=255 and cannot be empty.
-func SignWithCtx(privateKey PrivateKey, message []byte, ctx string) []byte {
- if len(ctx) == 0 || len(ctx) > ContextMaxSize {
- panic(fmt.Errorf("ed25519: bad context length: %v > %v", len(ctx), ContextMaxSize))
- }
-
- signature := make([]byte, SignatureSize)
- signAll(signature, privateKey, message, []byte(ctx), false)
- return signature
-}
-
-func verify(public PublicKey, message, signature, ctx []byte, preHash bool) bool {
- if len(public) != PublicKeySize ||
- len(signature) != SignatureSize ||
- !isLessThanOrder(signature[paramB:]) {
- return false
- }
-
- var P pointR1
- if ok := P.FromBytes(public); !ok {
- return false
- }
-
- H := sha512.New()
- var PHM []byte
-
- if preHash {
- _, _ = H.Write(message)
- PHM = H.Sum(nil)
- H.Reset()
- } else {
- PHM = message
- }
-
- R := signature[:paramB]
-
- writeDom(H, ctx, preHash)
-
- _, _ = H.Write(R)
- _, _ = H.Write(public)
- _, _ = H.Write(PHM)
- hRAM := H.Sum(nil)
- reduceModOrder(hRAM[:], true)
-
- var Q pointR1
- encR := (&[paramB]byte{})[:]
- P.neg()
- Q.doubleMult(&P, signature[paramB:], hRAM[:paramB])
- _ = Q.ToBytes(encR)
- return bytes.Equal(R, encR)
-}
-
-// VerifyAny returns true if the signature is valid. Failure cases are invalid
-// signature, or when the public key cannot be decoded.
-// This function supports all the three signature variants defined in RFC-8032,
-// namely Ed25519 (or pure EdDSA), Ed25519Ph, and Ed25519Ctx.
-// The opts.HashFunc() must return zero to specify either Ed25519 or Ed25519Ctx
-// variant. This can be achieved by passing crypto.Hash(0) as the value for opts.
-// The opts.HashFunc() must return SHA512 to specify the Ed25519Ph variant.
-// This can be achieved by passing crypto.SHA512 as the value for opts.
-// Use a SignerOptions struct to pass a context string for signing.
-func VerifyAny(public PublicKey, message, signature []byte, opts crypto.SignerOpts) bool {
- var ctx string
- var scheme SchemeID
- if o, ok := opts.(SignerOptions); ok {
- ctx = o.Context
- scheme = o.Scheme
- }
-
- switch true {
- case scheme == ED25519 && opts.HashFunc() == crypto.Hash(0):
- return Verify(public, message, signature)
- case scheme == ED25519Ph && opts.HashFunc() == crypto.SHA512:
- return VerifyPh(public, message, signature, ctx)
- case scheme == ED25519Ctx && opts.HashFunc() == crypto.Hash(0) && len(ctx) > 0:
- return VerifyWithCtx(public, message, signature, ctx)
- default:
- return false
- }
-}
-
-// Verify returns true if the signature is valid. Failure cases are invalid
-// signature, or when the public key cannot be decoded.
-// This function supports the signature variant defined in RFC-8032: Ed25519,
-// also known as the pure version of EdDSA.
-func Verify(public PublicKey, message, signature []byte) bool {
- return verify(public, message, signature, []byte(""), false)
-}
-
-// VerifyPh returns true if the signature is valid. Failure cases are invalid
-// signature, or when the public key cannot be decoded.
-// This function supports the signature variant defined in RFC-8032: Ed25519ph,
-// meaning it internally hashes the message using SHA-512.
-// Context could be passed to this function, which length should be no more than
-// 255. It can be empty.
-func VerifyPh(public PublicKey, message, signature []byte, ctx string) bool {
- return verify(public, message, signature, []byte(ctx), true)
-}
-
-// VerifyWithCtx returns true if the signature is valid. Failure cases are invalid
-// signature, or when the public key cannot be decoded, or when context is
-// not provided.
-// This function supports the signature variant defined in RFC-8032: Ed25519ctx,
-// meaning it does not handle prehashed messages. Non-empty context string must be
-// provided, and must not be more than 255 of length.
-func VerifyWithCtx(public PublicKey, message, signature []byte, ctx string) bool {
- if len(ctx) == 0 || len(ctx) > ContextMaxSize {
- return false
- }
-
- return verify(public, message, signature, []byte(ctx), false)
-}
-
-func clamp(k []byte) {
- k[0] &= 248
- k[paramB-1] = (k[paramB-1] & 127) | 64
-}
-
-// isLessThanOrder returns true if 0 <= x < order.
-func isLessThanOrder(x []byte) bool {
- i := len(order) - 1
- for i > 0 && x[i] == order[i] {
- i--
- }
- return x[i] < order[i]
-}
-
-func writeDom(h io.Writer, ctx []byte, preHash bool) {
- dom2 := "SigEd25519 no Ed25519 collisions"
-
- if len(ctx) > 0 {
- _, _ = h.Write([]byte(dom2))
- if preHash {
- _, _ = h.Write([]byte{byte(0x01), byte(len(ctx))})
- } else {
- _, _ = h.Write([]byte{byte(0x00), byte(len(ctx))})
- }
- _, _ = h.Write(ctx)
- } else if preHash {
- _, _ = h.Write([]byte(dom2))
- _, _ = h.Write([]byte{0x01, 0x00})
- }
-}
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go b/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go
deleted file mode 100644
index 10efafdc..00000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/modular.go
+++ /dev/null
@@ -1,175 +0,0 @@
-package ed25519
-
-import (
- "encoding/binary"
- "math/bits"
-)
-
-var order = [paramB]byte{
- 0xed, 0xd3, 0xf5, 0x5c, 0x1a, 0x63, 0x12, 0x58,
- 0xd6, 0x9c, 0xf7, 0xa2, 0xde, 0xf9, 0xde, 0x14,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,
-}
-
-// isLessThan returns true if 0 <= x < y, and assumes that slices have the same length.
-func isLessThan(x, y []byte) bool {
- i := len(x) - 1
- for i > 0 && x[i] == y[i] {
- i--
- }
- return x[i] < y[i]
-}
-
-// reduceModOrder calculates k = k mod order of the curve.
-func reduceModOrder(k []byte, is512Bit bool) {
- var X [((2 * paramB) * 8) / 64]uint64
- numWords := len(k) >> 3
- for i := 0; i < numWords; i++ {
- X[i] = binary.LittleEndian.Uint64(k[i*8 : (i+1)*8])
- }
- red512(&X, is512Bit)
- for i := 0; i < numWords; i++ {
- binary.LittleEndian.PutUint64(k[i*8:(i+1)*8], X[i])
- }
-}
-
-// red512 calculates x = x mod Order of the curve.
-func red512(x *[8]uint64, full bool) {
- // Implementation of Algs.(14.47)+(14.52) of Handbook of Applied
- // Cryptography, by A. Menezes, P. van Oorschot, and S. Vanstone.
- const (
- ell0 = uint64(0x5812631a5cf5d3ed)
- ell1 = uint64(0x14def9dea2f79cd6)
- ell160 = uint64(0x812631a5cf5d3ed0)
- ell161 = uint64(0x4def9dea2f79cd65)
- ell162 = uint64(0x0000000000000001)
- )
-
- var c0, c1, c2, c3 uint64
- r0, r1, r2, r3, r4 := x[0], x[1], x[2], x[3], uint64(0)
-
- if full {
- q0, q1, q2, q3 := x[4], x[5], x[6], x[7]
-
- for i := 0; i < 3; i++ {
- h0, s0 := bits.Mul64(q0, ell160)
- h1, s1 := bits.Mul64(q1, ell160)
- h2, s2 := bits.Mul64(q2, ell160)
- h3, s3 := bits.Mul64(q3, ell160)
-
- s1, c0 = bits.Add64(h0, s1, 0)
- s2, c1 = bits.Add64(h1, s2, c0)
- s3, c2 = bits.Add64(h2, s3, c1)
- s4, _ := bits.Add64(h3, 0, c2)
-
- h0, l0 := bits.Mul64(q0, ell161)
- h1, l1 := bits.Mul64(q1, ell161)
- h2, l2 := bits.Mul64(q2, ell161)
- h3, l3 := bits.Mul64(q3, ell161)
-
- l1, c0 = bits.Add64(h0, l1, 0)
- l2, c1 = bits.Add64(h1, l2, c0)
- l3, c2 = bits.Add64(h2, l3, c1)
- l4, _ := bits.Add64(h3, 0, c2)
-
- s1, c0 = bits.Add64(s1, l0, 0)
- s2, c1 = bits.Add64(s2, l1, c0)
- s3, c2 = bits.Add64(s3, l2, c1)
- s4, c3 = bits.Add64(s4, l3, c2)
- s5, s6 := bits.Add64(l4, 0, c3)
-
- s2, c0 = bits.Add64(s2, q0, 0)
- s3, c1 = bits.Add64(s3, q1, c0)
- s4, c2 = bits.Add64(s4, q2, c1)
- s5, c3 = bits.Add64(s5, q3, c2)
- s6, s7 := bits.Add64(s6, 0, c3)
-
- q := q0 | q1 | q2 | q3
- m := -((q | -q) >> 63) // if q=0 then m=0...0 else m=1..1
- s0 &= m
- s1 &= m
- s2 &= m
- s3 &= m
- q0, q1, q2, q3 = s4, s5, s6, s7
-
- if (i+1)%2 == 0 {
- r0, c0 = bits.Add64(r0, s0, 0)
- r1, c1 = bits.Add64(r1, s1, c0)
- r2, c2 = bits.Add64(r2, s2, c1)
- r3, c3 = bits.Add64(r3, s3, c2)
- r4, _ = bits.Add64(r4, 0, c3)
- } else {
- r0, c0 = bits.Sub64(r0, s0, 0)
- r1, c1 = bits.Sub64(r1, s1, c0)
- r2, c2 = bits.Sub64(r2, s2, c1)
- r3, c3 = bits.Sub64(r3, s3, c2)
- r4, _ = bits.Sub64(r4, 0, c3)
- }
- }
-
- m := -(r4 >> 63)
- r0, c0 = bits.Add64(r0, m&ell160, 0)
- r1, c1 = bits.Add64(r1, m&ell161, c0)
- r2, c2 = bits.Add64(r2, m&ell162, c1)
- r3, c3 = bits.Add64(r3, 0, c2)
- r4, _ = bits.Add64(r4, m&1, c3)
- x[4], x[5], x[6], x[7] = 0, 0, 0, 0
- }
-
- q0 := (r4 << 4) | (r3 >> 60)
- r3 &= (uint64(1) << 60) - 1
-
- h0, s0 := bits.Mul64(ell0, q0)
- h1, s1 := bits.Mul64(ell1, q0)
- s1, c0 = bits.Add64(h0, s1, 0)
- s2, _ := bits.Add64(h1, 0, c0)
-
- r0, c0 = bits.Sub64(r0, s0, 0)
- r1, c1 = bits.Sub64(r1, s1, c0)
- r2, c2 = bits.Sub64(r2, s2, c1)
- r3, _ = bits.Sub64(r3, 0, c2)
-
- x[0], x[1], x[2], x[3] = r0, r1, r2, r3
-}
-
-// calculateS performs s = r+k*a mod Order of the curve.
-func calculateS(s, r, k, a []byte) {
- K := [4]uint64{
- binary.LittleEndian.Uint64(k[0*8 : 1*8]),
- binary.LittleEndian.Uint64(k[1*8 : 2*8]),
- binary.LittleEndian.Uint64(k[2*8 : 3*8]),
- binary.LittleEndian.Uint64(k[3*8 : 4*8]),
- }
- S := [8]uint64{
- binary.LittleEndian.Uint64(r[0*8 : 1*8]),
- binary.LittleEndian.Uint64(r[1*8 : 2*8]),
- binary.LittleEndian.Uint64(r[2*8 : 3*8]),
- binary.LittleEndian.Uint64(r[3*8 : 4*8]),
- }
- var c3 uint64
- for i := range K {
- ai := binary.LittleEndian.Uint64(a[i*8 : (i+1)*8])
-
- h0, l0 := bits.Mul64(K[0], ai)
- h1, l1 := bits.Mul64(K[1], ai)
- h2, l2 := bits.Mul64(K[2], ai)
- h3, l3 := bits.Mul64(K[3], ai)
-
- l1, c0 := bits.Add64(h0, l1, 0)
- l2, c1 := bits.Add64(h1, l2, c0)
- l3, c2 := bits.Add64(h2, l3, c1)
- l4, _ := bits.Add64(h3, 0, c2)
-
- S[i+0], c0 = bits.Add64(S[i+0], l0, 0)
- S[i+1], c1 = bits.Add64(S[i+1], l1, c0)
- S[i+2], c2 = bits.Add64(S[i+2], l2, c1)
- S[i+3], c3 = bits.Add64(S[i+3], l3, c2)
- S[i+4], _ = bits.Add64(S[i+4], l4, c3)
- }
- red512(&S, true)
- binary.LittleEndian.PutUint64(s[0*8:1*8], S[0])
- binary.LittleEndian.PutUint64(s[1*8:2*8], S[1])
- binary.LittleEndian.PutUint64(s[2*8:3*8], S[2])
- binary.LittleEndian.PutUint64(s[3*8:4*8], S[3])
-}
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go b/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go
deleted file mode 100644
index 3216aae3..00000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/mult.go
+++ /dev/null
@@ -1,180 +0,0 @@
-package ed25519
-
-import (
- "crypto/subtle"
- "encoding/binary"
- "math/bits"
-
- "github.com/cloudflare/circl/internal/conv"
- "github.com/cloudflare/circl/math"
- fp "github.com/cloudflare/circl/math/fp25519"
-)
-
-var paramD = fp.Elt{
- 0xa3, 0x78, 0x59, 0x13, 0xca, 0x4d, 0xeb, 0x75,
- 0xab, 0xd8, 0x41, 0x41, 0x4d, 0x0a, 0x70, 0x00,
- 0x98, 0xe8, 0x79, 0x77, 0x79, 0x40, 0xc7, 0x8c,
- 0x73, 0xfe, 0x6f, 0x2b, 0xee, 0x6c, 0x03, 0x52,
-}
-
-// mLSBRecoding parameters.
-const (
- fxT = 257
- fxV = 2
- fxW = 3
- fx2w1 = 1 << (uint(fxW) - 1)
- numWords64 = (paramB * 8 / 64)
-)
-
-// mLSBRecoding is the odd-only modified LSB-set.
-//
-// Reference:
-//
-// "Efficient and secure algorithms for GLV-based scalar multiplication and
-// their implementation on GLV–GLS curves" by (Faz-Hernandez et al.)
-// http://doi.org/10.1007/s13389-014-0085-7.
-func mLSBRecoding(L []int8, k []byte) {
- const ee = (fxT + fxW*fxV - 1) / (fxW * fxV)
- const dd = ee * fxV
- const ll = dd * fxW
- if len(L) == (ll + 1) {
- var m [numWords64 + 1]uint64
- for i := 0; i < numWords64; i++ {
- m[i] = binary.LittleEndian.Uint64(k[8*i : 8*i+8])
- }
- condAddOrderN(&m)
- L[dd-1] = 1
- for i := 0; i < dd-1; i++ {
- kip1 := (m[(i+1)/64] >> (uint(i+1) % 64)) & 0x1
- L[i] = int8(kip1<<1) - 1
- }
- { // right-shift by d
- right := uint(dd % 64)
- left := uint(64) - right
- lim := ((numWords64+1)*64 - dd) / 64
- j := dd / 64
- for i := 0; i < lim; i++ {
- m[i] = (m[i+j] >> right) | (m[i+j+1] << left)
- }
- m[lim] = m[lim+j] >> right
- }
- for i := dd; i < ll; i++ {
- L[i] = L[i%dd] * int8(m[0]&0x1)
- div2subY(m[:], int64(L[i]>>1), numWords64)
- }
- L[ll] = int8(m[0])
- }
-}
-
-// absolute returns always a positive value.
-func absolute(x int32) int32 {
- mask := x >> 31
- return (x + mask) ^ mask
-}
-
-// condAddOrderN updates x = x+order if x is even, otherwise x remains unchanged.
-func condAddOrderN(x *[numWords64 + 1]uint64) {
- isOdd := (x[0] & 0x1) - 1
- c := uint64(0)
- for i := 0; i < numWords64; i++ {
- orderWord := binary.LittleEndian.Uint64(order[8*i : 8*i+8])
- o := isOdd & orderWord
- x0, c0 := bits.Add64(x[i], o, c)
- x[i] = x0
- c = c0
- }
- x[numWords64], _ = bits.Add64(x[numWords64], 0, c)
-}
-
-// div2subY update x = (x/2) - y.
-func div2subY(x []uint64, y int64, l int) {
- s := uint64(y >> 63)
- for i := 0; i < l-1; i++ {
- x[i] = (x[i] >> 1) | (x[i+1] << 63)
- }
- x[l-1] = (x[l-1] >> 1)
-
- b := uint64(0)
- x0, b0 := bits.Sub64(x[0], uint64(y), b)
- x[0] = x0
- b = b0
- for i := 1; i < l-1; i++ {
- x0, b0 := bits.Sub64(x[i], s, b)
- x[i] = x0
- b = b0
- }
- x[l-1], _ = bits.Sub64(x[l-1], s, b)
-}
-
-func (P *pointR1) fixedMult(scalar []byte) {
- if len(scalar) != paramB {
- panic("wrong scalar size")
- }
- const ee = (fxT + fxW*fxV - 1) / (fxW * fxV)
- const dd = ee * fxV
- const ll = dd * fxW
-
- L := make([]int8, ll+1)
- mLSBRecoding(L[:], scalar)
- S := &pointR3{}
- P.SetIdentity()
- for ii := ee - 1; ii >= 0; ii-- {
- P.double()
- for j := 0; j < fxV; j++ {
- dig := L[fxW*dd-j*ee+ii-ee]
- for i := (fxW-1)*dd - j*ee + ii - ee; i >= (2*dd - j*ee + ii - ee); i = i - dd {
- dig = 2*dig + L[i]
- }
- idx := absolute(int32(dig))
- sig := L[dd-j*ee+ii-ee]
- Tabj := &tabSign[fxV-j-1]
- for k := 0; k < fx2w1; k++ {
- S.cmov(&Tabj[k], subtle.ConstantTimeEq(int32(k), idx))
- }
- S.cneg(subtle.ConstantTimeEq(int32(sig), -1))
- P.mixAdd(S)
- }
- }
-}
-
-const (
- omegaFix = 7
- omegaVar = 5
-)
-
-// doubleMult returns P=mG+nQ.
-func (P *pointR1) doubleMult(Q *pointR1, m, n []byte) {
- nafFix := math.OmegaNAF(conv.BytesLe2BigInt(m), omegaFix)
- nafVar := math.OmegaNAF(conv.BytesLe2BigInt(n), omegaVar)
-
- if len(nafFix) > len(nafVar) {
- nafVar = append(nafVar, make([]int32, len(nafFix)-len(nafVar))...)
- } else if len(nafFix) < len(nafVar) {
- nafFix = append(nafFix, make([]int32, len(nafVar)-len(nafFix))...)
- }
-
- var TabQ [1 << (omegaVar - 2)]pointR2
- Q.oddMultiples(TabQ[:])
- P.SetIdentity()
- for i := len(nafFix) - 1; i >= 0; i-- {
- P.double()
- // Generator point
- if nafFix[i] != 0 {
- idxM := absolute(nafFix[i]) >> 1
- R := tabVerif[idxM]
- if nafFix[i] < 0 {
- R.neg()
- }
- P.mixAdd(&R)
- }
- // Variable input point
- if nafVar[i] != 0 {
- idxN := absolute(nafVar[i]) >> 1
- S := TabQ[idxN]
- if nafVar[i] < 0 {
- S.neg()
- }
- P.add(&S)
- }
- }
-}
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/point.go b/vendor/github.com/cloudflare/circl/sign/ed25519/point.go
deleted file mode 100644
index 374a6950..00000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/point.go
+++ /dev/null
@@ -1,195 +0,0 @@
-package ed25519
-
-import fp "github.com/cloudflare/circl/math/fp25519"
-
-type (
- pointR1 struct{ x, y, z, ta, tb fp.Elt }
- pointR2 struct {
- pointR3
- z2 fp.Elt
- }
-)
-type pointR3 struct{ addYX, subYX, dt2 fp.Elt }
-
-func (P *pointR1) neg() {
- fp.Neg(&P.x, &P.x)
- fp.Neg(&P.ta, &P.ta)
-}
-
-func (P *pointR1) SetIdentity() {
- P.x = fp.Elt{}
- fp.SetOne(&P.y)
- fp.SetOne(&P.z)
- P.ta = fp.Elt{}
- P.tb = fp.Elt{}
-}
-
-func (P *pointR1) toAffine() {
- fp.Inv(&P.z, &P.z)
- fp.Mul(&P.x, &P.x, &P.z)
- fp.Mul(&P.y, &P.y, &P.z)
- fp.Modp(&P.x)
- fp.Modp(&P.y)
- fp.SetOne(&P.z)
- P.ta = P.x
- P.tb = P.y
-}
-
-func (P *pointR1) ToBytes(k []byte) error {
- P.toAffine()
- var x [fp.Size]byte
- err := fp.ToBytes(k[:fp.Size], &P.y)
- if err != nil {
- return err
- }
- err = fp.ToBytes(x[:], &P.x)
- if err != nil {
- return err
- }
- b := x[0] & 1
- k[paramB-1] = k[paramB-1] | (b << 7)
- return nil
-}
-
-func (P *pointR1) FromBytes(k []byte) bool {
- if len(k) != paramB {
- panic("wrong size")
- }
- signX := k[paramB-1] >> 7
- copy(P.y[:], k[:fp.Size])
- P.y[fp.Size-1] &= 0x7F
- p := fp.P()
- if !isLessThan(P.y[:], p[:]) {
- return false
- }
-
- one, u, v := &fp.Elt{}, &fp.Elt{}, &fp.Elt{}
- fp.SetOne(one)
- fp.Sqr(u, &P.y) // u = y^2
- fp.Mul(v, u, ¶mD) // v = dy^2
- fp.Sub(u, u, one) // u = y^2-1
- fp.Add(v, v, one) // v = dy^2+1
- isQR := fp.InvSqrt(&P.x, u, v) // x = sqrt(u/v)
- if !isQR {
- return false
- }
- fp.Modp(&P.x) // x = x mod p
- if fp.IsZero(&P.x) && signX == 1 {
- return false
- }
- if signX != (P.x[0] & 1) {
- fp.Neg(&P.x, &P.x)
- }
- P.ta = P.x
- P.tb = P.y
- fp.SetOne(&P.z)
- return true
-}
-
-// double calculates 2P for curves with A=-1.
-func (P *pointR1) double() {
- Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb
- a, b, c, e, f, g, h := Px, Py, Pz, Pta, Px, Py, Ptb
- fp.Add(e, Px, Py) // x+y
- fp.Sqr(a, Px) // A = x^2
- fp.Sqr(b, Py) // B = y^2
- fp.Sqr(c, Pz) // z^2
- fp.Add(c, c, c) // C = 2*z^2
- fp.Add(h, a, b) // H = A+B
- fp.Sqr(e, e) // (x+y)^2
- fp.Sub(e, e, h) // E = (x+y)^2-A-B
- fp.Sub(g, b, a) // G = B-A
- fp.Sub(f, c, g) // F = C-G
- fp.Mul(Pz, f, g) // Z = F * G
- fp.Mul(Px, e, f) // X = E * F
- fp.Mul(Py, g, h) // Y = G * H, T = E * H
-}
-
-func (P *pointR1) mixAdd(Q *pointR3) {
- fp.Add(&P.z, &P.z, &P.z) // D = 2*z1
- P.coreAddition(Q)
-}
-
-func (P *pointR1) add(Q *pointR2) {
- fp.Mul(&P.z, &P.z, &Q.z2) // D = 2*z1*z2
- P.coreAddition(&Q.pointR3)
-}
-
-// coreAddition calculates P=P+Q for curves with A=-1.
-func (P *pointR1) coreAddition(Q *pointR3) {
- Px, Py, Pz, Pta, Ptb := &P.x, &P.y, &P.z, &P.ta, &P.tb
- addYX2, subYX2, dt2 := &Q.addYX, &Q.subYX, &Q.dt2
- a, b, c, d, e, f, g, h := Px, Py, &fp.Elt{}, Pz, Pta, Px, Py, Ptb
- fp.Mul(c, Pta, Ptb) // t1 = ta*tb
- fp.Sub(h, Py, Px) // y1-x1
- fp.Add(b, Py, Px) // y1+x1
- fp.Mul(a, h, subYX2) // A = (y1-x1)*(y2-x2)
- fp.Mul(b, b, addYX2) // B = (y1+x1)*(y2+x2)
- fp.Mul(c, c, dt2) // C = 2*D*t1*t2
- fp.Sub(e, b, a) // E = B-A
- fp.Add(h, b, a) // H = B+A
- fp.Sub(f, d, c) // F = D-C
- fp.Add(g, d, c) // G = D+C
- fp.Mul(Pz, f, g) // Z = F * G
- fp.Mul(Px, e, f) // X = E * F
- fp.Mul(Py, g, h) // Y = G * H, T = E * H
-}
-
-func (P *pointR1) oddMultiples(T []pointR2) {
- var R pointR2
- n := len(T)
- T[0].fromR1(P)
- _2P := *P
- _2P.double()
- R.fromR1(&_2P)
- for i := 1; i < n; i++ {
- P.add(&R)
- T[i].fromR1(P)
- }
-}
-
-func (P *pointR1) isEqual(Q *pointR1) bool {
- l, r := &fp.Elt{}, &fp.Elt{}
- fp.Mul(l, &P.x, &Q.z)
- fp.Mul(r, &Q.x, &P.z)
- fp.Sub(l, l, r)
- b := fp.IsZero(l)
- fp.Mul(l, &P.y, &Q.z)
- fp.Mul(r, &Q.y, &P.z)
- fp.Sub(l, l, r)
- b = b && fp.IsZero(l)
- fp.Mul(l, &P.ta, &P.tb)
- fp.Mul(l, l, &Q.z)
- fp.Mul(r, &Q.ta, &Q.tb)
- fp.Mul(r, r, &P.z)
- fp.Sub(l, l, r)
- b = b && fp.IsZero(l)
- return b
-}
-
-func (P *pointR3) neg() {
- P.addYX, P.subYX = P.subYX, P.addYX
- fp.Neg(&P.dt2, &P.dt2)
-}
-
-func (P *pointR2) fromR1(Q *pointR1) {
- fp.Add(&P.addYX, &Q.y, &Q.x)
- fp.Sub(&P.subYX, &Q.y, &Q.x)
- fp.Mul(&P.dt2, &Q.ta, &Q.tb)
- fp.Mul(&P.dt2, &P.dt2, ¶mD)
- fp.Add(&P.dt2, &P.dt2, &P.dt2)
- fp.Add(&P.z2, &Q.z, &Q.z)
-}
-
-func (P *pointR3) cneg(b int) {
- t := &fp.Elt{}
- fp.Cswap(&P.addYX, &P.subYX, uint(b))
- fp.Neg(t, &P.dt2)
- fp.Cmov(&P.dt2, t, uint(b))
-}
-
-func (P *pointR3) cmov(Q *pointR3, b int) {
- fp.Cmov(&P.addYX, &Q.addYX, uint(b))
- fp.Cmov(&P.subYX, &Q.subYX, uint(b))
- fp.Cmov(&P.dt2, &Q.dt2, uint(b))
-}
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go b/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go
deleted file mode 100644
index c3505b67..00000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey.go
+++ /dev/null
@@ -1,9 +0,0 @@
-//go:build go1.13
-// +build go1.13
-
-package ed25519
-
-import cryptoEd25519 "crypto/ed25519"
-
-// PublicKey is the type of Ed25519 public keys.
-type PublicKey cryptoEd25519.PublicKey
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go b/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go
deleted file mode 100644
index d57d86ef..00000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/pubkey112.go
+++ /dev/null
@@ -1,7 +0,0 @@
-//go:build !go1.13
-// +build !go1.13
-
-package ed25519
-
-// PublicKey is the type of Ed25519 public keys.
-type PublicKey []byte
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go b/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go
deleted file mode 100644
index e4520f52..00000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/signapi.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package ed25519
-
-import (
- "crypto/rand"
- "encoding/asn1"
-
- "github.com/cloudflare/circl/sign"
-)
-
-var sch sign.Scheme = &scheme{}
-
-// Scheme returns a signature interface.
-func Scheme() sign.Scheme { return sch }
-
-type scheme struct{}
-
-func (*scheme) Name() string { return "Ed25519" }
-func (*scheme) PublicKeySize() int { return PublicKeySize }
-func (*scheme) PrivateKeySize() int { return PrivateKeySize }
-func (*scheme) SignatureSize() int { return SignatureSize }
-func (*scheme) SeedSize() int { return SeedSize }
-func (*scheme) TLSIdentifier() uint { return 0x0807 }
-func (*scheme) SupportsContext() bool { return false }
-func (*scheme) Oid() asn1.ObjectIdentifier {
- return asn1.ObjectIdentifier{1, 3, 101, 112}
-}
-
-func (*scheme) GenerateKey() (sign.PublicKey, sign.PrivateKey, error) {
- return GenerateKey(rand.Reader)
-}
-
-func (*scheme) Sign(
- sk sign.PrivateKey,
- message []byte,
- opts *sign.SignatureOpts,
-) []byte {
- priv, ok := sk.(PrivateKey)
- if !ok {
- panic(sign.ErrTypeMismatch)
- }
- if opts != nil && opts.Context != "" {
- panic(sign.ErrContextNotSupported)
- }
- return Sign(priv, message)
-}
-
-func (*scheme) Verify(
- pk sign.PublicKey,
- message, signature []byte,
- opts *sign.SignatureOpts,
-) bool {
- pub, ok := pk.(PublicKey)
- if !ok {
- panic(sign.ErrTypeMismatch)
- }
- if opts != nil {
- if opts.Context != "" {
- panic(sign.ErrContextNotSupported)
- }
- }
- return Verify(pub, message, signature)
-}
-
-func (*scheme) DeriveKey(seed []byte) (sign.PublicKey, sign.PrivateKey) {
- privateKey := NewKeyFromSeed(seed)
- publicKey := make(PublicKey, PublicKeySize)
- copy(publicKey, privateKey[SeedSize:])
- return publicKey, privateKey
-}
-
-func (*scheme) UnmarshalBinaryPublicKey(buf []byte) (sign.PublicKey, error) {
- if len(buf) < PublicKeySize {
- return nil, sign.ErrPubKeySize
- }
- pub := make(PublicKey, PublicKeySize)
- copy(pub, buf[:PublicKeySize])
- return pub, nil
-}
-
-func (*scheme) UnmarshalBinaryPrivateKey(buf []byte) (sign.PrivateKey, error) {
- if len(buf) < PrivateKeySize {
- return nil, sign.ErrPrivKeySize
- }
- priv := make(PrivateKey, PrivateKeySize)
- copy(priv, buf[:PrivateKeySize])
- return priv, nil
-}
diff --git a/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go b/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go
deleted file mode 100644
index 8763b426..00000000
--- a/vendor/github.com/cloudflare/circl/sign/ed25519/tables.go
+++ /dev/null
@@ -1,213 +0,0 @@
-package ed25519
-
-import fp "github.com/cloudflare/circl/math/fp25519"
-
-var tabSign = [fxV][fx2w1]pointR3{
- {
- pointR3{
- addYX: fp.Elt{0x85, 0x3b, 0x8c, 0xf5, 0xc6, 0x93, 0xbc, 0x2f, 0x19, 0x0e, 0x8c, 0xfb, 0xc6, 0x2d, 0x93, 0xcf, 0xc2, 0x42, 0x3d, 0x64, 0x98, 0x48, 0x0b, 0x27, 0x65, 0xba, 0xd4, 0x33, 0x3a, 0x9d, 0xcf, 0x07},
- subYX: fp.Elt{0x3e, 0x91, 0x40, 0xd7, 0x05, 0x39, 0x10, 0x9d, 0xb3, 0xbe, 0x40, 0xd1, 0x05, 0x9f, 0x39, 0xfd, 0x09, 0x8a, 0x8f, 0x68, 0x34, 0x84, 0xc1, 0xa5, 0x67, 0x12, 0xf8, 0x98, 0x92, 0x2f, 0xfd, 0x44},
- dt2: fp.Elt{0x68, 0xaa, 0x7a, 0x87, 0x05, 0x12, 0xc9, 0xab, 0x9e, 0xc4, 0xaa, 0xcc, 0x23, 0xe8, 0xd9, 0x26, 0x8c, 0x59, 0x43, 0xdd, 0xcb, 0x7d, 0x1b, 0x5a, 0xa8, 0x65, 0x0c, 0x9f, 0x68, 0x7b, 0x11, 0x6f},
- },
- {
- addYX: fp.Elt{0x7c, 0xb0, 0x9e, 0xe6, 0xc5, 0xbf, 0xfa, 0x13, 0x8e, 0x0d, 0x22, 0xde, 0xc8, 0xd1, 0xce, 0x52, 0x02, 0xd5, 0x62, 0x31, 0x71, 0x0e, 0x8e, 0x9d, 0xb0, 0xd6, 0x00, 0xa5, 0x5a, 0x0e, 0xce, 0x72},
- subYX: fp.Elt{0x1a, 0x8e, 0x5c, 0xdc, 0xa4, 0xb3, 0x6c, 0x51, 0x18, 0xa0, 0x09, 0x80, 0x9a, 0x46, 0x33, 0xd5, 0xe0, 0x3c, 0x4d, 0x3b, 0xfc, 0x49, 0xa2, 0x43, 0x29, 0xe1, 0x29, 0xa9, 0x93, 0xea, 0x7c, 0x35},
- dt2: fp.Elt{0x08, 0x46, 0x6f, 0x68, 0x7f, 0x0b, 0x7c, 0x9e, 0xad, 0xba, 0x07, 0x61, 0x74, 0x83, 0x2f, 0xfc, 0x26, 0xd6, 0x09, 0xb9, 0x00, 0x34, 0x36, 0x4f, 0x01, 0xf3, 0x48, 0xdb, 0x43, 0xba, 0x04, 0x44},
- },
- {
- addYX: fp.Elt{0x4c, 0xda, 0x0d, 0x13, 0x66, 0xfd, 0x82, 0x84, 0x9f, 0x75, 0x5b, 0xa2, 0x17, 0xfe, 0x34, 0xbf, 0x1f, 0xcb, 0xba, 0x90, 0x55, 0x80, 0x83, 0xfd, 0x63, 0xb9, 0x18, 0xf8, 0x5b, 0x5d, 0x94, 0x1e},
- subYX: fp.Elt{0xb9, 0xdb, 0x6c, 0x04, 0x88, 0x22, 0xd8, 0x79, 0x83, 0x2f, 0x8d, 0x65, 0x6b, 0xd2, 0xab, 0x1b, 0xdd, 0x65, 0xe5, 0x93, 0x63, 0xf8, 0xa2, 0xd8, 0x3c, 0xf1, 0x4b, 0xc5, 0x99, 0xd1, 0xf2, 0x12},
- dt2: fp.Elt{0x05, 0x4c, 0xb8, 0x3b, 0xfe, 0xf5, 0x9f, 0x2e, 0xd1, 0xb2, 0xb8, 0xff, 0xfe, 0x6d, 0xd9, 0x37, 0xe0, 0xae, 0xb4, 0x5a, 0x51, 0x80, 0x7e, 0x9b, 0x1d, 0xd1, 0x8d, 0x8c, 0x56, 0xb1, 0x84, 0x35},
- },
- {
- addYX: fp.Elt{0x39, 0x71, 0x43, 0x34, 0xe3, 0x42, 0x45, 0xa1, 0xf2, 0x68, 0x71, 0xa7, 0xe8, 0x23, 0xfd, 0x9f, 0x86, 0x48, 0xff, 0xe5, 0x96, 0x74, 0xcf, 0x05, 0x49, 0xe2, 0xb3, 0x6c, 0x17, 0x77, 0x2f, 0x6d},
- subYX: fp.Elt{0x73, 0x3f, 0xc1, 0xc7, 0x6a, 0x66, 0xa1, 0x20, 0xdd, 0x11, 0xfb, 0x7a, 0x6e, 0xa8, 0x51, 0xb8, 0x3f, 0x9d, 0xa2, 0x97, 0x84, 0xb5, 0xc7, 0x90, 0x7c, 0xab, 0x48, 0xd6, 0x84, 0xa3, 0xd5, 0x1a},
- dt2: fp.Elt{0x63, 0x27, 0x3c, 0x49, 0x4b, 0xfc, 0x22, 0xf2, 0x0b, 0x50, 0xc2, 0x0f, 0xb4, 0x1f, 0x31, 0x0c, 0x2f, 0x53, 0xab, 0xaa, 0x75, 0x6f, 0xe0, 0x69, 0x39, 0x56, 0xe0, 0x3b, 0xb7, 0xa8, 0xbf, 0x45},
- },
- },
- {
- {
- addYX: fp.Elt{0x00, 0x45, 0xd9, 0x0d, 0x58, 0x03, 0xfc, 0x29, 0x93, 0xec, 0xbb, 0x6f, 0xa4, 0x7a, 0xd2, 0xec, 0xf8, 0xa7, 0xe2, 0xc2, 0x5f, 0x15, 0x0a, 0x13, 0xd5, 0xa1, 0x06, 0xb7, 0x1a, 0x15, 0x6b, 0x41},
- subYX: fp.Elt{0x85, 0x8c, 0xb2, 0x17, 0xd6, 0x3b, 0x0a, 0xd3, 0xea, 0x3b, 0x77, 0x39, 0xb7, 0x77, 0xd3, 0xc5, 0xbf, 0x5c, 0x6a, 0x1e, 0x8c, 0xe7, 0xc6, 0xc6, 0xc4, 0xb7, 0x2a, 0x8b, 0xf7, 0xb8, 0x61, 0x0d},
- dt2: fp.Elt{0xb0, 0x36, 0xc1, 0xe9, 0xef, 0xd7, 0xa8, 0x56, 0x20, 0x4b, 0xe4, 0x58, 0xcd, 0xe5, 0x07, 0xbd, 0xab, 0xe0, 0x57, 0x1b, 0xda, 0x2f, 0xe6, 0xaf, 0xd2, 0xe8, 0x77, 0x42, 0xf7, 0x2a, 0x1a, 0x19},
- },
- {
- addYX: fp.Elt{0x6a, 0x6d, 0x6d, 0xd1, 0xfa, 0xf5, 0x03, 0x30, 0xbd, 0x6d, 0xc2, 0xc8, 0xf5, 0x38, 0x80, 0x4f, 0xb2, 0xbe, 0xa1, 0x76, 0x50, 0x1a, 0x73, 0xf2, 0x78, 0x2b, 0x8e, 0x3a, 0x1e, 0x34, 0x47, 0x7b},
- subYX: fp.Elt{0xc3, 0x2c, 0x36, 0xdc, 0xc5, 0x45, 0xbc, 0xef, 0x1b, 0x64, 0xd6, 0x65, 0x28, 0xe9, 0xda, 0x84, 0x13, 0xbe, 0x27, 0x8e, 0x3f, 0x98, 0x2a, 0x37, 0xee, 0x78, 0x97, 0xd6, 0xc0, 0x6f, 0xb4, 0x53},
- dt2: fp.Elt{0x58, 0x5d, 0xa7, 0xa3, 0x68, 0xbb, 0x20, 0x30, 0x2e, 0x03, 0xe9, 0xb1, 0xd4, 0x90, 0x72, 0xe3, 0x71, 0xb2, 0x36, 0x3e, 0x73, 0xa0, 0x2e, 0x3d, 0xd1, 0x85, 0x33, 0x62, 0x4e, 0xa7, 0x7b, 0x31},
- },
- {
- addYX: fp.Elt{0xbf, 0xc4, 0x38, 0x53, 0xfb, 0x68, 0xa9, 0x77, 0xce, 0x55, 0xf9, 0x05, 0xcb, 0xeb, 0xfb, 0x8c, 0x46, 0xc2, 0x32, 0x7c, 0xf0, 0xdb, 0xd7, 0x2c, 0x62, 0x8e, 0xdd, 0x54, 0x75, 0xcf, 0x3f, 0x33},
- subYX: fp.Elt{0x49, 0x50, 0x1f, 0x4e, 0x6e, 0x55, 0x55, 0xde, 0x8c, 0x4e, 0x77, 0x96, 0x38, 0x3b, 0xfe, 0xb6, 0x43, 0x3c, 0x86, 0x69, 0xc2, 0x72, 0x66, 0x1f, 0x6b, 0xf9, 0x87, 0xbc, 0x4f, 0x37, 0x3e, 0x3c},
- dt2: fp.Elt{0xd2, 0x2f, 0x06, 0x6b, 0x08, 0x07, 0x69, 0x77, 0xc0, 0x94, 0xcc, 0xae, 0x43, 0x00, 0x59, 0x6e, 0xa3, 0x63, 0xa8, 0xdd, 0xfa, 0x24, 0x18, 0xd0, 0x35, 0xc7, 0x78, 0xf7, 0x0d, 0xd4, 0x5a, 0x1e},
- },
- {
- addYX: fp.Elt{0x45, 0xc1, 0x17, 0x51, 0xf8, 0xed, 0x7e, 0xc7, 0xa9, 0x1a, 0x11, 0x6e, 0x2d, 0xef, 0x0b, 0xd5, 0x3f, 0x98, 0xb0, 0xa3, 0x9d, 0x65, 0xf1, 0xcd, 0x53, 0x4a, 0x8a, 0x18, 0x70, 0x0a, 0x7f, 0x23},
- subYX: fp.Elt{0xdd, 0xef, 0xbe, 0x3a, 0x31, 0xe0, 0xbc, 0xbe, 0x6d, 0x5d, 0x79, 0x87, 0xd6, 0xbe, 0x68, 0xe3, 0x59, 0x76, 0x8c, 0x86, 0x0e, 0x7a, 0x92, 0x13, 0x14, 0x8f, 0x67, 0xb3, 0xcb, 0x1a, 0x76, 0x76},
- dt2: fp.Elt{0x56, 0x7a, 0x1c, 0x9d, 0xca, 0x96, 0xf9, 0xf9, 0x03, 0x21, 0xd4, 0xe8, 0xb3, 0xd5, 0xe9, 0x52, 0xc8, 0x54, 0x1e, 0x1b, 0x13, 0xb6, 0xfd, 0x47, 0x7d, 0x02, 0x32, 0x33, 0x27, 0xe2, 0x1f, 0x19},
- },
- },
-}
-
-var tabVerif = [1 << (omegaFix - 2)]pointR3{
- { /* 1P */
- addYX: fp.Elt{0x85, 0x3b, 0x8c, 0xf5, 0xc6, 0x93, 0xbc, 0x2f, 0x19, 0x0e, 0x8c, 0xfb, 0xc6, 0x2d, 0x93, 0xcf, 0xc2, 0x42, 0x3d, 0x64, 0x98, 0x48, 0x0b, 0x27, 0x65, 0xba, 0xd4, 0x33, 0x3a, 0x9d, 0xcf, 0x07},
- subYX: fp.Elt{0x3e, 0x91, 0x40, 0xd7, 0x05, 0x39, 0x10, 0x9d, 0xb3, 0xbe, 0x40, 0xd1, 0x05, 0x9f, 0x39, 0xfd, 0x09, 0x8a, 0x8f, 0x68, 0x34, 0x84, 0xc1, 0xa5, 0x67, 0x12, 0xf8, 0x98, 0x92, 0x2f, 0xfd, 0x44},
- dt2: fp.Elt{0x68, 0xaa, 0x7a, 0x87, 0x05, 0x12, 0xc9, 0xab, 0x9e, 0xc4, 0xaa, 0xcc, 0x23, 0xe8, 0xd9, 0x26, 0x8c, 0x59, 0x43, 0xdd, 0xcb, 0x7d, 0x1b, 0x5a, 0xa8, 0x65, 0x0c, 0x9f, 0x68, 0x7b, 0x11, 0x6f},
- },
- { /* 3P */
- addYX: fp.Elt{0x30, 0x97, 0xee, 0x4c, 0xa8, 0xb0, 0x25, 0xaf, 0x8a, 0x4b, 0x86, 0xe8, 0x30, 0x84, 0x5a, 0x02, 0x32, 0x67, 0x01, 0x9f, 0x02, 0x50, 0x1b, 0xc1, 0xf4, 0xf8, 0x80, 0x9a, 0x1b, 0x4e, 0x16, 0x7a},
- subYX: fp.Elt{0x65, 0xd2, 0xfc, 0xa4, 0xe8, 0x1f, 0x61, 0x56, 0x7d, 0xba, 0xc1, 0xe5, 0xfd, 0x53, 0xd3, 0x3b, 0xbd, 0xd6, 0x4b, 0x21, 0x1a, 0xf3, 0x31, 0x81, 0x62, 0xda, 0x5b, 0x55, 0x87, 0x15, 0xb9, 0x2a},
- dt2: fp.Elt{0x89, 0xd8, 0xd0, 0x0d, 0x3f, 0x93, 0xae, 0x14, 0x62, 0xda, 0x35, 0x1c, 0x22, 0x23, 0x94, 0x58, 0x4c, 0xdb, 0xf2, 0x8c, 0x45, 0xe5, 0x70, 0xd1, 0xc6, 0xb4, 0xb9, 0x12, 0xaf, 0x26, 0x28, 0x5a},
- },
- { /* 5P */
- addYX: fp.Elt{0x33, 0xbb, 0xa5, 0x08, 0x44, 0xbc, 0x12, 0xa2, 0x02, 0xed, 0x5e, 0xc7, 0xc3, 0x48, 0x50, 0x8d, 0x44, 0xec, 0xbf, 0x5a, 0x0c, 0xeb, 0x1b, 0xdd, 0xeb, 0x06, 0xe2, 0x46, 0xf1, 0xcc, 0x45, 0x29},
- subYX: fp.Elt{0xba, 0xd6, 0x47, 0xa4, 0xc3, 0x82, 0x91, 0x7f, 0xb7, 0x29, 0x27, 0x4b, 0xd1, 0x14, 0x00, 0xd5, 0x87, 0xa0, 0x64, 0xb8, 0x1c, 0xf1, 0x3c, 0xe3, 0xf3, 0x55, 0x1b, 0xeb, 0x73, 0x7e, 0x4a, 0x15},
- dt2: fp.Elt{0x85, 0x82, 0x2a, 0x81, 0xf1, 0xdb, 0xbb, 0xbc, 0xfc, 0xd1, 0xbd, 0xd0, 0x07, 0x08, 0x0e, 0x27, 0x2d, 0xa7, 0xbd, 0x1b, 0x0b, 0x67, 0x1b, 0xb4, 0x9a, 0xb6, 0x3b, 0x6b, 0x69, 0xbe, 0xaa, 0x43},
- },
- { /* 7P */
- addYX: fp.Elt{0xbf, 0xa3, 0x4e, 0x94, 0xd0, 0x5c, 0x1a, 0x6b, 0xd2, 0xc0, 0x9d, 0xb3, 0x3a, 0x35, 0x70, 0x74, 0x49, 0x2e, 0x54, 0x28, 0x82, 0x52, 0xb2, 0x71, 0x7e, 0x92, 0x3c, 0x28, 0x69, 0xea, 0x1b, 0x46},
- subYX: fp.Elt{0xb1, 0x21, 0x32, 0xaa, 0x9a, 0x2c, 0x6f, 0xba, 0xa7, 0x23, 0xba, 0x3b, 0x53, 0x21, 0xa0, 0x6c, 0x3a, 0x2c, 0x19, 0x92, 0x4f, 0x76, 0xea, 0x9d, 0xe0, 0x17, 0x53, 0x2e, 0x5d, 0xdd, 0x6e, 0x1d},
- dt2: fp.Elt{0xa2, 0xb3, 0xb8, 0x01, 0xc8, 0x6d, 0x83, 0xf1, 0x9a, 0xa4, 0x3e, 0x05, 0x47, 0x5f, 0x03, 0xb3, 0xf3, 0xad, 0x77, 0x58, 0xba, 0x41, 0x9c, 0x52, 0xa7, 0x90, 0x0f, 0x6a, 0x1c, 0xbb, 0x9f, 0x7a},
- },
- { /* 9P */
- addYX: fp.Elt{0x2f, 0x63, 0xa8, 0xa6, 0x8a, 0x67, 0x2e, 0x9b, 0xc5, 0x46, 0xbc, 0x51, 0x6f, 0x9e, 0x50, 0xa6, 0xb5, 0xf5, 0x86, 0xc6, 0xc9, 0x33, 0xb2, 0xce, 0x59, 0x7f, 0xdd, 0x8a, 0x33, 0xed, 0xb9, 0x34},
- subYX: fp.Elt{0x64, 0x80, 0x9d, 0x03, 0x7e, 0x21, 0x6e, 0xf3, 0x9b, 0x41, 0x20, 0xf5, 0xb6, 0x81, 0xa0, 0x98, 0x44, 0xb0, 0x5e, 0xe7, 0x08, 0xc6, 0xcb, 0x96, 0x8f, 0x9c, 0xdc, 0xfa, 0x51, 0x5a, 0xc0, 0x49},
- dt2: fp.Elt{0x1b, 0xaf, 0x45, 0x90, 0xbf, 0xe8, 0xb4, 0x06, 0x2f, 0xd2, 0x19, 0xa7, 0xe8, 0x83, 0xff, 0xe2, 0x16, 0xcf, 0xd4, 0x93, 0x29, 0xfc, 0xf6, 0xaa, 0x06, 0x8b, 0x00, 0x1b, 0x02, 0x72, 0xc1, 0x73},
- },
- { /* 11P */
- addYX: fp.Elt{0xde, 0x2a, 0x80, 0x8a, 0x84, 0x00, 0xbf, 0x2f, 0x27, 0x2e, 0x30, 0x02, 0xcf, 0xfe, 0xd9, 0xe5, 0x06, 0x34, 0x70, 0x17, 0x71, 0x84, 0x3e, 0x11, 0xaf, 0x8f, 0x6d, 0x54, 0xe2, 0xaa, 0x75, 0x42},
- subYX: fp.Elt{0x48, 0x43, 0x86, 0x49, 0x02, 0x5b, 0x5f, 0x31, 0x81, 0x83, 0x08, 0x77, 0x69, 0xb3, 0xd6, 0x3e, 0x95, 0xeb, 0x8d, 0x6a, 0x55, 0x75, 0xa0, 0xa3, 0x7f, 0xc7, 0xd5, 0x29, 0x80, 0x59, 0xab, 0x18},
- dt2: fp.Elt{0xe9, 0x89, 0x60, 0xfd, 0xc5, 0x2c, 0x2b, 0xd8, 0xa4, 0xe4, 0x82, 0x32, 0xa1, 0xb4, 0x1e, 0x03, 0x22, 0x86, 0x1a, 0xb5, 0x99, 0x11, 0x31, 0x44, 0x48, 0xf9, 0x3d, 0xb5, 0x22, 0x55, 0xc6, 0x3d},
- },
- { /* 13P */
- addYX: fp.Elt{0x6d, 0x7f, 0x00, 0xa2, 0x22, 0xc2, 0x70, 0xbf, 0xdb, 0xde, 0xbc, 0xb5, 0x9a, 0xb3, 0x84, 0xbf, 0x07, 0xba, 0x07, 0xfb, 0x12, 0x0e, 0x7a, 0x53, 0x41, 0xf2, 0x46, 0xc3, 0xee, 0xd7, 0x4f, 0x23},
- subYX: fp.Elt{0x93, 0xbf, 0x7f, 0x32, 0x3b, 0x01, 0x6f, 0x50, 0x6b, 0x6f, 0x77, 0x9b, 0xc9, 0xeb, 0xfc, 0xae, 0x68, 0x59, 0xad, 0xaa, 0x32, 0xb2, 0x12, 0x9d, 0xa7, 0x24, 0x60, 0x17, 0x2d, 0x88, 0x67, 0x02},
- dt2: fp.Elt{0x78, 0xa3, 0x2e, 0x73, 0x19, 0xa1, 0x60, 0x53, 0x71, 0xd4, 0x8d, 0xdf, 0xb1, 0xe6, 0x37, 0x24, 0x33, 0xe5, 0xa7, 0x91, 0xf8, 0x37, 0xef, 0xa2, 0x63, 0x78, 0x09, 0xaa, 0xfd, 0xa6, 0x7b, 0x49},
- },
- { /* 15P */
- addYX: fp.Elt{0xa0, 0xea, 0xcf, 0x13, 0x03, 0xcc, 0xce, 0x24, 0x6d, 0x24, 0x9c, 0x18, 0x8d, 0xc2, 0x48, 0x86, 0xd0, 0xd4, 0xf2, 0xc1, 0xfa, 0xbd, 0xbd, 0x2d, 0x2b, 0xe7, 0x2d, 0xf1, 0x17, 0x29, 0xe2, 0x61},
- subYX: fp.Elt{0x0b, 0xcf, 0x8c, 0x46, 0x86, 0xcd, 0x0b, 0x04, 0xd6, 0x10, 0x99, 0x2a, 0xa4, 0x9b, 0x82, 0xd3, 0x92, 0x51, 0xb2, 0x07, 0x08, 0x30, 0x08, 0x75, 0xbf, 0x5e, 0xd0, 0x18, 0x42, 0xcd, 0xb5, 0x43},
- dt2: fp.Elt{0x16, 0xb5, 0xd0, 0x9b, 0x2f, 0x76, 0x9a, 0x5d, 0xee, 0xde, 0x3f, 0x37, 0x4e, 0xaf, 0x38, 0xeb, 0x70, 0x42, 0xd6, 0x93, 0x7d, 0x5a, 0x2e, 0x03, 0x42, 0xd8, 0xe4, 0x0a, 0x21, 0x61, 0x1d, 0x51},
- },
- { /* 17P */
- addYX: fp.Elt{0x81, 0x9d, 0x0e, 0x95, 0xef, 0x76, 0xc6, 0x92, 0x4f, 0x04, 0xd7, 0xc0, 0xcd, 0x20, 0x46, 0xa5, 0x48, 0x12, 0x8f, 0x6f, 0x64, 0x36, 0x9b, 0xaa, 0xe3, 0x55, 0xb8, 0xdd, 0x24, 0x59, 0x32, 0x6d},
- subYX: fp.Elt{0x87, 0xde, 0x20, 0x44, 0x48, 0x86, 0x13, 0x08, 0xb4, 0xed, 0x92, 0xb5, 0x16, 0xf0, 0x1c, 0x8a, 0x25, 0x2d, 0x94, 0x29, 0x27, 0x4e, 0xfa, 0x39, 0x10, 0x28, 0x48, 0xe2, 0x6f, 0xfe, 0xa7, 0x71},
- dt2: fp.Elt{0x54, 0xc8, 0xc8, 0xa5, 0xb8, 0x82, 0x71, 0x6c, 0x03, 0x2a, 0x5f, 0xfe, 0x79, 0x14, 0xfd, 0x33, 0x0c, 0x8d, 0x77, 0x83, 0x18, 0x59, 0xcf, 0x72, 0xa9, 0xea, 0x9e, 0x55, 0xb6, 0xc4, 0x46, 0x47},
- },
- { /* 19P */
- addYX: fp.Elt{0x2b, 0x9a, 0xc6, 0x6d, 0x3c, 0x7b, 0x77, 0xd3, 0x17, 0xf6, 0x89, 0x6f, 0x27, 0xb2, 0xfa, 0xde, 0xb5, 0x16, 0x3a, 0xb5, 0xf7, 0x1c, 0x65, 0x45, 0xb7, 0x9f, 0xfe, 0x34, 0xde, 0x51, 0x9a, 0x5c},
- subYX: fp.Elt{0x47, 0x11, 0x74, 0x64, 0xc8, 0x46, 0x85, 0x34, 0x49, 0xc8, 0xfc, 0x0e, 0xdd, 0xae, 0x35, 0x7d, 0x32, 0xa3, 0x72, 0x06, 0x76, 0x9a, 0x93, 0xff, 0xd6, 0xe6, 0xb5, 0x7d, 0x49, 0x63, 0x96, 0x21},
- dt2: fp.Elt{0x67, 0x0e, 0xf1, 0x79, 0xcf, 0xf1, 0x10, 0xf5, 0x5b, 0x51, 0x58, 0xe6, 0xa1, 0xda, 0xdd, 0xff, 0x77, 0x22, 0x14, 0x10, 0x17, 0xa7, 0xc3, 0x09, 0xbb, 0x23, 0x82, 0x60, 0x3c, 0x50, 0x04, 0x48},
- },
- { /* 21P */
- addYX: fp.Elt{0xc7, 0x7f, 0xa3, 0x2c, 0xd0, 0x9e, 0x24, 0xc4, 0xab, 0xac, 0x15, 0xa6, 0xe3, 0xa0, 0x59, 0xa0, 0x23, 0x0e, 0x6e, 0xc9, 0xd7, 0x6e, 0xa9, 0x88, 0x6d, 0x69, 0x50, 0x16, 0xa5, 0x98, 0x33, 0x55},
- subYX: fp.Elt{0x75, 0xd1, 0x36, 0x3a, 0xd2, 0x21, 0x68, 0x3b, 0x32, 0x9e, 0x9b, 0xe9, 0xa7, 0x0a, 0xb4, 0xbb, 0x47, 0x8a, 0x83, 0x20, 0xe4, 0x5c, 0x9e, 0x5d, 0x5e, 0x4c, 0xde, 0x58, 0x88, 0x09, 0x1e, 0x77},
- dt2: fp.Elt{0xdf, 0x1e, 0x45, 0x78, 0xd2, 0xf5, 0x12, 0x9a, 0xcb, 0x9c, 0x89, 0x85, 0x79, 0x5d, 0xda, 0x3a, 0x08, 0x95, 0xa5, 0x9f, 0x2d, 0x4a, 0x7f, 0x47, 0x11, 0xa6, 0xf5, 0x8f, 0xd6, 0xd1, 0x5e, 0x5a},
- },
- { /* 23P */
- addYX: fp.Elt{0x83, 0x0e, 0x15, 0xfe, 0x2a, 0x12, 0x95, 0x11, 0xd8, 0x35, 0x4b, 0x7e, 0x25, 0x9a, 0x20, 0xcf, 0x20, 0x1e, 0x71, 0x1e, 0x29, 0xf8, 0x87, 0x73, 0xf0, 0x92, 0xbf, 0xd8, 0x97, 0xb8, 0xac, 0x44},
- subYX: fp.Elt{0x59, 0x73, 0x52, 0x58, 0xc5, 0xe0, 0xe5, 0xba, 0x7e, 0x9d, 0xdb, 0xca, 0x19, 0x5c, 0x2e, 0x39, 0xe9, 0xab, 0x1c, 0xda, 0x1e, 0x3c, 0x65, 0x28, 0x44, 0xdc, 0xef, 0x5f, 0x13, 0x60, 0x9b, 0x01},
- dt2: fp.Elt{0x83, 0x4b, 0x13, 0x5e, 0x14, 0x68, 0x60, 0x1e, 0x16, 0x4c, 0x30, 0x24, 0x4f, 0xe6, 0xf5, 0xc4, 0xd7, 0x3e, 0x1a, 0xfc, 0xa8, 0x88, 0x6e, 0x50, 0x92, 0x2f, 0xad, 0xe6, 0xfd, 0x49, 0x0c, 0x15},
- },
- { /* 25P */
- addYX: fp.Elt{0x38, 0x11, 0x47, 0x09, 0x95, 0xf2, 0x7b, 0x8e, 0x51, 0xa6, 0x75, 0x4f, 0x39, 0xef, 0x6f, 0x5d, 0xad, 0x08, 0xa7, 0x25, 0xc4, 0x79, 0xaf, 0x10, 0x22, 0x99, 0xb9, 0x5b, 0x07, 0x5a, 0x2b, 0x6b},
- subYX: fp.Elt{0x68, 0xa8, 0xdc, 0x9c, 0x3c, 0x86, 0x49, 0xb8, 0xd0, 0x4a, 0x71, 0xb8, 0xdb, 0x44, 0x3f, 0xc8, 0x8d, 0x16, 0x36, 0x0c, 0x56, 0xe3, 0x3e, 0xfe, 0xc1, 0xfb, 0x05, 0x1e, 0x79, 0xd7, 0xa6, 0x78},
- dt2: fp.Elt{0x76, 0xb9, 0xa0, 0x47, 0x4b, 0x70, 0xbf, 0x58, 0xd5, 0x48, 0x17, 0x74, 0x55, 0xb3, 0x01, 0xa6, 0x90, 0xf5, 0x42, 0xd5, 0xb1, 0x1f, 0x2b, 0xaa, 0x00, 0x5d, 0xd5, 0x4a, 0xfc, 0x7f, 0x5c, 0x72},
- },
- { /* 27P */
- addYX: fp.Elt{0xb2, 0x99, 0xcf, 0xd1, 0x15, 0x67, 0x42, 0xe4, 0x34, 0x0d, 0xa2, 0x02, 0x11, 0xd5, 0x52, 0x73, 0x9f, 0x10, 0x12, 0x8b, 0x7b, 0x15, 0xd1, 0x23, 0xa3, 0xf3, 0xb1, 0x7c, 0x27, 0xc9, 0x4c, 0x79},
- subYX: fp.Elt{0xc0, 0x98, 0xd0, 0x1c, 0xf7, 0x2b, 0x80, 0x91, 0x66, 0x63, 0x5e, 0xed, 0xa4, 0x6c, 0x41, 0xfe, 0x4c, 0x99, 0x02, 0x49, 0x71, 0x5d, 0x58, 0xdf, 0xe7, 0xfa, 0x55, 0xf8, 0x25, 0x46, 0xd5, 0x4c},
- dt2: fp.Elt{0x53, 0x50, 0xac, 0xc2, 0x26, 0xc4, 0xf6, 0x4a, 0x58, 0x72, 0xf6, 0x32, 0xad, 0xed, 0x9a, 0xbc, 0x21, 0x10, 0x31, 0x0a, 0xf1, 0x32, 0xd0, 0x2a, 0x85, 0x8e, 0xcc, 0x6f, 0x7b, 0x35, 0x08, 0x70},
- },
- { /* 29P */
- addYX: fp.Elt{0x01, 0x3f, 0x77, 0x38, 0x27, 0x67, 0x88, 0x0b, 0xfb, 0xcc, 0xfb, 0x95, 0xfa, 0xc8, 0xcc, 0xb8, 0xb6, 0x29, 0xad, 0xb9, 0xa3, 0xd5, 0x2d, 0x8d, 0x6a, 0x0f, 0xad, 0x51, 0x98, 0x7e, 0xef, 0x06},
- subYX: fp.Elt{0x34, 0x4a, 0x58, 0x82, 0xbb, 0x9f, 0x1b, 0xd0, 0x2b, 0x79, 0xb4, 0xd2, 0x63, 0x64, 0xab, 0x47, 0x02, 0x62, 0x53, 0x48, 0x9c, 0x63, 0x31, 0xb6, 0x28, 0xd4, 0xd6, 0x69, 0x36, 0x2a, 0xa9, 0x13},
- dt2: fp.Elt{0xe5, 0x7d, 0x57, 0xc0, 0x1c, 0x77, 0x93, 0xca, 0x5c, 0xdc, 0x35, 0x50, 0x1e, 0xe4, 0x40, 0x75, 0x71, 0xe0, 0x02, 0xd8, 0x01, 0x0f, 0x68, 0x24, 0x6a, 0xf8, 0x2a, 0x8a, 0xdf, 0x6d, 0x29, 0x3c},
- },
- { /* 31P */
- addYX: fp.Elt{0x13, 0xa7, 0x14, 0xd9, 0xf9, 0x15, 0xad, 0xae, 0x12, 0xf9, 0x8f, 0x8c, 0xf9, 0x7b, 0x2f, 0xa9, 0x30, 0xd7, 0x53, 0x9f, 0x17, 0x23, 0xf8, 0xaf, 0xba, 0x77, 0x0c, 0x49, 0x93, 0xd3, 0x99, 0x7a},
- subYX: fp.Elt{0x41, 0x25, 0x1f, 0xbb, 0x2e, 0x4d, 0xeb, 0xfc, 0x1f, 0xb9, 0xad, 0x40, 0xc7, 0x10, 0x95, 0xb8, 0x05, 0xad, 0xa1, 0xd0, 0x7d, 0xa3, 0x71, 0xfc, 0x7b, 0x71, 0x47, 0x07, 0x70, 0x2c, 0x89, 0x0a},
- dt2: fp.Elt{0xe8, 0xa3, 0xbd, 0x36, 0x24, 0xed, 0x52, 0x8f, 0x94, 0x07, 0xe8, 0x57, 0x41, 0xc8, 0xa8, 0x77, 0xe0, 0x9c, 0x2f, 0x26, 0x63, 0x65, 0xa9, 0xa5, 0xd2, 0xf7, 0x02, 0x83, 0xd2, 0x62, 0x67, 0x28},
- },
- { /* 33P */
- addYX: fp.Elt{0x25, 0x5b, 0xe3, 0x3c, 0x09, 0x36, 0x78, 0x4e, 0x97, 0xaa, 0x6b, 0xb2, 0x1d, 0x18, 0xe1, 0x82, 0x3f, 0xb8, 0xc7, 0xcb, 0xd3, 0x92, 0xc1, 0x0c, 0x3a, 0x9d, 0x9d, 0x6a, 0x04, 0xda, 0xf1, 0x32},
- subYX: fp.Elt{0xbd, 0xf5, 0x2e, 0xce, 0x2b, 0x8e, 0x55, 0x7c, 0x63, 0xbc, 0x47, 0x67, 0xb4, 0x6c, 0x98, 0xe4, 0xb8, 0x89, 0xbb, 0x3b, 0x9f, 0x17, 0x4a, 0x15, 0x7a, 0x76, 0xf1, 0xd6, 0xa3, 0xf2, 0x86, 0x76},
- dt2: fp.Elt{0x6a, 0x7c, 0x59, 0x6d, 0xa6, 0x12, 0x8d, 0xaa, 0x2b, 0x85, 0xd3, 0x04, 0x03, 0x93, 0x11, 0x8f, 0x22, 0xb0, 0x09, 0xc2, 0x73, 0xdc, 0x91, 0x3f, 0xa6, 0x28, 0xad, 0xa9, 0xf8, 0x05, 0x13, 0x56},
- },
- { /* 35P */
- addYX: fp.Elt{0xd1, 0xae, 0x92, 0xec, 0x8d, 0x97, 0x0c, 0x10, 0xe5, 0x73, 0x6d, 0x4d, 0x43, 0xd5, 0x43, 0xca, 0x48, 0xba, 0x47, 0xd8, 0x22, 0x1b, 0x13, 0x83, 0x2c, 0x4d, 0x5d, 0xe3, 0x53, 0xec, 0xaa},
- subYX: fp.Elt{0xd5, 0xc0, 0xb0, 0xe7, 0x28, 0xcc, 0x22, 0x67, 0x53, 0x5c, 0x07, 0xdb, 0xbb, 0xe9, 0x9d, 0x70, 0x61, 0x0a, 0x01, 0xd7, 0xa7, 0x8d, 0xf6, 0xca, 0x6c, 0xcc, 0x57, 0x2c, 0xef, 0x1a, 0x0a, 0x03},
- dt2: fp.Elt{0xaa, 0xd2, 0x3a, 0x00, 0x73, 0xf7, 0xb1, 0x7b, 0x08, 0x66, 0x21, 0x2b, 0x80, 0x29, 0x3f, 0x0b, 0x3e, 0xd2, 0x0e, 0x52, 0x86, 0xdc, 0x21, 0x78, 0x80, 0x54, 0x06, 0x24, 0x1c, 0x9c, 0xbe, 0x20},
- },
- { /* 37P */
- addYX: fp.Elt{0xa6, 0x73, 0x96, 0x24, 0xd8, 0x87, 0x53, 0xe1, 0x93, 0xe4, 0x46, 0xf5, 0x2d, 0xbc, 0x43, 0x59, 0xb5, 0x63, 0x6f, 0xc3, 0x81, 0x9a, 0x7f, 0x1c, 0xde, 0xc1, 0x0a, 0x1f, 0x36, 0xb3, 0x0a, 0x75},
- subYX: fp.Elt{0x60, 0x5e, 0x02, 0xe2, 0x4a, 0xe4, 0xe0, 0x20, 0x38, 0xb9, 0xdc, 0xcb, 0x2f, 0x3b, 0x3b, 0xb0, 0x1c, 0x0d, 0x5a, 0xf9, 0x9c, 0x63, 0x5d, 0x10, 0x11, 0xe3, 0x67, 0x50, 0x54, 0x4c, 0x76, 0x69},
- dt2: fp.Elt{0x37, 0x10, 0xf8, 0xa2, 0x83, 0x32, 0x8a, 0x1e, 0xf1, 0xcb, 0x7f, 0xbd, 0x23, 0xda, 0x2e, 0x6f, 0x63, 0x25, 0x2e, 0xac, 0x5b, 0xd1, 0x2f, 0xb7, 0x40, 0x50, 0x07, 0xb7, 0x3f, 0x6b, 0xf9, 0x54},
- },
- { /* 39P */
- addYX: fp.Elt{0x79, 0x92, 0x66, 0x29, 0x04, 0xf2, 0xad, 0x0f, 0x4a, 0x72, 0x7d, 0x7d, 0x04, 0xa2, 0xdd, 0x3a, 0xf1, 0x60, 0x57, 0x8c, 0x82, 0x94, 0x3d, 0x6f, 0x9e, 0x53, 0xb7, 0x2b, 0xc5, 0xe9, 0x7f, 0x3d},
- subYX: fp.Elt{0xcd, 0x1e, 0xb1, 0x16, 0xc6, 0xaf, 0x7d, 0x17, 0x79, 0x64, 0x57, 0xfa, 0x9c, 0x4b, 0x76, 0x89, 0x85, 0xe7, 0xec, 0xe6, 0x10, 0xa1, 0xa8, 0xb7, 0xf0, 0xdb, 0x85, 0xbe, 0x9f, 0x83, 0xe6, 0x78},
- dt2: fp.Elt{0x6b, 0x85, 0xb8, 0x37, 0xf7, 0x2d, 0x33, 0x70, 0x8a, 0x17, 0x1a, 0x04, 0x43, 0x5d, 0xd0, 0x75, 0x22, 0x9e, 0xe5, 0xa0, 0x4a, 0xf7, 0x0f, 0x32, 0x42, 0x82, 0x08, 0x50, 0xf3, 0x68, 0xf2, 0x70},
- },
- { /* 41P */
- addYX: fp.Elt{0x47, 0x5f, 0x80, 0xb1, 0x83, 0x45, 0x86, 0x66, 0x19, 0x7c, 0xdd, 0x60, 0xd1, 0xc5, 0x35, 0xf5, 0x06, 0xb0, 0x4c, 0x1e, 0xb7, 0x4e, 0x87, 0xe9, 0xd9, 0x89, 0xd8, 0xfa, 0x5c, 0x34, 0x0d, 0x7c},
- subYX: fp.Elt{0x55, 0xf3, 0xdc, 0x70, 0x20, 0x11, 0x24, 0x23, 0x17, 0xe1, 0xfc, 0xe7, 0x7e, 0xc9, 0x0c, 0x38, 0x98, 0xb6, 0x52, 0x35, 0xed, 0xde, 0x1d, 0xb3, 0xb9, 0xc4, 0xb8, 0x39, 0xc0, 0x56, 0x4e, 0x40},
- dt2: fp.Elt{0x8a, 0x33, 0x78, 0x8c, 0x4b, 0x1f, 0x1f, 0x59, 0xe1, 0xb5, 0xe0, 0x67, 0xb1, 0x6a, 0x36, 0xa0, 0x44, 0x3d, 0x5f, 0xb4, 0x52, 0x41, 0xbc, 0x5c, 0x77, 0xc7, 0xae, 0x2a, 0x76, 0x54, 0xd7, 0x20},
- },
- { /* 43P */
- addYX: fp.Elt{0x58, 0xb7, 0x3b, 0xc7, 0x6f, 0xc3, 0x8f, 0x5e, 0x9a, 0xbb, 0x3c, 0x36, 0xa5, 0x43, 0xe5, 0xac, 0x22, 0xc9, 0x3b, 0x90, 0x7d, 0x4a, 0x93, 0xa9, 0x62, 0xec, 0xce, 0xf3, 0x46, 0x1e, 0x8f, 0x2b},
- subYX: fp.Elt{0x43, 0xf5, 0xb9, 0x35, 0xb1, 0xfe, 0x74, 0x9d, 0x6c, 0x95, 0x8c, 0xde, 0xf1, 0x7d, 0xb3, 0x84, 0xa9, 0x8b, 0x13, 0x57, 0x07, 0x2b, 0x32, 0xe9, 0xe1, 0x4c, 0x0b, 0x79, 0xa8, 0xad, 0xb8, 0x38},
- dt2: fp.Elt{0x5d, 0xf9, 0x51, 0xdf, 0x9c, 0x4a, 0xc0, 0xb5, 0xac, 0xde, 0x1f, 0xcb, 0xae, 0x52, 0x39, 0x2b, 0xda, 0x66, 0x8b, 0x32, 0x8b, 0x6d, 0x10, 0x1d, 0x53, 0x19, 0xba, 0xce, 0x32, 0xeb, 0x9a, 0x04},
- },
- { /* 45P */
- addYX: fp.Elt{0x31, 0x79, 0xfc, 0x75, 0x0b, 0x7d, 0x50, 0xaa, 0xd3, 0x25, 0x67, 0x7a, 0x4b, 0x92, 0xef, 0x0f, 0x30, 0x39, 0x6b, 0x39, 0x2b, 0x54, 0x82, 0x1d, 0xfc, 0x74, 0xf6, 0x30, 0x75, 0xe1, 0x5e, 0x79},
- subYX: fp.Elt{0x7e, 0xfe, 0xdc, 0x63, 0x3c, 0x7d, 0x76, 0xd7, 0x40, 0x6e, 0x85, 0x97, 0x48, 0x59, 0x9c, 0x20, 0x13, 0x7c, 0x4f, 0xe1, 0x61, 0x68, 0x67, 0xb6, 0xfc, 0x25, 0xd6, 0xc8, 0xe0, 0x65, 0xc6, 0x51},
- dt2: fp.Elt{0x81, 0xbd, 0xec, 0x52, 0x0a, 0x5b, 0x4a, 0x25, 0xe7, 0xaf, 0x34, 0xe0, 0x6e, 0x1f, 0x41, 0x5d, 0x31, 0x4a, 0xee, 0xca, 0x0d, 0x4d, 0xa2, 0xe6, 0x77, 0x44, 0xc5, 0x9d, 0xf4, 0x9b, 0xd1, 0x6c},
- },
- { /* 47P */
- addYX: fp.Elt{0x86, 0xc3, 0xaf, 0x65, 0x21, 0x61, 0xfe, 0x1f, 0x10, 0x1b, 0xd5, 0xb8, 0x88, 0x2a, 0x2a, 0x08, 0xaa, 0x0b, 0x99, 0x20, 0x7e, 0x62, 0xf6, 0x76, 0xe7, 0x43, 0x9e, 0x42, 0xa7, 0xb3, 0x01, 0x5e},
- subYX: fp.Elt{0xa3, 0x9c, 0x17, 0x52, 0x90, 0x61, 0x87, 0x7e, 0x85, 0x9f, 0x2c, 0x0b, 0x06, 0x0a, 0x1d, 0x57, 0x1e, 0x71, 0x99, 0x84, 0xa8, 0xba, 0xa2, 0x80, 0x38, 0xe6, 0xb2, 0x40, 0xdb, 0xf3, 0x20, 0x75},
- dt2: fp.Elt{0xa1, 0x57, 0x93, 0xd3, 0xe3, 0x0b, 0xb5, 0x3d, 0xa5, 0x94, 0x9e, 0x59, 0xdd, 0x6c, 0x7b, 0x96, 0x6e, 0x1e, 0x31, 0xdf, 0x64, 0x9a, 0x30, 0x1a, 0x86, 0xc9, 0xf3, 0xce, 0x9c, 0x2c, 0x09, 0x71},
- },
- { /* 49P */
- addYX: fp.Elt{0xcf, 0x1d, 0x05, 0x74, 0xac, 0xd8, 0x6b, 0x85, 0x1e, 0xaa, 0xb7, 0x55, 0x08, 0xa4, 0xf6, 0x03, 0xeb, 0x3c, 0x74, 0xc9, 0xcb, 0xe7, 0x4a, 0x3a, 0xde, 0xab, 0x37, 0x71, 0xbb, 0xa5, 0x73, 0x41},
- subYX: fp.Elt{0x8c, 0x91, 0x64, 0x03, 0x3f, 0x52, 0xd8, 0x53, 0x1c, 0x6b, 0xab, 0x3f, 0xf4, 0x04, 0xb4, 0xa2, 0xa4, 0xe5, 0x81, 0x66, 0x9e, 0x4a, 0x0b, 0x08, 0xa7, 0x7b, 0x25, 0xd0, 0x03, 0x5b, 0xa1, 0x0e},
- dt2: fp.Elt{0x8a, 0x21, 0xf9, 0xf0, 0x31, 0x6e, 0xc5, 0x17, 0x08, 0x47, 0xfc, 0x1a, 0x2b, 0x6e, 0x69, 0x5a, 0x76, 0xf1, 0xb2, 0xf4, 0x68, 0x16, 0x93, 0xf7, 0x67, 0x3a, 0x4e, 0x4a, 0x61, 0x65, 0xc5, 0x5f},
- },
- { /* 51P */
- addYX: fp.Elt{0x8e, 0x98, 0x90, 0x77, 0xe6, 0xe1, 0x92, 0x48, 0x22, 0xd7, 0x5c, 0x1c, 0x0f, 0x95, 0xd5, 0x01, 0xed, 0x3e, 0x92, 0xe5, 0x9a, 0x81, 0xb0, 0xe3, 0x1b, 0x65, 0x46, 0x9d, 0x40, 0xc7, 0x14, 0x32},
- subYX: fp.Elt{0xe5, 0x7a, 0x6d, 0xc4, 0x0d, 0x57, 0x6e, 0x13, 0x8f, 0xdc, 0xf8, 0x54, 0xcc, 0xaa, 0xd0, 0x0f, 0x86, 0xad, 0x0d, 0x31, 0x03, 0x9f, 0x54, 0x59, 0xa1, 0x4a, 0x45, 0x4c, 0x41, 0x1c, 0x71, 0x62},
- dt2: fp.Elt{0x70, 0x17, 0x65, 0x06, 0x74, 0x82, 0x29, 0x13, 0x36, 0x94, 0x27, 0x8a, 0x66, 0xa0, 0xa4, 0x3b, 0x3c, 0x22, 0x5d, 0x18, 0xec, 0xb8, 0xb6, 0xd9, 0x3c, 0x83, 0xcb, 0x3e, 0x07, 0x94, 0xea, 0x5b},
- },
- { /* 53P */
- addYX: fp.Elt{0xf8, 0xd2, 0x43, 0xf3, 0x63, 0xce, 0x70, 0xb4, 0xf1, 0xe8, 0x43, 0x05, 0x8f, 0xba, 0x67, 0x00, 0x6f, 0x7b, 0x11, 0xa2, 0xa1, 0x51, 0xda, 0x35, 0x2f, 0xbd, 0xf1, 0x44, 0x59, 0x78, 0xd0, 0x4a},
- subYX: fp.Elt{0xe4, 0x9b, 0xc8, 0x12, 0x09, 0xbf, 0x1d, 0x64, 0x9c, 0x57, 0x6e, 0x7d, 0x31, 0x8b, 0xf3, 0xac, 0x65, 0xb0, 0x97, 0xf6, 0x02, 0x9e, 0xfe, 0xab, 0xec, 0x1e, 0xf6, 0x48, 0xc1, 0xd5, 0xac, 0x3a},
- dt2: fp.Elt{0x01, 0x83, 0x31, 0xc3, 0x34, 0x3b, 0x8e, 0x85, 0x26, 0x68, 0x31, 0x07, 0x47, 0xc0, 0x99, 0xdc, 0x8c, 0xa8, 0x9d, 0xd3, 0x2e, 0x5b, 0x08, 0x34, 0x3d, 0x85, 0x02, 0xd9, 0xb1, 0x0c, 0xff, 0x3a},
- },
- { /* 55P */
- addYX: fp.Elt{0x05, 0x35, 0xc5, 0xf4, 0x0b, 0x43, 0x26, 0x92, 0x83, 0x22, 0x1f, 0x26, 0x13, 0x9c, 0xe4, 0x68, 0xc6, 0x27, 0xd3, 0x8f, 0x78, 0x33, 0xef, 0x09, 0x7f, 0x9e, 0xd9, 0x2b, 0x73, 0x9f, 0xcf, 0x2c},
- subYX: fp.Elt{0x5e, 0x40, 0x20, 0x3a, 0xeb, 0xc7, 0xc5, 0x87, 0xc9, 0x56, 0xad, 0xed, 0xef, 0x11, 0xe3, 0x8e, 0xf9, 0xd5, 0x29, 0xad, 0x48, 0x2e, 0x25, 0x29, 0x1d, 0x25, 0xcd, 0xf4, 0x86, 0x7e, 0x0e, 0x11},
- dt2: fp.Elt{0xe4, 0xf5, 0x03, 0xd6, 0x9e, 0xd8, 0xc0, 0x57, 0x0c, 0x20, 0xb0, 0xf0, 0x28, 0x86, 0x88, 0x12, 0xb7, 0x3b, 0x2e, 0xa0, 0x09, 0x27, 0x17, 0x53, 0x37, 0x3a, 0x69, 0xb9, 0xe0, 0x57, 0xc5, 0x05},
- },
- { /* 57P */
- addYX: fp.Elt{0xb0, 0x0e, 0xc2, 0x89, 0xb0, 0xbb, 0x76, 0xf7, 0x5c, 0xd8, 0x0f, 0xfa, 0xf6, 0x5b, 0xf8, 0x61, 0xfb, 0x21, 0x44, 0x63, 0x4e, 0x3f, 0xb9, 0xb6, 0x05, 0x12, 0x86, 0x41, 0x08, 0xef, 0x9f, 0x28},
- subYX: fp.Elt{0x6f, 0x7e, 0xc9, 0x1f, 0x31, 0xce, 0xf9, 0xd8, 0xae, 0xfd, 0xf9, 0x11, 0x30, 0x26, 0x3f, 0x7a, 0xdd, 0x25, 0xed, 0x8b, 0xa0, 0x7e, 0x5b, 0xe1, 0x5a, 0x87, 0xe9, 0x8f, 0x17, 0x4c, 0x15, 0x6e},
- dt2: fp.Elt{0xbf, 0x9a, 0xd6, 0xfe, 0x36, 0x63, 0x61, 0xcf, 0x4f, 0xc9, 0x35, 0x83, 0xe7, 0xe4, 0x16, 0x9b, 0xe7, 0x7f, 0x3a, 0x75, 0x65, 0x97, 0x78, 0x13, 0x19, 0xa3, 0x5c, 0xa9, 0x42, 0xf6, 0xfb, 0x6a},
- },
- { /* 59P */
- addYX: fp.Elt{0xcc, 0xa8, 0x13, 0xf9, 0x70, 0x50, 0xe5, 0x5d, 0x61, 0xf5, 0x0c, 0x2b, 0x7b, 0x16, 0x1d, 0x7d, 0x89, 0xd4, 0xea, 0x90, 0xb6, 0x56, 0x29, 0xda, 0xd9, 0x1e, 0x80, 0xdb, 0xce, 0x93, 0xc0, 0x12},
- subYX: fp.Elt{0xc1, 0xd2, 0xf5, 0x62, 0x0c, 0xde, 0xa8, 0x7d, 0x9a, 0x7b, 0x0e, 0xb0, 0xa4, 0x3d, 0xfc, 0x98, 0xe0, 0x70, 0xad, 0x0d, 0xda, 0x6a, 0xeb, 0x7d, 0xc4, 0x38, 0x50, 0xb9, 0x51, 0xb8, 0xb4, 0x0d},
- dt2: fp.Elt{0x0f, 0x19, 0xb8, 0x08, 0x93, 0x7f, 0x14, 0xfc, 0x10, 0xe3, 0x1a, 0xa1, 0xa0, 0x9d, 0x96, 0x06, 0xfd, 0xd7, 0xc7, 0xda, 0x72, 0x55, 0xe7, 0xce, 0xe6, 0x5c, 0x63, 0xc6, 0x99, 0x87, 0xaa, 0x33},
- },
- { /* 61P */
- addYX: fp.Elt{0xb1, 0x6c, 0x15, 0xfc, 0x88, 0xf5, 0x48, 0x83, 0x27, 0x6d, 0x0a, 0x1a, 0x9b, 0xba, 0xa2, 0x6d, 0xb6, 0x5a, 0xca, 0x87, 0x5c, 0x2d, 0x26, 0xe2, 0xa6, 0x89, 0xd5, 0xc8, 0xc1, 0xd0, 0x2c, 0x21},
- subYX: fp.Elt{0xf2, 0x5c, 0x08, 0xbd, 0x1e, 0xf5, 0x0f, 0xaf, 0x1f, 0x3f, 0xd3, 0x67, 0x89, 0x1a, 0xf5, 0x78, 0x3c, 0x03, 0x60, 0x50, 0xe1, 0xbf, 0xc2, 0x6e, 0x86, 0x1a, 0xe2, 0xe8, 0x29, 0x6f, 0x3c, 0x23},
- dt2: fp.Elt{0x81, 0xc7, 0x18, 0x7f, 0x10, 0xd5, 0xf4, 0xd2, 0x28, 0x9d, 0x7e, 0x52, 0xf2, 0xcd, 0x2e, 0x12, 0x41, 0x33, 0x3d, 0x3d, 0x2a, 0x86, 0x0a, 0xa7, 0xe3, 0x4c, 0x91, 0x11, 0x89, 0x77, 0xb7, 0x1d},
- },
- { /* 63P */
- addYX: fp.Elt{0xb6, 0x1a, 0x70, 0xdd, 0x69, 0x47, 0x39, 0xb3, 0xa5, 0x8d, 0xcf, 0x19, 0xd4, 0xde, 0xb8, 0xe2, 0x52, 0xc8, 0x2a, 0xfd, 0x61, 0x41, 0xdf, 0x15, 0xbe, 0x24, 0x7d, 0x01, 0x8a, 0xca, 0xe2, 0x7a},
- subYX: fp.Elt{0x6f, 0xc2, 0x6b, 0x7c, 0x39, 0x52, 0xf3, 0xdd, 0x13, 0x01, 0xd5, 0x53, 0xcc, 0xe2, 0x97, 0x7a, 0x30, 0xa3, 0x79, 0xbf, 0x3a, 0xf4, 0x74, 0x7c, 0xfc, 0xad, 0xe2, 0x26, 0xad, 0x97, 0xad, 0x31},
- dt2: fp.Elt{0x62, 0xb9, 0x20, 0x09, 0xed, 0x17, 0xe8, 0xb7, 0x9d, 0xda, 0x19, 0x3f, 0xcc, 0x18, 0x85, 0x1e, 0x64, 0x0a, 0x56, 0x25, 0x4f, 0xc1, 0x91, 0xe4, 0x83, 0x2c, 0x62, 0xa6, 0x53, 0xfc, 0xd1, 0x1e},
- },
-}
diff --git a/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go b/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go
deleted file mode 100644
index 324bd8f3..00000000
--- a/vendor/github.com/cloudflare/circl/sign/ed448/ed448.go
+++ /dev/null
@@ -1,411 +0,0 @@
-// Package ed448 implements Ed448 signature scheme as described in RFC-8032.
-//
-// This package implements two signature variants.
-//
-// | Scheme Name | Sign Function | Verification | Context |
-// |-------------|-------------------|---------------|-------------------|
-// | Ed448 | Sign | Verify | Yes, can be empty |
-// | Ed448Ph | SignPh | VerifyPh | Yes, can be empty |
-// | All above | (PrivateKey).Sign | VerifyAny | As above |
-//
-// Specific functions for sign and verify are defined. A generic signing
-// function for all schemes is available through the crypto.Signer interface,
-// which is implemented by the PrivateKey type. A correspond all-in-one
-// verification method is provided by the VerifyAny function.
-//
-// Both schemes require a context string for domain separation. This parameter
-// is passed using a SignerOptions struct defined in this package.
-//
-// References:
-//
-// - RFC8032: https://rfc-editor.org/rfc/rfc8032.txt
-// - EdDSA for more curves: https://eprint.iacr.org/2015/677
-// - High-speed high-security signatures: https://doi.org/10.1007/s13389-012-0027-1
-package ed448
-
-import (
- "bytes"
- "crypto"
- cryptoRand "crypto/rand"
- "crypto/subtle"
- "errors"
- "fmt"
- "io"
- "strconv"
-
- "github.com/cloudflare/circl/ecc/goldilocks"
- "github.com/cloudflare/circl/internal/sha3"
- "github.com/cloudflare/circl/sign"
-)
-
-const (
- // ContextMaxSize is the maximum length (in bytes) allowed for context.
- ContextMaxSize = 255
- // PublicKeySize is the length in bytes of Ed448 public keys.
- PublicKeySize = 57
- // PrivateKeySize is the length in bytes of Ed448 private keys.
- PrivateKeySize = 114
- // SignatureSize is the length in bytes of signatures.
- SignatureSize = 114
- // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
- SeedSize = 57
-)
-
-const (
- paramB = 456 / 8 // Size of keys in bytes.
- hashSize = 2 * paramB // Size of the hash function's output.
-)
-
-// SignerOptions implements crypto.SignerOpts and augments with parameters
-// that are specific to the Ed448 signature schemes.
-type SignerOptions struct {
- // Hash must be crypto.Hash(0) for both Ed448 and Ed448Ph.
- crypto.Hash
-
- // Context is an optional domain separation string for signing.
- // Its length must be less or equal than 255 bytes.
- Context string
-
- // Scheme is an identifier for choosing a signature scheme.
- Scheme SchemeID
-}
-
-// SchemeID is an identifier for each signature scheme.
-type SchemeID uint
-
-const (
- ED448 SchemeID = iota
- ED448Ph
-)
-
-// PublicKey is the type of Ed448 public keys.
-type PublicKey []byte
-
-// Equal reports whether pub and x have the same value.
-func (pub PublicKey) Equal(x crypto.PublicKey) bool {
- xx, ok := x.(PublicKey)
- return ok && bytes.Equal(pub, xx)
-}
-
-// PrivateKey is the type of Ed448 private keys. It implements crypto.Signer.
-type PrivateKey []byte
-
-// Equal reports whether priv and x have the same value.
-func (priv PrivateKey) Equal(x crypto.PrivateKey) bool {
- xx, ok := x.(PrivateKey)
- return ok && subtle.ConstantTimeCompare(priv, xx) == 1
-}
-
-// Public returns the PublicKey corresponding to priv.
-func (priv PrivateKey) Public() crypto.PublicKey {
- publicKey := make([]byte, PublicKeySize)
- copy(publicKey, priv[SeedSize:])
- return PublicKey(publicKey)
-}
-
-// Seed returns the private key seed corresponding to priv. It is provided for
-// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
-// in this package.
-func (priv PrivateKey) Seed() []byte {
- seed := make([]byte, SeedSize)
- copy(seed, priv[:SeedSize])
- return seed
-}
-
-func (priv PrivateKey) Scheme() sign.Scheme { return sch }
-
-func (pub PublicKey) Scheme() sign.Scheme { return sch }
-
-func (priv PrivateKey) MarshalBinary() (data []byte, err error) {
- privateKey := make(PrivateKey, PrivateKeySize)
- copy(privateKey, priv)
- return privateKey, nil
-}
-
-func (pub PublicKey) MarshalBinary() (data []byte, err error) {
- publicKey := make(PublicKey, PublicKeySize)
- copy(publicKey, pub)
- return publicKey, nil
-}
-
-// Sign creates a signature of a message given a key pair.
-// This function supports all the two signature variants defined in RFC-8032,
-// namely Ed448 (or pure EdDSA) and Ed448Ph.
-// The opts.HashFunc() must return zero to the specify Ed448 variant. This can
-// be achieved by passing crypto.Hash(0) as the value for opts.
-// Use an Options struct to pass a bool indicating that the ed448Ph variant
-// should be used.
-// The struct can also be optionally used to pass a context string for signing.
-func (priv PrivateKey) Sign(
- rand io.Reader,
- message []byte,
- opts crypto.SignerOpts,
-) (signature []byte, err error) {
- var ctx string
- var scheme SchemeID
-
- if o, ok := opts.(SignerOptions); ok {
- ctx = o.Context
- scheme = o.Scheme
- }
-
- switch true {
- case scheme == ED448 && opts.HashFunc() == crypto.Hash(0):
- return Sign(priv, message, ctx), nil
- case scheme == ED448Ph && opts.HashFunc() == crypto.Hash(0):
- return SignPh(priv, message, ctx), nil
- default:
- return nil, errors.New("ed448: bad hash algorithm")
- }
-}
-
-// GenerateKey generates a public/private key pair using entropy from rand.
-// If rand is nil, crypto/rand.Reader will be used.
-func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
- if rand == nil {
- rand = cryptoRand.Reader
- }
-
- seed := make(PrivateKey, SeedSize)
- if _, err := io.ReadFull(rand, seed); err != nil {
- return nil, nil, err
- }
-
- privateKey := NewKeyFromSeed(seed)
- publicKey := make([]byte, PublicKeySize)
- copy(publicKey, privateKey[SeedSize:])
-
- return publicKey, privateKey, nil
-}
-
-// NewKeyFromSeed calculates a private key from a seed. It will panic if
-// len(seed) is not SeedSize. This function is provided for interoperability
-// with RFC 8032. RFC 8032's private keys correspond to seeds in this
-// package.
-func NewKeyFromSeed(seed []byte) PrivateKey {
- privateKey := make([]byte, PrivateKeySize)
- newKeyFromSeed(privateKey, seed)
- return privateKey
-}
-
-func newKeyFromSeed(privateKey, seed []byte) {
- if l := len(seed); l != SeedSize {
- panic("ed448: bad seed length: " + strconv.Itoa(l))
- }
-
- var h [hashSize]byte
- H := sha3.NewShake256()
- _, _ = H.Write(seed)
- _, _ = H.Read(h[:])
- s := &goldilocks.Scalar{}
- deriveSecretScalar(s, h[:paramB])
-
- copy(privateKey[:SeedSize], seed)
- _ = goldilocks.Curve{}.ScalarBaseMult(s).ToBytes(privateKey[SeedSize:])
-}
-
-func signAll(signature []byte, privateKey PrivateKey, message, ctx []byte, preHash bool) {
- if len(ctx) > ContextMaxSize {
- panic(fmt.Errorf("ed448: bad context length: " + strconv.Itoa(len(ctx))))
- }
-
- H := sha3.NewShake256()
- var PHM []byte
-
- if preHash {
- var h [64]byte
- _, _ = H.Write(message)
- _, _ = H.Read(h[:])
- PHM = h[:]
- H.Reset()
- } else {
- PHM = message
- }
-
- // 1. Hash the 57-byte private key using SHAKE256(x, 114).
- var h [hashSize]byte
- _, _ = H.Write(privateKey[:SeedSize])
- _, _ = H.Read(h[:])
- s := &goldilocks.Scalar{}
- deriveSecretScalar(s, h[:paramB])
- prefix := h[paramB:]
-
- // 2. Compute SHAKE256(dom4(F, C) || prefix || PH(M), 114).
- var rPM [hashSize]byte
- H.Reset()
-
- writeDom(&H, ctx, preHash)
-
- _, _ = H.Write(prefix)
- _, _ = H.Write(PHM)
- _, _ = H.Read(rPM[:])
-
- // 3. Compute the point [r]B.
- r := &goldilocks.Scalar{}
- r.FromBytes(rPM[:])
- R := (&[paramB]byte{})[:]
- if err := (goldilocks.Curve{}.ScalarBaseMult(r).ToBytes(R)); err != nil {
- panic(err)
- }
- // 4. Compute SHAKE256(dom4(F, C) || R || A || PH(M), 114)
- var hRAM [hashSize]byte
- H.Reset()
-
- writeDom(&H, ctx, preHash)
-
- _, _ = H.Write(R)
- _, _ = H.Write(privateKey[SeedSize:])
- _, _ = H.Write(PHM)
- _, _ = H.Read(hRAM[:])
-
- // 5. Compute S = (r + k * s) mod order.
- k := &goldilocks.Scalar{}
- k.FromBytes(hRAM[:])
- S := &goldilocks.Scalar{}
- S.Mul(k, s)
- S.Add(S, r)
-
- // 6. The signature is the concatenation of R and S.
- copy(signature[:paramB], R[:])
- copy(signature[paramB:], S[:])
-}
-
-// Sign signs the message with privateKey and returns a signature.
-// This function supports the signature variant defined in RFC-8032: Ed448,
-// also known as the pure version of EdDSA.
-// It will panic if len(privateKey) is not PrivateKeySize.
-func Sign(priv PrivateKey, message []byte, ctx string) []byte {
- signature := make([]byte, SignatureSize)
- signAll(signature, priv, message, []byte(ctx), false)
- return signature
-}
-
-// SignPh creates a signature of a message given a keypair.
-// This function supports the signature variant defined in RFC-8032: Ed448ph,
-// meaning it internally hashes the message using SHAKE-256.
-// Context could be passed to this function, which length should be no more than
-// 255. It can be empty.
-func SignPh(priv PrivateKey, message []byte, ctx string) []byte {
- signature := make([]byte, SignatureSize)
- signAll(signature, priv, message, []byte(ctx), true)
- return signature
-}
-
-func verify(public PublicKey, message, signature, ctx []byte, preHash bool) bool {
- if len(public) != PublicKeySize ||
- len(signature) != SignatureSize ||
- len(ctx) > ContextMaxSize ||
- !isLessThanOrder(signature[paramB:]) {
- return false
- }
-
- P, err := goldilocks.FromBytes(public)
- if err != nil {
- return false
- }
-
- H := sha3.NewShake256()
- var PHM []byte
-
- if preHash {
- var h [64]byte
- _, _ = H.Write(message)
- _, _ = H.Read(h[:])
- PHM = h[:]
- H.Reset()
- } else {
- PHM = message
- }
-
- var hRAM [hashSize]byte
- R := signature[:paramB]
-
- writeDom(&H, ctx, preHash)
-
- _, _ = H.Write(R)
- _, _ = H.Write(public)
- _, _ = H.Write(PHM)
- _, _ = H.Read(hRAM[:])
-
- k := &goldilocks.Scalar{}
- k.FromBytes(hRAM[:])
- S := &goldilocks.Scalar{}
- S.FromBytes(signature[paramB:])
-
- encR := (&[paramB]byte{})[:]
- P.Neg()
- _ = goldilocks.Curve{}.CombinedMult(S, k, P).ToBytes(encR)
- return bytes.Equal(R, encR)
-}
-
-// VerifyAny returns true if the signature is valid. Failure cases are invalid
-// signature, or when the public key cannot be decoded.
-// This function supports all the two signature variants defined in RFC-8032,
-// namely Ed448 (or pure EdDSA) and Ed448Ph.
-// The opts.HashFunc() must return zero, this can be achieved by passing
-// crypto.Hash(0) as the value for opts.
-// Use a SignerOptions struct to pass a context string for signing.
-func VerifyAny(public PublicKey, message, signature []byte, opts crypto.SignerOpts) bool {
- var ctx string
- var scheme SchemeID
- if o, ok := opts.(SignerOptions); ok {
- ctx = o.Context
- scheme = o.Scheme
- }
-
- switch true {
- case scheme == ED448 && opts.HashFunc() == crypto.Hash(0):
- return Verify(public, message, signature, ctx)
- case scheme == ED448Ph && opts.HashFunc() == crypto.Hash(0):
- return VerifyPh(public, message, signature, ctx)
- default:
- return false
- }
-}
-
-// Verify returns true if the signature is valid. Failure cases are invalid
-// signature, or when the public key cannot be decoded.
-// This function supports the signature variant defined in RFC-8032: Ed448,
-// also known as the pure version of EdDSA.
-func Verify(public PublicKey, message, signature []byte, ctx string) bool {
- return verify(public, message, signature, []byte(ctx), false)
-}
-
-// VerifyPh returns true if the signature is valid. Failure cases are invalid
-// signature, or when the public key cannot be decoded.
-// This function supports the signature variant defined in RFC-8032: Ed448ph,
-// meaning it internally hashes the message using SHAKE-256.
-// Context could be passed to this function, which length should be no more than
-// 255. It can be empty.
-func VerifyPh(public PublicKey, message, signature []byte, ctx string) bool {
- return verify(public, message, signature, []byte(ctx), true)
-}
-
-func deriveSecretScalar(s *goldilocks.Scalar, h []byte) {
- h[0] &= 0xFC // The two least significant bits of the first octet are cleared,
- h[paramB-1] = 0x00 // all eight bits the last octet are cleared, and
- h[paramB-2] |= 0x80 // the highest bit of the second to last octet is set.
- s.FromBytes(h[:paramB])
-}
-
-// isLessThanOrder returns true if 0 <= x < order and if the last byte of x is zero.
-func isLessThanOrder(x []byte) bool {
- order := goldilocks.Curve{}.Order()
- i := len(order) - 1
- for i > 0 && x[i] == order[i] {
- i--
- }
- return x[paramB-1] == 0 && x[i] < order[i]
-}
-
-func writeDom(h io.Writer, ctx []byte, preHash bool) {
- dom4 := "SigEd448"
- _, _ = h.Write([]byte(dom4))
-
- if preHash {
- _, _ = h.Write([]byte{byte(0x01), byte(len(ctx))})
- } else {
- _, _ = h.Write([]byte{byte(0x00), byte(len(ctx))})
- }
- _, _ = h.Write(ctx)
-}
diff --git a/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go b/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go
deleted file mode 100644
index 22da8bc0..00000000
--- a/vendor/github.com/cloudflare/circl/sign/ed448/signapi.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package ed448
-
-import (
- "crypto/rand"
- "encoding/asn1"
-
- "github.com/cloudflare/circl/sign"
-)
-
-var sch sign.Scheme = &scheme{}
-
-// Scheme returns a signature interface.
-func Scheme() sign.Scheme { return sch }
-
-type scheme struct{}
-
-func (*scheme) Name() string { return "Ed448" }
-func (*scheme) PublicKeySize() int { return PublicKeySize }
-func (*scheme) PrivateKeySize() int { return PrivateKeySize }
-func (*scheme) SignatureSize() int { return SignatureSize }
-func (*scheme) SeedSize() int { return SeedSize }
-func (*scheme) TLSIdentifier() uint { return 0x0808 }
-func (*scheme) SupportsContext() bool { return true }
-func (*scheme) Oid() asn1.ObjectIdentifier {
- return asn1.ObjectIdentifier{1, 3, 101, 113}
-}
-
-func (*scheme) GenerateKey() (sign.PublicKey, sign.PrivateKey, error) {
- return GenerateKey(rand.Reader)
-}
-
-func (*scheme) Sign(
- sk sign.PrivateKey,
- message []byte,
- opts *sign.SignatureOpts,
-) []byte {
- priv, ok := sk.(PrivateKey)
- if !ok {
- panic(sign.ErrTypeMismatch)
- }
- ctx := ""
- if opts != nil {
- ctx = opts.Context
- }
- return Sign(priv, message, ctx)
-}
-
-func (*scheme) Verify(
- pk sign.PublicKey,
- message, signature []byte,
- opts *sign.SignatureOpts,
-) bool {
- pub, ok := pk.(PublicKey)
- if !ok {
- panic(sign.ErrTypeMismatch)
- }
- ctx := ""
- if opts != nil {
- ctx = opts.Context
- }
- return Verify(pub, message, signature, ctx)
-}
-
-func (*scheme) DeriveKey(seed []byte) (sign.PublicKey, sign.PrivateKey) {
- privateKey := NewKeyFromSeed(seed)
- publicKey := make(PublicKey, PublicKeySize)
- copy(publicKey, privateKey[SeedSize:])
- return publicKey, privateKey
-}
-
-func (*scheme) UnmarshalBinaryPublicKey(buf []byte) (sign.PublicKey, error) {
- if len(buf) < PublicKeySize {
- return nil, sign.ErrPubKeySize
- }
- pub := make(PublicKey, PublicKeySize)
- copy(pub, buf[:PublicKeySize])
- return pub, nil
-}
-
-func (*scheme) UnmarshalBinaryPrivateKey(buf []byte) (sign.PrivateKey, error) {
- if len(buf) < PrivateKeySize {
- return nil, sign.ErrPrivKeySize
- }
- priv := make(PrivateKey, PrivateKeySize)
- copy(priv, buf[:PrivateKeySize])
- return priv, nil
-}
diff --git a/vendor/github.com/cloudflare/circl/sign/sign.go b/vendor/github.com/cloudflare/circl/sign/sign.go
deleted file mode 100644
index 13b20fa4..00000000
--- a/vendor/github.com/cloudflare/circl/sign/sign.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Package sign provides unified interfaces for signature schemes.
-//
-// A register of schemes is available in the package
-//
-// github.com/cloudflare/circl/sign/schemes
-package sign
-
-import (
- "crypto"
- "encoding"
- "errors"
-)
-
-type SignatureOpts struct {
- // If non-empty, includes the given context in the signature if supported
- // and will cause an error during signing otherwise.
- Context string
-}
-
-// A public key is used to verify a signature set by the corresponding private
-// key.
-type PublicKey interface {
- // Returns the signature scheme for this public key.
- Scheme() Scheme
- Equal(crypto.PublicKey) bool
- encoding.BinaryMarshaler
- crypto.PublicKey
-}
-
-// A private key allows one to create signatures.
-type PrivateKey interface {
- // Returns the signature scheme for this private key.
- Scheme() Scheme
- Equal(crypto.PrivateKey) bool
- // For compatibility with Go standard library
- crypto.Signer
- crypto.PrivateKey
- encoding.BinaryMarshaler
-}
-
-// A Scheme represents a specific instance of a signature scheme.
-type Scheme interface {
- // Name of the scheme.
- Name() string
-
- // GenerateKey creates a new key-pair.
- GenerateKey() (PublicKey, PrivateKey, error)
-
- // Creates a signature using the PrivateKey on the given message and
- // returns the signature. opts are additional options which can be nil.
- //
- // Panics if key is nil or wrong type or opts context is not supported.
- Sign(sk PrivateKey, message []byte, opts *SignatureOpts) []byte
-
- // Checks whether the given signature is a valid signature set by
- // the private key corresponding to the given public key on the
- // given message. opts are additional options which can be nil.
- //
- // Panics if key is nil or wrong type or opts context is not supported.
- Verify(pk PublicKey, message []byte, signature []byte, opts *SignatureOpts) bool
-
- // Deterministically derives a keypair from a seed. If you're unsure,
- // you're better off using GenerateKey().
- //
- // Panics if seed is not of length SeedSize().
- DeriveKey(seed []byte) (PublicKey, PrivateKey)
-
- // Unmarshals a PublicKey from the provided buffer.
- UnmarshalBinaryPublicKey([]byte) (PublicKey, error)
-
- // Unmarshals a PublicKey from the provided buffer.
- UnmarshalBinaryPrivateKey([]byte) (PrivateKey, error)
-
- // Size of binary marshalled public keys.
- PublicKeySize() int
-
- // Size of binary marshalled public keys.
- PrivateKeySize() int
-
- // Size of signatures.
- SignatureSize() int
-
- // Size of seeds.
- SeedSize() int
-
- // Returns whether contexts are supported.
- SupportsContext() bool
-}
-
-var (
- // ErrTypeMismatch is the error used if types of, for instance, private
- // and public keys don't match.
- ErrTypeMismatch = errors.New("types mismatch")
-
- // ErrSeedSize is the error used if the provided seed is of the wrong
- // size.
- ErrSeedSize = errors.New("wrong seed size")
-
- // ErrPubKeySize is the error used if the provided public key is of
- // the wrong size.
- ErrPubKeySize = errors.New("wrong size for public key")
-
- // ErrPrivKeySize is the error used if the provided private key is of
- // the wrong size.
- ErrPrivKeySize = errors.New("wrong size for private key")
-
- // ErrContextNotSupported is the error used if a context is not
- // supported.
- ErrContextNotSupported = errors.New("context not supported")
-)
diff --git a/vendor/github.com/creasty/defaults/.gitignore b/vendor/github.com/creasty/defaults/.gitignore
deleted file mode 100644
index e43b0f98..00000000
--- a/vendor/github.com/creasty/defaults/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-.DS_Store
diff --git a/vendor/github.com/creasty/defaults/LICENSE b/vendor/github.com/creasty/defaults/LICENSE
deleted file mode 100644
index 1483dd2d..00000000
--- a/vendor/github.com/creasty/defaults/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-Copyright (c) 2017-present Yuki Iwanaga
-
-MIT License
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/creasty/defaults/Makefile b/vendor/github.com/creasty/defaults/Makefile
deleted file mode 100644
index 404212a2..00000000
--- a/vendor/github.com/creasty/defaults/Makefile
+++ /dev/null
@@ -1,29 +0,0 @@
-SHELL := /bin/bash -eu -o pipefail
-
-GO_TEST_FLAGS := -v
-
-PACKAGE_DIRS := $(shell go list ./... 2> /dev/null | grep -v /vendor/)
-SRC_FILES := $(shell find . -name '*.go' -not -path './vendor/*')
-
-
-# Tasks
-#-----------------------------------------------
-.PHONY: lint
-lint:
- @gofmt -e -d -s $(SRC_FILES) | awk '{ e = 1; print $0 } END { if (e) exit(1) }'
- @golangci-lint --disable errcheck,unused run
-
-.PHONY: test
-test: lint
- @go test $(GO_TEST_FLAGS) $(PACKAGE_DIRS)
-
-.PHONY: ci-test
-ci-test: lint
- @echo > coverage.txt
- @for d in $(PACKAGE_DIRS); do \
- go test -coverprofile=profile.out -covermode=atomic -race -v $$d; \
- if [ -f profile.out ]; then \
- cat profile.out >> coverage.txt; \
- rm profile.out; \
- fi; \
- done
diff --git a/vendor/github.com/creasty/defaults/README.md b/vendor/github.com/creasty/defaults/README.md
deleted file mode 100644
index 0efdc448..00000000
--- a/vendor/github.com/creasty/defaults/README.md
+++ /dev/null
@@ -1,75 +0,0 @@
-defaults
-========
-
-[](https://circleci.com/gh/creasty/defaults/tree/master)
-[](https://codecov.io/gh/creasty/defaults)
-[](https://github.com/creasty/defaults/releases)
-[](./LICENSE)
-
-Initialize structs with default values
-
-- Supports almost all kind of types
- - Scalar types
- - `int/8/16/32/64`, `uint/8/16/32/64`, `float32/64`
- - `uintptr`, `bool`, `string`
- - Complex types
- - `map`, `slice`, `struct`
- - Nested types
- - `map[K1]map[K2]Struct`, `[]map[K1]Struct[]`
- - Aliased types
- - `time.Duration`
- - e.g., `type Enum string`
- - Pointer types
- - e.g., `*SampleStruct`, `*int`
-- Recursively initializes fields in a struct
-- Dynamically sets default values by [`defaults.Setter`](./setter.go) interface
-- Preserves non-initial values from being reset with a default value
-
-
-Usage
------
-
-```go
-type Gender string
-
-type Sample struct {
- Name string `default:"John Smith"`
- Age int `default:"27"`
- Gender Gender `default:"m"`
-
- Slice []string `default:"[]"`
- SliceByJSON []int `default:"[1, 2, 3]"` // Supports JSON
-
- Map map[string]int `default:"{}"`
- MapByJSON map[string]int `default:"{\"foo\": 123}"`
- MapOfStruct map[string]OtherStruct
- MapOfPtrStruct map[string]*OtherStruct
- MapOfStructWithTag map[string]OtherStruct `default:"{\"Key1\": {\"Foo\":123}}"`
-
- Struct OtherStruct `default:"{}"`
- StructPtr *OtherStruct `default:"{\"Foo\": 123}"`
-
- NoTag OtherStruct // Recurses into a nested struct by default
- OptOut OtherStruct `default:"-"` // Opt-out
-}
-
-type OtherStruct struct {
- Hello string `default:"world"` // Tags in a nested struct also work
- Foo int `default:"-"`
- Random int `default:"-"`
-}
-
-// SetDefaults implements defaults.Setter interface
-func (s *OtherStruct) SetDefaults() {
- if defaults.CanUpdate(s.Random) { // Check if it's a zero value (recommended)
- s.Random = rand.Int() // Set a dynamic value
- }
-}
-```
-
-```go
-obj := &Sample{}
-if err := defaults.Set(obj); err != nil {
- panic(err)
-}
-```
diff --git a/vendor/github.com/creasty/defaults/defaults.go b/vendor/github.com/creasty/defaults/defaults.go
deleted file mode 100644
index b5e7eb9c..00000000
--- a/vendor/github.com/creasty/defaults/defaults.go
+++ /dev/null
@@ -1,244 +0,0 @@
-package defaults
-
-import (
- "encoding"
- "encoding/json"
- "errors"
- "reflect"
- "strconv"
- "time"
-)
-
-var (
- errInvalidType = errors.New("not a struct pointer")
-)
-
-const (
- fieldName = "default"
-)
-
-// Set initializes members in a struct referenced by a pointer.
-// Maps and slices are initialized by `make` and other primitive types are set with default values.
-// `ptr` should be a struct pointer
-func Set(ptr interface{}) error {
- if reflect.TypeOf(ptr).Kind() != reflect.Ptr {
- return errInvalidType
- }
-
- v := reflect.ValueOf(ptr).Elem()
- t := v.Type()
-
- if t.Kind() != reflect.Struct {
- return errInvalidType
- }
-
- for i := 0; i < t.NumField(); i++ {
- if defaultVal := t.Field(i).Tag.Get(fieldName); defaultVal != "-" {
- if err := setField(v.Field(i), defaultVal); err != nil {
- return err
- }
- }
- }
- callSetter(ptr)
- return nil
-}
-
-// MustSet function is a wrapper of Set function
-// It will call Set and panic if err not equals nil.
-func MustSet(ptr interface{}) {
- if err := Set(ptr); err != nil {
- panic(err)
- }
-}
-
-func setField(field reflect.Value, defaultVal string) error {
- if !field.CanSet() {
- return nil
- }
-
- if !shouldInitializeField(field, defaultVal) {
- return nil
- }
-
- isInitial := isInitialValue(field)
- if isInitial {
- if unmarshalByInterface(field, defaultVal) {
- return nil
- }
-
- switch field.Kind() {
- case reflect.Bool:
- if val, err := strconv.ParseBool(defaultVal); err == nil {
- field.Set(reflect.ValueOf(val).Convert(field.Type()))
- }
- case reflect.Int:
- if val, err := strconv.ParseInt(defaultVal, 0, strconv.IntSize); err == nil {
- field.Set(reflect.ValueOf(int(val)).Convert(field.Type()))
- }
- case reflect.Int8:
- if val, err := strconv.ParseInt(defaultVal, 0, 8); err == nil {
- field.Set(reflect.ValueOf(int8(val)).Convert(field.Type()))
- }
- case reflect.Int16:
- if val, err := strconv.ParseInt(defaultVal, 0, 16); err == nil {
- field.Set(reflect.ValueOf(int16(val)).Convert(field.Type()))
- }
- case reflect.Int32:
- if val, err := strconv.ParseInt(defaultVal, 0, 32); err == nil {
- field.Set(reflect.ValueOf(int32(val)).Convert(field.Type()))
- }
- case reflect.Int64:
- if val, err := time.ParseDuration(defaultVal); err == nil {
- field.Set(reflect.ValueOf(val).Convert(field.Type()))
- } else if val, err := strconv.ParseInt(defaultVal, 0, 64); err == nil {
- field.Set(reflect.ValueOf(val).Convert(field.Type()))
- }
- case reflect.Uint:
- if val, err := strconv.ParseUint(defaultVal, 0, strconv.IntSize); err == nil {
- field.Set(reflect.ValueOf(uint(val)).Convert(field.Type()))
- }
- case reflect.Uint8:
- if val, err := strconv.ParseUint(defaultVal, 0, 8); err == nil {
- field.Set(reflect.ValueOf(uint8(val)).Convert(field.Type()))
- }
- case reflect.Uint16:
- if val, err := strconv.ParseUint(defaultVal, 0, 16); err == nil {
- field.Set(reflect.ValueOf(uint16(val)).Convert(field.Type()))
- }
- case reflect.Uint32:
- if val, err := strconv.ParseUint(defaultVal, 0, 32); err == nil {
- field.Set(reflect.ValueOf(uint32(val)).Convert(field.Type()))
- }
- case reflect.Uint64:
- if val, err := strconv.ParseUint(defaultVal, 0, 64); err == nil {
- field.Set(reflect.ValueOf(val).Convert(field.Type()))
- }
- case reflect.Uintptr:
- if val, err := strconv.ParseUint(defaultVal, 0, strconv.IntSize); err == nil {
- field.Set(reflect.ValueOf(uintptr(val)).Convert(field.Type()))
- }
- case reflect.Float32:
- if val, err := strconv.ParseFloat(defaultVal, 32); err == nil {
- field.Set(reflect.ValueOf(float32(val)).Convert(field.Type()))
- }
- case reflect.Float64:
- if val, err := strconv.ParseFloat(defaultVal, 64); err == nil {
- field.Set(reflect.ValueOf(val).Convert(field.Type()))
- }
- case reflect.String:
- field.Set(reflect.ValueOf(defaultVal).Convert(field.Type()))
-
- case reflect.Slice:
- ref := reflect.New(field.Type())
- ref.Elem().Set(reflect.MakeSlice(field.Type(), 0, 0))
- if defaultVal != "" && defaultVal != "[]" {
- if err := json.Unmarshal([]byte(defaultVal), ref.Interface()); err != nil {
- return err
- }
- }
- field.Set(ref.Elem().Convert(field.Type()))
- case reflect.Map:
- ref := reflect.New(field.Type())
- ref.Elem().Set(reflect.MakeMap(field.Type()))
- if defaultVal != "" && defaultVal != "{}" {
- if err := json.Unmarshal([]byte(defaultVal), ref.Interface()); err != nil {
- return err
- }
- }
- field.Set(ref.Elem().Convert(field.Type()))
- case reflect.Struct:
- if defaultVal != "" && defaultVal != "{}" {
- if err := json.Unmarshal([]byte(defaultVal), field.Addr().Interface()); err != nil {
- return err
- }
- }
- case reflect.Ptr:
- field.Set(reflect.New(field.Type().Elem()))
- }
- }
-
- switch field.Kind() {
- case reflect.Ptr:
- if isInitial || field.Elem().Kind() == reflect.Struct {
- setField(field.Elem(), defaultVal)
- callSetter(field.Interface())
- }
- case reflect.Struct:
- if err := Set(field.Addr().Interface()); err != nil {
- return err
- }
- case reflect.Slice:
- for j := 0; j < field.Len(); j++ {
- if err := setField(field.Index(j), defaultVal); err != nil {
- return err
- }
- }
- case reflect.Map:
- for _, e := range field.MapKeys() {
- var v = field.MapIndex(e)
-
- switch v.Kind() {
- case reflect.Ptr:
- switch v.Elem().Kind() {
- case reflect.Struct, reflect.Slice, reflect.Map:
- if err := setField(v.Elem(), ""); err != nil {
- return err
- }
- }
- case reflect.Struct, reflect.Slice, reflect.Map:
- ref := reflect.New(v.Type())
- ref.Elem().Set(v)
- if err := setField(ref.Elem(), ""); err != nil {
- return err
- }
- field.SetMapIndex(e, ref.Elem().Convert(v.Type()))
- }
- }
- }
-
- return nil
-}
-
-func unmarshalByInterface(field reflect.Value, defaultVal string) bool {
- asText, ok := field.Addr().Interface().(encoding.TextUnmarshaler)
- if ok && defaultVal != "" {
- // if field implements encode.TextUnmarshaler, try to use it before decode by kind
- if err := asText.UnmarshalText([]byte(defaultVal)); err == nil {
- return true
- }
- }
- asJSON, ok := field.Addr().Interface().(json.Unmarshaler)
- if ok && defaultVal != "" && defaultVal != "{}" && defaultVal != "[]" {
- // if field implements json.Unmarshaler, try to use it before decode by kind
- if err := asJSON.UnmarshalJSON([]byte(defaultVal)); err == nil {
- return true
- }
- }
- return false
-}
-
-func isInitialValue(field reflect.Value) bool {
- return reflect.DeepEqual(reflect.Zero(field.Type()).Interface(), field.Interface())
-}
-
-func shouldInitializeField(field reflect.Value, tag string) bool {
- switch field.Kind() {
- case reflect.Struct:
- return true
- case reflect.Ptr:
- if !field.IsNil() && field.Elem().Kind() == reflect.Struct {
- return true
- }
- case reflect.Slice:
- return field.Len() > 0 || tag != ""
- case reflect.Map:
- return field.Len() > 0 || tag != ""
- }
-
- return tag != ""
-}
-
-// CanUpdate returns true when the given value is an initial value of its type
-func CanUpdate(v interface{}) bool {
- return isInitialValue(reflect.ValueOf(v))
-}
diff --git a/vendor/github.com/creasty/defaults/setter.go b/vendor/github.com/creasty/defaults/setter.go
deleted file mode 100644
index 1f64aa65..00000000
--- a/vendor/github.com/creasty/defaults/setter.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package defaults
-
-// Setter is an interface for setting default values
-type Setter interface {
- SetDefaults()
-}
-
-func callSetter(v interface{}) {
- if ds, ok := v.(Setter); ok {
- ds.SetDefaults()
- }
-}
diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md
deleted file mode 100644
index 25fdaf63..00000000
--- a/vendor/github.com/fatih/color/LICENSE.md
+++ /dev/null
@@ -1,20 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 Fatih Arslan
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md
deleted file mode 100644
index be82827c..00000000
--- a/vendor/github.com/fatih/color/README.md
+++ /dev/null
@@ -1,176 +0,0 @@
-# color [](https://github.com/fatih/color/actions) [](https://pkg.go.dev/github.com/fatih/color)
-
-Color lets you use colorized outputs in terms of [ANSI Escape
-Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It
-has support for Windows too! The API can be used in several ways, pick one that
-suits you.
-
-
-
-## Install
-
-```bash
-go get github.com/fatih/color
-```
-
-## Examples
-
-### Standard colors
-
-```go
-// Print with default helper functions
-color.Cyan("Prints text in cyan.")
-
-// A newline will be appended automatically
-color.Blue("Prints %s in blue.", "text")
-
-// These are using the default foreground colors
-color.Red("We have red")
-color.Magenta("And many others ..")
-
-```
-
-### Mix and reuse colors
-
-```go
-// Create a new color object
-c := color.New(color.FgCyan).Add(color.Underline)
-c.Println("Prints cyan text with an underline.")
-
-// Or just add them to New()
-d := color.New(color.FgCyan, color.Bold)
-d.Printf("This prints bold cyan %s\n", "too!.")
-
-// Mix up foreground and background colors, create new mixes!
-red := color.New(color.FgRed)
-
-boldRed := red.Add(color.Bold)
-boldRed.Println("This will print text in bold red.")
-
-whiteBackground := red.Add(color.BgWhite)
-whiteBackground.Println("Red text with white background.")
-```
-
-### Use your own output (io.Writer)
-
-```go
-// Use your own io.Writer output
-color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
-
-blue := color.New(color.FgBlue)
-blue.Fprint(writer, "This will print text in blue.")
-```
-
-### Custom print functions (PrintFunc)
-
-```go
-// Create a custom print function for convenience
-red := color.New(color.FgRed).PrintfFunc()
-red("Warning")
-red("Error: %s", err)
-
-// Mix up multiple attributes
-notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
-notice("Don't forget this...")
-```
-
-### Custom fprint functions (FprintFunc)
-
-```go
-blue := color.New(color.FgBlue).FprintfFunc()
-blue(myWriter, "important notice: %s", stars)
-
-// Mix up with multiple attributes
-success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
-success(myWriter, "Don't forget this...")
-```
-
-### Insert into noncolor strings (SprintFunc)
-
-```go
-// Create SprintXxx functions to mix strings with other non-colorized strings:
-yellow := color.New(color.FgYellow).SprintFunc()
-red := color.New(color.FgRed).SprintFunc()
-fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error"))
-
-info := color.New(color.FgWhite, color.BgGreen).SprintFunc()
-fmt.Printf("This %s rocks!\n", info("package"))
-
-// Use helper functions
-fmt.Println("This", color.RedString("warning"), "should be not neglected.")
-fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.")
-
-// Windows supported too! Just don't forget to change the output to color.Output
-fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
-```
-
-### Plug into existing code
-
-```go
-// Use handy standard colors
-color.Set(color.FgYellow)
-
-fmt.Println("Existing text will now be in yellow")
-fmt.Printf("This one %s\n", "too")
-
-color.Unset() // Don't forget to unset
-
-// You can mix up parameters
-color.Set(color.FgMagenta, color.Bold)
-defer color.Unset() // Use it in your function
-
-fmt.Println("All text will now be bold magenta.")
-```
-
-### Disable/Enable color
-
-There might be a case where you want to explicitly disable/enable color output. the
-`go-isatty` package will automatically disable color output for non-tty output streams
-(for example if the output were piped directly to `less`).
-
-The `color` package also disables color output if the [`NO_COLOR`](https://no-color.org) environment
-variable is set to a non-empty string.
-
-`Color` has support to disable/enable colors programmatically both globally and
-for single color definitions. For example suppose you have a CLI app and a
-`-no-color` bool flag. You can easily disable the color output with:
-
-```go
-var flagNoColor = flag.Bool("no-color", false, "Disable color output")
-
-if *flagNoColor {
- color.NoColor = true // disables colorized output
-}
-```
-
-It also has support for single color definitions (local). You can
-disable/enable color output on the fly:
-
-```go
-c := color.New(color.FgCyan)
-c.Println("Prints cyan text")
-
-c.DisableColor()
-c.Println("This is printed without any color")
-
-c.EnableColor()
-c.Println("This prints again cyan...")
-```
-
-## GitHub Actions
-
-To output color in GitHub Actions (or other CI systems that support ANSI colors), make sure to set `color.NoColor = false` so that it bypasses the check for non-tty output streams.
-
-## Todo
-
-* Save/Return previous values
-* Evaluate fmt.Formatter interface
-
-## Credits
-
-* [Fatih Arslan](https://github.com/fatih)
-* Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable)
-
-## License
-
-The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details
diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go
deleted file mode 100644
index 81094e87..00000000
--- a/vendor/github.com/fatih/color/color.go
+++ /dev/null
@@ -1,655 +0,0 @@
-package color
-
-import (
- "fmt"
- "io"
- "os"
- "strconv"
- "strings"
- "sync"
-
- "github.com/mattn/go-colorable"
- "github.com/mattn/go-isatty"
-)
-
-var (
- // NoColor defines if the output is colorized or not. It's dynamically set to
- // false or true based on the stdout's file descriptor referring to a terminal
- // or not. It's also set to true if the NO_COLOR environment variable is
- // set (regardless of its value). This is a global option and affects all
- // colors. For more control over each color block use the methods
- // DisableColor() individually.
- NoColor = noColorIsSet() || os.Getenv("TERM") == "dumb" ||
- (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()))
-
- // Output defines the standard output of the print functions. By default,
- // os.Stdout is used.
- Output = colorable.NewColorableStdout()
-
- // Error defines a color supporting writer for os.Stderr.
- Error = colorable.NewColorableStderr()
-
- // colorsCache is used to reduce the count of created Color objects and
- // allows to reuse already created objects with required Attribute.
- colorsCache = make(map[Attribute]*Color)
- colorsCacheMu sync.Mutex // protects colorsCache
-)
-
-// noColorIsSet returns true if the environment variable NO_COLOR is set to a non-empty string.
-func noColorIsSet() bool {
- return os.Getenv("NO_COLOR") != ""
-}
-
-// Color defines a custom color object which is defined by SGR parameters.
-type Color struct {
- params []Attribute
- noColor *bool
-}
-
-// Attribute defines a single SGR Code
-type Attribute int
-
-const escape = "\x1b"
-
-// Base attributes
-const (
- Reset Attribute = iota
- Bold
- Faint
- Italic
- Underline
- BlinkSlow
- BlinkRapid
- ReverseVideo
- Concealed
- CrossedOut
-)
-
-const (
- ResetBold Attribute = iota + 22
- ResetItalic
- ResetUnderline
- ResetBlinking
- _
- ResetReversed
- ResetConcealed
- ResetCrossedOut
-)
-
-var mapResetAttributes map[Attribute]Attribute = map[Attribute]Attribute{
- Bold: ResetBold,
- Faint: ResetBold,
- Italic: ResetItalic,
- Underline: ResetUnderline,
- BlinkSlow: ResetBlinking,
- BlinkRapid: ResetBlinking,
- ReverseVideo: ResetReversed,
- Concealed: ResetConcealed,
- CrossedOut: ResetCrossedOut,
-}
-
-// Foreground text colors
-const (
- FgBlack Attribute = iota + 30
- FgRed
- FgGreen
- FgYellow
- FgBlue
- FgMagenta
- FgCyan
- FgWhite
-)
-
-// Foreground Hi-Intensity text colors
-const (
- FgHiBlack Attribute = iota + 90
- FgHiRed
- FgHiGreen
- FgHiYellow
- FgHiBlue
- FgHiMagenta
- FgHiCyan
- FgHiWhite
-)
-
-// Background text colors
-const (
- BgBlack Attribute = iota + 40
- BgRed
- BgGreen
- BgYellow
- BgBlue
- BgMagenta
- BgCyan
- BgWhite
-)
-
-// Background Hi-Intensity text colors
-const (
- BgHiBlack Attribute = iota + 100
- BgHiRed
- BgHiGreen
- BgHiYellow
- BgHiBlue
- BgHiMagenta
- BgHiCyan
- BgHiWhite
-)
-
-// New returns a newly created color object.
-func New(value ...Attribute) *Color {
- c := &Color{
- params: make([]Attribute, 0),
- }
-
- if noColorIsSet() {
- c.noColor = boolPtr(true)
- }
-
- c.Add(value...)
- return c
-}
-
-// Set sets the given parameters immediately. It will change the color of
-// output with the given SGR parameters until color.Unset() is called.
-func Set(p ...Attribute) *Color {
- c := New(p...)
- c.Set()
- return c
-}
-
-// Unset resets all escape attributes and clears the output. Usually should
-// be called after Set().
-func Unset() {
- if NoColor {
- return
- }
-
- fmt.Fprintf(Output, "%s[%dm", escape, Reset)
-}
-
-// Set sets the SGR sequence.
-func (c *Color) Set() *Color {
- if c.isNoColorSet() {
- return c
- }
-
- fmt.Fprint(Output, c.format())
- return c
-}
-
-func (c *Color) unset() {
- if c.isNoColorSet() {
- return
- }
-
- Unset()
-}
-
-// SetWriter is used to set the SGR sequence with the given io.Writer. This is
-// a low-level function, and users should use the higher-level functions, such
-// as color.Fprint, color.Print, etc.
-func (c *Color) SetWriter(w io.Writer) *Color {
- if c.isNoColorSet() {
- return c
- }
-
- fmt.Fprint(w, c.format())
- return c
-}
-
-// UnsetWriter resets all escape attributes and clears the output with the give
-// io.Writer. Usually should be called after SetWriter().
-func (c *Color) UnsetWriter(w io.Writer) {
- if c.isNoColorSet() {
- return
- }
-
- if NoColor {
- return
- }
-
- fmt.Fprintf(w, "%s[%dm", escape, Reset)
-}
-
-// Add is used to chain SGR parameters. Use as many as parameters to combine
-// and create custom color objects. Example: Add(color.FgRed, color.Underline).
-func (c *Color) Add(value ...Attribute) *Color {
- c.params = append(c.params, value...)
- return c
-}
-
-// Fprint formats using the default formats for its operands and writes to w.
-// Spaces are added between operands when neither is a string.
-// It returns the number of bytes written and any write error encountered.
-// On Windows, users should wrap w with colorable.NewColorable() if w is of
-// type *os.File.
-func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
- c.SetWriter(w)
- defer c.UnsetWriter(w)
-
- return fmt.Fprint(w, a...)
-}
-
-// Print formats using the default formats for its operands and writes to
-// standard output. Spaces are added between operands when neither is a
-// string. It returns the number of bytes written and any write error
-// encountered. This is the standard fmt.Print() method wrapped with the given
-// color.
-func (c *Color) Print(a ...interface{}) (n int, err error) {
- c.Set()
- defer c.unset()
-
- return fmt.Fprint(Output, a...)
-}
-
-// Fprintf formats according to a format specifier and writes to w.
-// It returns the number of bytes written and any write error encountered.
-// On Windows, users should wrap w with colorable.NewColorable() if w is of
-// type *os.File.
-func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
- c.SetWriter(w)
- defer c.UnsetWriter(w)
-
- return fmt.Fprintf(w, format, a...)
-}
-
-// Printf formats according to a format specifier and writes to standard output.
-// It returns the number of bytes written and any write error encountered.
-// This is the standard fmt.Printf() method wrapped with the given color.
-func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
- c.Set()
- defer c.unset()
-
- return fmt.Fprintf(Output, format, a...)
-}
-
-// Fprintln formats using the default formats for its operands and writes to w.
-// Spaces are always added between operands and a newline is appended.
-// On Windows, users should wrap w with colorable.NewColorable() if w is of
-// type *os.File.
-func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
- return fmt.Fprintln(w, c.wrap(sprintln(a...)))
-}
-
-// Println formats using the default formats for its operands and writes to
-// standard output. Spaces are always added between operands and a newline is
-// appended. It returns the number of bytes written and any write error
-// encountered. This is the standard fmt.Print() method wrapped with the given
-// color.
-func (c *Color) Println(a ...interface{}) (n int, err error) {
- return fmt.Fprintln(Output, c.wrap(sprintln(a...)))
-}
-
-// Sprint is just like Print, but returns a string instead of printing it.
-func (c *Color) Sprint(a ...interface{}) string {
- return c.wrap(fmt.Sprint(a...))
-}
-
-// Sprintln is just like Println, but returns a string instead of printing it.
-func (c *Color) Sprintln(a ...interface{}) string {
- return c.wrap(sprintln(a...)) + "\n"
-}
-
-// Sprintf is just like Printf, but returns a string instead of printing it.
-func (c *Color) Sprintf(format string, a ...interface{}) string {
- return c.wrap(fmt.Sprintf(format, a...))
-}
-
-// FprintFunc returns a new function that prints the passed arguments as
-// colorized with color.Fprint().
-func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) {
- return func(w io.Writer, a ...interface{}) {
- c.Fprint(w, a...)
- }
-}
-
-// PrintFunc returns a new function that prints the passed arguments as
-// colorized with color.Print().
-func (c *Color) PrintFunc() func(a ...interface{}) {
- return func(a ...interface{}) {
- c.Print(a...)
- }
-}
-
-// FprintfFunc returns a new function that prints the passed arguments as
-// colorized with color.Fprintf().
-func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) {
- return func(w io.Writer, format string, a ...interface{}) {
- c.Fprintf(w, format, a...)
- }
-}
-
-// PrintfFunc returns a new function that prints the passed arguments as
-// colorized with color.Printf().
-func (c *Color) PrintfFunc() func(format string, a ...interface{}) {
- return func(format string, a ...interface{}) {
- c.Printf(format, a...)
- }
-}
-
-// FprintlnFunc returns a new function that prints the passed arguments as
-// colorized with color.Fprintln().
-func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) {
- return func(w io.Writer, a ...interface{}) {
- c.Fprintln(w, a...)
- }
-}
-
-// PrintlnFunc returns a new function that prints the passed arguments as
-// colorized with color.Println().
-func (c *Color) PrintlnFunc() func(a ...interface{}) {
- return func(a ...interface{}) {
- c.Println(a...)
- }
-}
-
-// SprintFunc returns a new function that returns colorized strings for the
-// given arguments with fmt.Sprint(). Useful to put into or mix into other
-// string. Windows users should use this in conjunction with color.Output, example:
-//
-// put := New(FgYellow).SprintFunc()
-// fmt.Fprintf(color.Output, "This is a %s", put("warning"))
-func (c *Color) SprintFunc() func(a ...interface{}) string {
- return func(a ...interface{}) string {
- return c.wrap(fmt.Sprint(a...))
- }
-}
-
-// SprintfFunc returns a new function that returns colorized strings for the
-// given arguments with fmt.Sprintf(). Useful to put into or mix into other
-// string. Windows users should use this in conjunction with color.Output.
-func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
- return func(format string, a ...interface{}) string {
- return c.wrap(fmt.Sprintf(format, a...))
- }
-}
-
-// SprintlnFunc returns a new function that returns colorized strings for the
-// given arguments with fmt.Sprintln(). Useful to put into or mix into other
-// string. Windows users should use this in conjunction with color.Output.
-func (c *Color) SprintlnFunc() func(a ...interface{}) string {
- return func(a ...interface{}) string {
- return c.wrap(sprintln(a...)) + "\n"
- }
-}
-
-// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m"
-// an example output might be: "1;36" -> bold cyan
-func (c *Color) sequence() string {
- format := make([]string, len(c.params))
- for i, v := range c.params {
- format[i] = strconv.Itoa(int(v))
- }
-
- return strings.Join(format, ";")
-}
-
-// wrap wraps the s string with the colors attributes. The string is ready to
-// be printed.
-func (c *Color) wrap(s string) string {
- if c.isNoColorSet() {
- return s
- }
-
- return c.format() + s + c.unformat()
-}
-
-func (c *Color) format() string {
- return fmt.Sprintf("%s[%sm", escape, c.sequence())
-}
-
-func (c *Color) unformat() string {
- //return fmt.Sprintf("%s[%dm", escape, Reset)
- //for each element in sequence let's use the speficic reset escape, ou the generic one if not found
- format := make([]string, len(c.params))
- for i, v := range c.params {
- format[i] = strconv.Itoa(int(Reset))
- ra, ok := mapResetAttributes[v]
- if ok {
- format[i] = strconv.Itoa(int(ra))
- }
- }
-
- return fmt.Sprintf("%s[%sm", escape, strings.Join(format, ";"))
-}
-
-// DisableColor disables the color output. Useful to not change any existing
-// code and still being able to output. Can be used for flags like
-// "--no-color". To enable back use EnableColor() method.
-func (c *Color) DisableColor() {
- c.noColor = boolPtr(true)
-}
-
-// EnableColor enables the color output. Use it in conjunction with
-// DisableColor(). Otherwise, this method has no side effects.
-func (c *Color) EnableColor() {
- c.noColor = boolPtr(false)
-}
-
-func (c *Color) isNoColorSet() bool {
- // check first if we have user set action
- if c.noColor != nil {
- return *c.noColor
- }
-
- // if not return the global option, which is disabled by default
- return NoColor
-}
-
-// Equals returns a boolean value indicating whether two colors are equal.
-func (c *Color) Equals(c2 *Color) bool {
- if c == nil && c2 == nil {
- return true
- }
- if c == nil || c2 == nil {
- return false
- }
- if len(c.params) != len(c2.params) {
- return false
- }
-
- for _, attr := range c.params {
- if !c2.attrExists(attr) {
- return false
- }
- }
-
- return true
-}
-
-func (c *Color) attrExists(a Attribute) bool {
- for _, attr := range c.params {
- if attr == a {
- return true
- }
- }
-
- return false
-}
-
-func boolPtr(v bool) *bool {
- return &v
-}
-
-func getCachedColor(p Attribute) *Color {
- colorsCacheMu.Lock()
- defer colorsCacheMu.Unlock()
-
- c, ok := colorsCache[p]
- if !ok {
- c = New(p)
- colorsCache[p] = c
- }
-
- return c
-}
-
-func colorPrint(format string, p Attribute, a ...interface{}) {
- c := getCachedColor(p)
-
- if !strings.HasSuffix(format, "\n") {
- format += "\n"
- }
-
- if len(a) == 0 {
- c.Print(format)
- } else {
- c.Printf(format, a...)
- }
-}
-
-func colorString(format string, p Attribute, a ...interface{}) string {
- c := getCachedColor(p)
-
- if len(a) == 0 {
- return c.SprintFunc()(format)
- }
-
- return c.SprintfFunc()(format, a...)
-}
-
-// Black is a convenient helper function to print with black foreground. A
-// newline is appended to format by default.
-func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) }
-
-// Red is a convenient helper function to print with red foreground. A
-// newline is appended to format by default.
-func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) }
-
-// Green is a convenient helper function to print with green foreground. A
-// newline is appended to format by default.
-func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) }
-
-// Yellow is a convenient helper function to print with yellow foreground.
-// A newline is appended to format by default.
-func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) }
-
-// Blue is a convenient helper function to print with blue foreground. A
-// newline is appended to format by default.
-func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) }
-
-// Magenta is a convenient helper function to print with magenta foreground.
-// A newline is appended to format by default.
-func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) }
-
-// Cyan is a convenient helper function to print with cyan foreground. A
-// newline is appended to format by default.
-func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) }
-
-// White is a convenient helper function to print with white foreground. A
-// newline is appended to format by default.
-func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) }
-
-// BlackString is a convenient helper function to return a string with black
-// foreground.
-func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) }
-
-// RedString is a convenient helper function to return a string with red
-// foreground.
-func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) }
-
-// GreenString is a convenient helper function to return a string with green
-// foreground.
-func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) }
-
-// YellowString is a convenient helper function to return a string with yellow
-// foreground.
-func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) }
-
-// BlueString is a convenient helper function to return a string with blue
-// foreground.
-func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) }
-
-// MagentaString is a convenient helper function to return a string with magenta
-// foreground.
-func MagentaString(format string, a ...interface{}) string {
- return colorString(format, FgMagenta, a...)
-}
-
-// CyanString is a convenient helper function to return a string with cyan
-// foreground.
-func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) }
-
-// WhiteString is a convenient helper function to return a string with white
-// foreground.
-func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) }
-
-// HiBlack is a convenient helper function to print with hi-intensity black foreground. A
-// newline is appended to format by default.
-func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) }
-
-// HiRed is a convenient helper function to print with hi-intensity red foreground. A
-// newline is appended to format by default.
-func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) }
-
-// HiGreen is a convenient helper function to print with hi-intensity green foreground. A
-// newline is appended to format by default.
-func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) }
-
-// HiYellow is a convenient helper function to print with hi-intensity yellow foreground.
-// A newline is appended to format by default.
-func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) }
-
-// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A
-// newline is appended to format by default.
-func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) }
-
-// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground.
-// A newline is appended to format by default.
-func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) }
-
-// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A
-// newline is appended to format by default.
-func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) }
-
-// HiWhite is a convenient helper function to print with hi-intensity white foreground. A
-// newline is appended to format by default.
-func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) }
-
-// HiBlackString is a convenient helper function to return a string with hi-intensity black
-// foreground.
-func HiBlackString(format string, a ...interface{}) string {
- return colorString(format, FgHiBlack, a...)
-}
-
-// HiRedString is a convenient helper function to return a string with hi-intensity red
-// foreground.
-func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) }
-
-// HiGreenString is a convenient helper function to return a string with hi-intensity green
-// foreground.
-func HiGreenString(format string, a ...interface{}) string {
- return colorString(format, FgHiGreen, a...)
-}
-
-// HiYellowString is a convenient helper function to return a string with hi-intensity yellow
-// foreground.
-func HiYellowString(format string, a ...interface{}) string {
- return colorString(format, FgHiYellow, a...)
-}
-
-// HiBlueString is a convenient helper function to return a string with hi-intensity blue
-// foreground.
-func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) }
-
-// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta
-// foreground.
-func HiMagentaString(format string, a ...interface{}) string {
- return colorString(format, FgHiMagenta, a...)
-}
-
-// HiCyanString is a convenient helper function to return a string with hi-intensity cyan
-// foreground.
-func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) }
-
-// HiWhiteString is a convenient helper function to return a string with hi-intensity white
-// foreground.
-func HiWhiteString(format string, a ...interface{}) string {
- return colorString(format, FgHiWhite, a...)
-}
-
-// sprintln is a helper function to format a string with fmt.Sprintln and trim the trailing newline.
-func sprintln(a ...interface{}) string {
- return strings.TrimSuffix(fmt.Sprintln(a...), "\n")
-}
diff --git a/vendor/github.com/fatih/color/color_windows.go b/vendor/github.com/fatih/color/color_windows.go
deleted file mode 100644
index be01c558..00000000
--- a/vendor/github.com/fatih/color/color_windows.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package color
-
-import (
- "os"
-
- "golang.org/x/sys/windows"
-)
-
-func init() {
- // Opt-in for ansi color support for current process.
- // https://learn.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences#output-sequences
- var outMode uint32
- out := windows.Handle(os.Stdout.Fd())
- if err := windows.GetConsoleMode(out, &outMode); err != nil {
- return
- }
- outMode |= windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
- _ = windows.SetConsoleMode(out, outMode)
-}
diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go
deleted file mode 100644
index 9491ad54..00000000
--- a/vendor/github.com/fatih/color/doc.go
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
-Package color is an ANSI color package to output colorized or SGR defined
-output to the standard output. The API can be used in several way, pick one
-that suits you.
-
-Use simple and default helper functions with predefined foreground colors:
-
- color.Cyan("Prints text in cyan.")
-
- // a newline will be appended automatically
- color.Blue("Prints %s in blue.", "text")
-
- // More default foreground colors..
- color.Red("We have red")
- color.Yellow("Yellow color too!")
- color.Magenta("And many others ..")
-
- // Hi-intensity colors
- color.HiGreen("Bright green color.")
- color.HiBlack("Bright black means gray..")
- color.HiWhite("Shiny white color!")
-
-However, there are times when custom color mixes are required. Below are some
-examples to create custom color objects and use the print functions of each
-separate color object.
-
- // Create a new color object
- c := color.New(color.FgCyan).Add(color.Underline)
- c.Println("Prints cyan text with an underline.")
-
- // Or just add them to New()
- d := color.New(color.FgCyan, color.Bold)
- d.Printf("This prints bold cyan %s\n", "too!.")
-
-
- // Mix up foreground and background colors, create new mixes!
- red := color.New(color.FgRed)
-
- boldRed := red.Add(color.Bold)
- boldRed.Println("This will print text in bold red.")
-
- whiteBackground := red.Add(color.BgWhite)
- whiteBackground.Println("Red text with White background.")
-
- // Use your own io.Writer output
- color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
-
- blue := color.New(color.FgBlue)
- blue.Fprint(myWriter, "This will print text in blue.")
-
-You can create PrintXxx functions to simplify even more:
-
- // Create a custom print function for convenient
- red := color.New(color.FgRed).PrintfFunc()
- red("warning")
- red("error: %s", err)
-
- // Mix up multiple attributes
- notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
- notice("don't forget this...")
-
-You can also FprintXxx functions to pass your own io.Writer:
-
- blue := color.New(FgBlue).FprintfFunc()
- blue(myWriter, "important notice: %s", stars)
-
- // Mix up with multiple attributes
- success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
- success(myWriter, don't forget this...")
-
-Or create SprintXxx functions to mix strings with other non-colorized strings:
-
- yellow := New(FgYellow).SprintFunc()
- red := New(FgRed).SprintFunc()
-
- fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
-
- info := New(FgWhite, BgGreen).SprintFunc()
- fmt.Printf("this %s rocks!\n", info("package"))
-
-Windows support is enabled by default. All Print functions work as intended.
-However, only for color.SprintXXX functions, user should use fmt.FprintXXX and
-set the output to color.Output:
-
- fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
-
- info := New(FgWhite, BgGreen).SprintFunc()
- fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
-
-Using with existing code is possible. Just use the Set() method to set the
-standard output to the given parameters. That way a rewrite of an existing
-code is not required.
-
- // Use handy standard colors.
- color.Set(color.FgYellow)
-
- fmt.Println("Existing text will be now in Yellow")
- fmt.Printf("This one %s\n", "too")
-
- color.Unset() // don't forget to unset
-
- // You can mix up parameters
- color.Set(color.FgMagenta, color.Bold)
- defer color.Unset() // use it in your function
-
- fmt.Println("All text will be now bold magenta.")
-
-There might be a case where you want to disable color output (for example to
-pipe the standard output of your app to somewhere else). `Color` has support to
-disable colors both globally and for single color definition. For example
-suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
-the color output with:
-
- var flagNoColor = flag.Bool("no-color", false, "Disable color output")
-
- if *flagNoColor {
- color.NoColor = true // disables colorized output
- }
-
-You can also disable the color by setting the NO_COLOR environment variable to any value.
-
-It also has support for single color definitions (local). You can
-disable/enable color output on the fly:
-
- c := color.New(color.FgCyan)
- c.Println("Prints cyan text")
-
- c.DisableColor()
- c.Println("This is printed without any color")
-
- c.EnableColor()
- c.Println("This prints again cyan...")
-*/
-package color
diff --git a/vendor/github.com/felixge/httpsnoop/.gitignore b/vendor/github.com/felixge/httpsnoop/.gitignore
deleted file mode 100644
index e69de29b..00000000
diff --git a/vendor/github.com/felixge/httpsnoop/LICENSE.txt b/vendor/github.com/felixge/httpsnoop/LICENSE.txt
deleted file mode 100644
index e028b46a..00000000
--- a/vendor/github.com/felixge/httpsnoop/LICENSE.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com)
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- THE SOFTWARE.
diff --git a/vendor/github.com/felixge/httpsnoop/Makefile b/vendor/github.com/felixge/httpsnoop/Makefile
deleted file mode 100644
index 4e12afdd..00000000
--- a/vendor/github.com/felixge/httpsnoop/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-.PHONY: ci generate clean
-
-ci: clean generate
- go test -race -v ./...
-
-generate:
- go generate .
-
-clean:
- rm -rf *_generated*.go
diff --git a/vendor/github.com/felixge/httpsnoop/README.md b/vendor/github.com/felixge/httpsnoop/README.md
deleted file mode 100644
index cf6b42f3..00000000
--- a/vendor/github.com/felixge/httpsnoop/README.md
+++ /dev/null
@@ -1,95 +0,0 @@
-# httpsnoop
-
-Package httpsnoop provides an easy way to capture http related metrics (i.e.
-response time, bytes written, and http status code) from your application's
-http.Handlers.
-
-Doing this requires non-trivial wrapping of the http.ResponseWriter interface,
-which is also exposed for users interested in a more low-level API.
-
-[](https://pkg.go.dev/github.com/felixge/httpsnoop)
-[](https://github.com/felixge/httpsnoop/actions/workflows/main.yaml)
-
-## Usage Example
-
-```go
-// myH is your app's http handler, perhaps a http.ServeMux or similar.
-var myH http.Handler
-// wrappedH wraps myH in order to log every request.
-wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- m := httpsnoop.CaptureMetrics(myH, w, r)
- log.Printf(
- "%s %s (code=%d dt=%s written=%d)",
- r.Method,
- r.URL,
- m.Code,
- m.Duration,
- m.Written,
- )
-})
-http.ListenAndServe(":8080", wrappedH)
-```
-
-## Why this package exists
-
-Instrumenting an application's http.Handler is surprisingly difficult.
-
-However if you google for e.g. "capture ResponseWriter status code" you'll find
-lots of advise and code examples that suggest it to be a fairly trivial
-undertaking. Unfortunately everything I've seen so far has a high chance of
-breaking your application.
-
-The main problem is that a `http.ResponseWriter` often implements additional
-interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and
-`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter`
-in your own struct that also implements the `http.ResponseWriter` interface
-will hide the additional interfaces mentioned above. This has a high change of
-introducing subtle bugs into any non-trivial application.
-
-Another approach I've seen people take is to return a struct that implements
-all of the interfaces above. However, that's also problematic, because it's
-difficult to fake some of these interfaces behaviors when the underlying
-`http.ResponseWriter` doesn't have an implementation. It's also dangerous,
-because an application may choose to operate differently, merely because it
-detects the presence of these additional interfaces.
-
-This package solves this problem by checking which additional interfaces a
-`http.ResponseWriter` implements, returning a wrapped version implementing the
-exact same set of interfaces.
-
-Additionally this package properly handles edge cases such as `WriteHeader` not
-being called, or called more than once, as well as concurrent calls to
-`http.ResponseWriter` methods, and even calls happening after the wrapped
-`ServeHTTP` has already returned.
-
-Unfortunately this package is not perfect either. It's possible that it is
-still missing some interfaces provided by the go core (let me know if you find
-one), and it won't work for applications adding their own interfaces into the
-mix. You can however use `httpsnoop.Unwrap(w)` to access the underlying
-`http.ResponseWriter` and type-assert the result to its other interfaces.
-
-However, hopefully the explanation above has sufficiently scared you of rolling
-your own solution to this problem. httpsnoop may still break your application,
-but at least it tries to avoid it as much as possible.
-
-Anyway, the real problem here is that smuggling additional interfaces inside
-`http.ResponseWriter` is a problematic design choice, but it probably goes as
-deep as the Go language specification itself. But that's okay, I still prefer
-Go over the alternatives ;).
-
-## Performance
-
-```
-BenchmarkBaseline-8 20000 94912 ns/op
-BenchmarkCaptureMetrics-8 20000 95461 ns/op
-```
-
-As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an
-overhead of ~500 ns per http request on my machine. However, the margin of
-error appears to be larger than that, therefor it should be reasonable to
-assume that the overhead introduced by `CaptureMetrics` is absolutely
-negligible.
-
-## License
-
-MIT
diff --git a/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/vendor/github.com/felixge/httpsnoop/capture_metrics.go
deleted file mode 100644
index bec7b71b..00000000
--- a/vendor/github.com/felixge/httpsnoop/capture_metrics.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package httpsnoop
-
-import (
- "io"
- "net/http"
- "time"
-)
-
-// Metrics holds metrics captured from CaptureMetrics.
-type Metrics struct {
- // Code is the first http response code passed to the WriteHeader func of
- // the ResponseWriter. If no such call is made, a default code of 200 is
- // assumed instead.
- Code int
- // Duration is the time it took to execute the handler.
- Duration time.Duration
- // Written is the number of bytes successfully written by the Write or
- // ReadFrom function of the ResponseWriter. ResponseWriters may also write
- // data to their underlaying connection directly (e.g. headers), but those
- // are not tracked. Therefor the number of Written bytes will usually match
- // the size of the response body.
- Written int64
-}
-
-// CaptureMetrics wraps the given hnd, executes it with the given w and r, and
-// returns the metrics it captured from it.
-func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Metrics {
- return CaptureMetricsFn(w, func(ww http.ResponseWriter) {
- hnd.ServeHTTP(ww, r)
- })
-}
-
-// CaptureMetricsFn wraps w and calls fn with the wrapped w and returns the
-// resulting metrics. This is very similar to CaptureMetrics (which is just
-// sugar on top of this func), but is a more usable interface if your
-// application doesn't use the Go http.Handler interface.
-func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics {
- m := Metrics{Code: http.StatusOK}
- m.CaptureMetrics(w, fn)
- return m
-}
-
-// CaptureMetrics wraps w and calls fn with the wrapped w and updates
-// Metrics m with the resulting metrics. This is similar to CaptureMetricsFn,
-// but allows one to customize starting Metrics object.
-func (m *Metrics) CaptureMetrics(w http.ResponseWriter, fn func(http.ResponseWriter)) {
- var (
- start = time.Now()
- headerWritten bool
- hooks = Hooks{
- WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc {
- return func(code int) {
- next(code)
-
- if !(code >= 100 && code <= 199) && !headerWritten {
- m.Code = code
- headerWritten = true
- }
- }
- },
-
- Write: func(next WriteFunc) WriteFunc {
- return func(p []byte) (int, error) {
- n, err := next(p)
-
- m.Written += int64(n)
- headerWritten = true
- return n, err
- }
- },
-
- ReadFrom: func(next ReadFromFunc) ReadFromFunc {
- return func(src io.Reader) (int64, error) {
- n, err := next(src)
-
- headerWritten = true
- m.Written += n
- return n, err
- }
- },
- }
- )
-
- fn(Wrap(w, hooks))
- m.Duration += time.Since(start)
-}
diff --git a/vendor/github.com/felixge/httpsnoop/docs.go b/vendor/github.com/felixge/httpsnoop/docs.go
deleted file mode 100644
index 203c35b3..00000000
--- a/vendor/github.com/felixge/httpsnoop/docs.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Package httpsnoop provides an easy way to capture http related metrics (i.e.
-// response time, bytes written, and http status code) from your application's
-// http.Handlers.
-//
-// Doing this requires non-trivial wrapping of the http.ResponseWriter
-// interface, which is also exposed for users interested in a more low-level
-// API.
-package httpsnoop
-
-//go:generate go run codegen/main.go
diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
deleted file mode 100644
index 101cedde..00000000
--- a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go
+++ /dev/null
@@ -1,436 +0,0 @@
-// +build go1.8
-// Code generated by "httpsnoop/codegen"; DO NOT EDIT.
-
-package httpsnoop
-
-import (
- "bufio"
- "io"
- "net"
- "net/http"
-)
-
-// HeaderFunc is part of the http.ResponseWriter interface.
-type HeaderFunc func() http.Header
-
-// WriteHeaderFunc is part of the http.ResponseWriter interface.
-type WriteHeaderFunc func(code int)
-
-// WriteFunc is part of the http.ResponseWriter interface.
-type WriteFunc func(b []byte) (int, error)
-
-// FlushFunc is part of the http.Flusher interface.
-type FlushFunc func()
-
-// CloseNotifyFunc is part of the http.CloseNotifier interface.
-type CloseNotifyFunc func() <-chan bool
-
-// HijackFunc is part of the http.Hijacker interface.
-type HijackFunc func() (net.Conn, *bufio.ReadWriter, error)
-
-// ReadFromFunc is part of the io.ReaderFrom interface.
-type ReadFromFunc func(src io.Reader) (int64, error)
-
-// PushFunc is part of the http.Pusher interface.
-type PushFunc func(target string, opts *http.PushOptions) error
-
-// Hooks defines a set of method interceptors for methods included in
-// http.ResponseWriter as well as some others. You can think of them as
-// middleware for the function calls they target. See Wrap for more details.
-type Hooks struct {
- Header func(HeaderFunc) HeaderFunc
- WriteHeader func(WriteHeaderFunc) WriteHeaderFunc
- Write func(WriteFunc) WriteFunc
- Flush func(FlushFunc) FlushFunc
- CloseNotify func(CloseNotifyFunc) CloseNotifyFunc
- Hijack func(HijackFunc) HijackFunc
- ReadFrom func(ReadFromFunc) ReadFromFunc
- Push func(PushFunc) PushFunc
-}
-
-// Wrap returns a wrapped version of w that provides the exact same interface
-// as w. Specifically if w implements any combination of:
-//
-// - http.Flusher
-// - http.CloseNotifier
-// - http.Hijacker
-// - io.ReaderFrom
-// - http.Pusher
-//
-// The wrapped version will implement the exact same combination. If no hooks
-// are set, the wrapped version also behaves exactly as w. Hooks targeting
-// methods not supported by w are ignored. Any other hooks will intercept the
-// method they target and may modify the call's arguments and/or return values.
-// The CaptureMetrics implementation serves as a working example for how the
-// hooks can be used.
-func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter {
- rw := &rw{w: w, h: hooks}
- _, i0 := w.(http.Flusher)
- _, i1 := w.(http.CloseNotifier)
- _, i2 := w.(http.Hijacker)
- _, i3 := w.(io.ReaderFrom)
- _, i4 := w.(http.Pusher)
- switch {
- // combination 1/32
- case !i0 && !i1 && !i2 && !i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- }{rw, rw}
- // combination 2/32
- case !i0 && !i1 && !i2 && !i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Pusher
- }{rw, rw, rw}
- // combination 3/32
- case !i0 && !i1 && !i2 && i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- io.ReaderFrom
- }{rw, rw, rw}
- // combination 4/32
- case !i0 && !i1 && !i2 && i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- io.ReaderFrom
- http.Pusher
- }{rw, rw, rw, rw}
- // combination 5/32
- case !i0 && !i1 && i2 && !i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Hijacker
- }{rw, rw, rw}
- // combination 6/32
- case !i0 && !i1 && i2 && !i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Hijacker
- http.Pusher
- }{rw, rw, rw, rw}
- // combination 7/32
- case !i0 && !i1 && i2 && i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Hijacker
- io.ReaderFrom
- }{rw, rw, rw, rw}
- // combination 8/32
- case !i0 && !i1 && i2 && i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Hijacker
- io.ReaderFrom
- http.Pusher
- }{rw, rw, rw, rw, rw}
- // combination 9/32
- case !i0 && i1 && !i2 && !i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- }{rw, rw, rw}
- // combination 10/32
- case !i0 && i1 && !i2 && !i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- http.Pusher
- }{rw, rw, rw, rw}
- // combination 11/32
- case !i0 && i1 && !i2 && i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- io.ReaderFrom
- }{rw, rw, rw, rw}
- // combination 12/32
- case !i0 && i1 && !i2 && i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- io.ReaderFrom
- http.Pusher
- }{rw, rw, rw, rw, rw}
- // combination 13/32
- case !i0 && i1 && i2 && !i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- http.Hijacker
- }{rw, rw, rw, rw}
- // combination 14/32
- case !i0 && i1 && i2 && !i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- http.Hijacker
- http.Pusher
- }{rw, rw, rw, rw, rw}
- // combination 15/32
- case !i0 && i1 && i2 && i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- http.Hijacker
- io.ReaderFrom
- }{rw, rw, rw, rw, rw}
- // combination 16/32
- case !i0 && i1 && i2 && i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- http.Hijacker
- io.ReaderFrom
- http.Pusher
- }{rw, rw, rw, rw, rw, rw}
- // combination 17/32
- case i0 && !i1 && !i2 && !i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- }{rw, rw, rw}
- // combination 18/32
- case i0 && !i1 && !i2 && !i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.Pusher
- }{rw, rw, rw, rw}
- // combination 19/32
- case i0 && !i1 && !i2 && i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- io.ReaderFrom
- }{rw, rw, rw, rw}
- // combination 20/32
- case i0 && !i1 && !i2 && i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- io.ReaderFrom
- http.Pusher
- }{rw, rw, rw, rw, rw}
- // combination 21/32
- case i0 && !i1 && i2 && !i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.Hijacker
- }{rw, rw, rw, rw}
- // combination 22/32
- case i0 && !i1 && i2 && !i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.Hijacker
- http.Pusher
- }{rw, rw, rw, rw, rw}
- // combination 23/32
- case i0 && !i1 && i2 && i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.Hijacker
- io.ReaderFrom
- }{rw, rw, rw, rw, rw}
- // combination 24/32
- case i0 && !i1 && i2 && i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.Hijacker
- io.ReaderFrom
- http.Pusher
- }{rw, rw, rw, rw, rw, rw}
- // combination 25/32
- case i0 && i1 && !i2 && !i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- }{rw, rw, rw, rw}
- // combination 26/32
- case i0 && i1 && !i2 && !i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- http.Pusher
- }{rw, rw, rw, rw, rw}
- // combination 27/32
- case i0 && i1 && !i2 && i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- io.ReaderFrom
- }{rw, rw, rw, rw, rw}
- // combination 28/32
- case i0 && i1 && !i2 && i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- io.ReaderFrom
- http.Pusher
- }{rw, rw, rw, rw, rw, rw}
- // combination 29/32
- case i0 && i1 && i2 && !i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- http.Hijacker
- }{rw, rw, rw, rw, rw}
- // combination 30/32
- case i0 && i1 && i2 && !i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- http.Hijacker
- http.Pusher
- }{rw, rw, rw, rw, rw, rw}
- // combination 31/32
- case i0 && i1 && i2 && i3 && !i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- http.Hijacker
- io.ReaderFrom
- }{rw, rw, rw, rw, rw, rw}
- // combination 32/32
- case i0 && i1 && i2 && i3 && i4:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- http.Hijacker
- io.ReaderFrom
- http.Pusher
- }{rw, rw, rw, rw, rw, rw, rw}
- }
- panic("unreachable")
-}
-
-type rw struct {
- w http.ResponseWriter
- h Hooks
-}
-
-func (w *rw) Unwrap() http.ResponseWriter {
- return w.w
-}
-
-func (w *rw) Header() http.Header {
- f := w.w.(http.ResponseWriter).Header
- if w.h.Header != nil {
- f = w.h.Header(f)
- }
- return f()
-}
-
-func (w *rw) WriteHeader(code int) {
- f := w.w.(http.ResponseWriter).WriteHeader
- if w.h.WriteHeader != nil {
- f = w.h.WriteHeader(f)
- }
- f(code)
-}
-
-func (w *rw) Write(b []byte) (int, error) {
- f := w.w.(http.ResponseWriter).Write
- if w.h.Write != nil {
- f = w.h.Write(f)
- }
- return f(b)
-}
-
-func (w *rw) Flush() {
- f := w.w.(http.Flusher).Flush
- if w.h.Flush != nil {
- f = w.h.Flush(f)
- }
- f()
-}
-
-func (w *rw) CloseNotify() <-chan bool {
- f := w.w.(http.CloseNotifier).CloseNotify
- if w.h.CloseNotify != nil {
- f = w.h.CloseNotify(f)
- }
- return f()
-}
-
-func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) {
- f := w.w.(http.Hijacker).Hijack
- if w.h.Hijack != nil {
- f = w.h.Hijack(f)
- }
- return f()
-}
-
-func (w *rw) ReadFrom(src io.Reader) (int64, error) {
- f := w.w.(io.ReaderFrom).ReadFrom
- if w.h.ReadFrom != nil {
- f = w.h.ReadFrom(f)
- }
- return f(src)
-}
-
-func (w *rw) Push(target string, opts *http.PushOptions) error {
- f := w.w.(http.Pusher).Push
- if w.h.Push != nil {
- f = w.h.Push(f)
- }
- return f(target, opts)
-}
-
-type Unwrapper interface {
- Unwrap() http.ResponseWriter
-}
-
-// Unwrap returns the underlying http.ResponseWriter from within zero or more
-// layers of httpsnoop wrappers.
-func Unwrap(w http.ResponseWriter) http.ResponseWriter {
- if rw, ok := w.(Unwrapper); ok {
- // recurse until rw.Unwrap() returns a non-Unwrapper
- return Unwrap(rw.Unwrap())
- } else {
- return w
- }
-}
diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
deleted file mode 100644
index e0951df1..00000000
--- a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go
+++ /dev/null
@@ -1,278 +0,0 @@
-// +build !go1.8
-// Code generated by "httpsnoop/codegen"; DO NOT EDIT.
-
-package httpsnoop
-
-import (
- "bufio"
- "io"
- "net"
- "net/http"
-)
-
-// HeaderFunc is part of the http.ResponseWriter interface.
-type HeaderFunc func() http.Header
-
-// WriteHeaderFunc is part of the http.ResponseWriter interface.
-type WriteHeaderFunc func(code int)
-
-// WriteFunc is part of the http.ResponseWriter interface.
-type WriteFunc func(b []byte) (int, error)
-
-// FlushFunc is part of the http.Flusher interface.
-type FlushFunc func()
-
-// CloseNotifyFunc is part of the http.CloseNotifier interface.
-type CloseNotifyFunc func() <-chan bool
-
-// HijackFunc is part of the http.Hijacker interface.
-type HijackFunc func() (net.Conn, *bufio.ReadWriter, error)
-
-// ReadFromFunc is part of the io.ReaderFrom interface.
-type ReadFromFunc func(src io.Reader) (int64, error)
-
-// Hooks defines a set of method interceptors for methods included in
-// http.ResponseWriter as well as some others. You can think of them as
-// middleware for the function calls they target. See Wrap for more details.
-type Hooks struct {
- Header func(HeaderFunc) HeaderFunc
- WriteHeader func(WriteHeaderFunc) WriteHeaderFunc
- Write func(WriteFunc) WriteFunc
- Flush func(FlushFunc) FlushFunc
- CloseNotify func(CloseNotifyFunc) CloseNotifyFunc
- Hijack func(HijackFunc) HijackFunc
- ReadFrom func(ReadFromFunc) ReadFromFunc
-}
-
-// Wrap returns a wrapped version of w that provides the exact same interface
-// as w. Specifically if w implements any combination of:
-//
-// - http.Flusher
-// - http.CloseNotifier
-// - http.Hijacker
-// - io.ReaderFrom
-//
-// The wrapped version will implement the exact same combination. If no hooks
-// are set, the wrapped version also behaves exactly as w. Hooks targeting
-// methods not supported by w are ignored. Any other hooks will intercept the
-// method they target and may modify the call's arguments and/or return values.
-// The CaptureMetrics implementation serves as a working example for how the
-// hooks can be used.
-func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter {
- rw := &rw{w: w, h: hooks}
- _, i0 := w.(http.Flusher)
- _, i1 := w.(http.CloseNotifier)
- _, i2 := w.(http.Hijacker)
- _, i3 := w.(io.ReaderFrom)
- switch {
- // combination 1/16
- case !i0 && !i1 && !i2 && !i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- }{rw, rw}
- // combination 2/16
- case !i0 && !i1 && !i2 && i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- io.ReaderFrom
- }{rw, rw, rw}
- // combination 3/16
- case !i0 && !i1 && i2 && !i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Hijacker
- }{rw, rw, rw}
- // combination 4/16
- case !i0 && !i1 && i2 && i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Hijacker
- io.ReaderFrom
- }{rw, rw, rw, rw}
- // combination 5/16
- case !i0 && i1 && !i2 && !i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- }{rw, rw, rw}
- // combination 6/16
- case !i0 && i1 && !i2 && i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- io.ReaderFrom
- }{rw, rw, rw, rw}
- // combination 7/16
- case !i0 && i1 && i2 && !i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- http.Hijacker
- }{rw, rw, rw, rw}
- // combination 8/16
- case !i0 && i1 && i2 && i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.CloseNotifier
- http.Hijacker
- io.ReaderFrom
- }{rw, rw, rw, rw, rw}
- // combination 9/16
- case i0 && !i1 && !i2 && !i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- }{rw, rw, rw}
- // combination 10/16
- case i0 && !i1 && !i2 && i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- io.ReaderFrom
- }{rw, rw, rw, rw}
- // combination 11/16
- case i0 && !i1 && i2 && !i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.Hijacker
- }{rw, rw, rw, rw}
- // combination 12/16
- case i0 && !i1 && i2 && i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.Hijacker
- io.ReaderFrom
- }{rw, rw, rw, rw, rw}
- // combination 13/16
- case i0 && i1 && !i2 && !i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- }{rw, rw, rw, rw}
- // combination 14/16
- case i0 && i1 && !i2 && i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- io.ReaderFrom
- }{rw, rw, rw, rw, rw}
- // combination 15/16
- case i0 && i1 && i2 && !i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- http.Hijacker
- }{rw, rw, rw, rw, rw}
- // combination 16/16
- case i0 && i1 && i2 && i3:
- return struct {
- Unwrapper
- http.ResponseWriter
- http.Flusher
- http.CloseNotifier
- http.Hijacker
- io.ReaderFrom
- }{rw, rw, rw, rw, rw, rw}
- }
- panic("unreachable")
-}
-
-type rw struct {
- w http.ResponseWriter
- h Hooks
-}
-
-func (w *rw) Unwrap() http.ResponseWriter {
- return w.w
-}
-
-func (w *rw) Header() http.Header {
- f := w.w.(http.ResponseWriter).Header
- if w.h.Header != nil {
- f = w.h.Header(f)
- }
- return f()
-}
-
-func (w *rw) WriteHeader(code int) {
- f := w.w.(http.ResponseWriter).WriteHeader
- if w.h.WriteHeader != nil {
- f = w.h.WriteHeader(f)
- }
- f(code)
-}
-
-func (w *rw) Write(b []byte) (int, error) {
- f := w.w.(http.ResponseWriter).Write
- if w.h.Write != nil {
- f = w.h.Write(f)
- }
- return f(b)
-}
-
-func (w *rw) Flush() {
- f := w.w.(http.Flusher).Flush
- if w.h.Flush != nil {
- f = w.h.Flush(f)
- }
- f()
-}
-
-func (w *rw) CloseNotify() <-chan bool {
- f := w.w.(http.CloseNotifier).CloseNotify
- if w.h.CloseNotify != nil {
- f = w.h.CloseNotify(f)
- }
- return f()
-}
-
-func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) {
- f := w.w.(http.Hijacker).Hijack
- if w.h.Hijack != nil {
- f = w.h.Hijack(f)
- }
- return f()
-}
-
-func (w *rw) ReadFrom(src io.Reader) (int64, error) {
- f := w.w.(io.ReaderFrom).ReadFrom
- if w.h.ReadFrom != nil {
- f = w.h.ReadFrom(f)
- }
- return f(src)
-}
-
-type Unwrapper interface {
- Unwrap() http.ResponseWriter
-}
-
-// Unwrap returns the underlying http.ResponseWriter from within zero or more
-// layers of httpsnoop wrappers.
-func Unwrap(w http.ResponseWriter) http.ResponseWriter {
- if rw, ok := w.(Unwrapper); ok {
- // recurse until rw.Unwrap() returns a non-Unwrapper
- return Unwrap(rw.Unwrap())
- } else {
- return w
- }
-}
diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml
deleted file mode 100644
index 0cffafa7..00000000
--- a/vendor/github.com/go-logr/logr/.golangci.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-run:
- timeout: 1m
- tests: true
-
-linters:
- disable-all: true
- enable:
- - asciicheck
- - errcheck
- - forcetypeassert
- - gocritic
- - gofmt
- - goimports
- - gosimple
- - govet
- - ineffassign
- - misspell
- - revive
- - staticcheck
- - typecheck
- - unused
-
-issues:
- exclude-use-default: false
- max-issues-per-linter: 0
- max-same-issues: 10
diff --git a/vendor/github.com/go-logr/logr/CHANGELOG.md b/vendor/github.com/go-logr/logr/CHANGELOG.md
deleted file mode 100644
index c3569600..00000000
--- a/vendor/github.com/go-logr/logr/CHANGELOG.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# CHANGELOG
-
-## v1.0.0-rc1
-
-This is the first logged release. Major changes (including breaking changes)
-have occurred since earlier tags.
diff --git a/vendor/github.com/go-logr/logr/CONTRIBUTING.md b/vendor/github.com/go-logr/logr/CONTRIBUTING.md
deleted file mode 100644
index 5d37e294..00000000
--- a/vendor/github.com/go-logr/logr/CONTRIBUTING.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# Contributing
-
-Logr is open to pull-requests, provided they fit within the intended scope of
-the project. Specifically, this library aims to be VERY small and minimalist,
-with no external dependencies.
-
-## Compatibility
-
-This project intends to follow [semantic versioning](http://semver.org) and
-is very strict about compatibility. Any proposed changes MUST follow those
-rules.
-
-## Performance
-
-As a logging library, logr must be as light-weight as possible. Any proposed
-code change must include results of running the [benchmark](./benchmark)
-before and after the change.
diff --git a/vendor/github.com/go-logr/logr/LICENSE b/vendor/github.com/go-logr/logr/LICENSE
deleted file mode 100644
index 8dada3ed..00000000
--- a/vendor/github.com/go-logr/logr/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md
deleted file mode 100644
index 7c7f0c69..00000000
--- a/vendor/github.com/go-logr/logr/README.md
+++ /dev/null
@@ -1,407 +0,0 @@
-# A minimal logging API for Go
-
-[](https://pkg.go.dev/github.com/go-logr/logr)
-[](https://goreportcard.com/report/github.com/go-logr/logr)
-[](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)
-
-logr offers an(other) opinion on how Go programs and libraries can do logging
-without becoming coupled to a particular logging implementation. This is not
-an implementation of logging - it is an API. In fact it is two APIs with two
-different sets of users.
-
-The `Logger` type is intended for application and library authors. It provides
-a relatively small API which can be used everywhere you want to emit logs. It
-defers the actual act of writing logs (to files, to stdout, or whatever) to the
-`LogSink` interface.
-
-The `LogSink` interface is intended for logging library implementers. It is a
-pure interface which can be implemented by logging frameworks to provide the actual logging
-functionality.
-
-This decoupling allows application and library developers to write code in
-terms of `logr.Logger` (which has very low dependency fan-out) while the
-implementation of logging is managed "up stack" (e.g. in or near `main()`.)
-Application developers can then switch out implementations as necessary.
-
-Many people assert that libraries should not be logging, and as such efforts
-like this are pointless. Those people are welcome to convince the authors of
-the tens-of-thousands of libraries that *DO* write logs that they are all
-wrong. In the meantime, logr takes a more practical approach.
-
-## Typical usage
-
-Somewhere, early in an application's life, it will make a decision about which
-logging library (implementation) it actually wants to use. Something like:
-
-```
- func main() {
- // ... other setup code ...
-
- // Create the "root" logger. We have chosen the "logimpl" implementation,
- // which takes some initial parameters and returns a logr.Logger.
- logger := logimpl.New(param1, param2)
-
- // ... other setup code ...
-```
-
-Most apps will call into other libraries, create structures to govern the flow,
-etc. The `logr.Logger` object can be passed to these other libraries, stored
-in structs, or even used as a package-global variable, if needed. For example:
-
-```
- app := createTheAppObject(logger)
- app.Run()
-```
-
-Outside of this early setup, no other packages need to know about the choice of
-implementation. They write logs in terms of the `logr.Logger` that they
-received:
-
-```
- type appObject struct {
- // ... other fields ...
- logger logr.Logger
- // ... other fields ...
- }
-
- func (app *appObject) Run() {
- app.logger.Info("starting up", "timestamp", time.Now())
-
- // ... app code ...
-```
-
-## Background
-
-If the Go standard library had defined an interface for logging, this project
-probably would not be needed. Alas, here we are.
-
-When the Go developers started developing such an interface with
-[slog](https://github.com/golang/go/issues/56345), they adopted some of the
-logr design but also left out some parts and changed others:
-
-| Feature | logr | slog |
-|---------|------|------|
-| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) |
-| Low-level API | `LogSink` | `Handler` |
-| Stack unwinding | done by `LogSink` | done by `Logger` |
-| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) |
-| Generating a value for logging on demand | `Marshaler` | `LogValuer` |
-| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" |
-| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` |
-| Passing logger via context | `NewContext`, `FromContext` | no API |
-| Adding a name to a logger | `WithName` | no API |
-| Modify verbosity of log entries in a call chain | `V` | no API |
-| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
-| Pass context for extracting additional values | no API | API variants like `InfoCtx` |
-
-The high-level slog API is explicitly meant to be one of many different APIs
-that can be layered on top of a shared `slog.Handler`. logr is one such
-alternative API, with [interoperability](#slog-interoperability) provided by
-some conversion functions.
-
-### Inspiration
-
-Before you consider this package, please read [this blog post by the
-inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what
-he has to say, and it largely aligns with our own experiences.
-
-### Differences from Dave's ideas
-
-The main differences are:
-
-1. Dave basically proposes doing away with the notion of a logging API in favor
-of `fmt.Printf()`. We disagree, especially when you consider things like output
-locations, timestamps, file and line decorations, and structured logging. This
-package restricts the logging API to just 2 types of logs: info and error.
-
-Info logs are things you want to tell the user which are not errors. Error
-logs are, well, errors. If your code receives an `error` from a subordinate
-function call and is logging that `error` *and not returning it*, use error
-logs.
-
-2. Verbosity-levels on info logs. This gives developers a chance to indicate
-arbitrary grades of importance for info logs, without assigning names with
-semantic meaning such as "warning", "trace", and "debug." Superficially this
-may feel very similar, but the primary difference is the lack of semantics.
-Because verbosity is a numerical value, it's safe to assume that an app running
-with higher verbosity means more (and less important) logs will be generated.
-
-## Implementations (non-exhaustive)
-
-There are implementations for the following logging libraries:
-
-- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr)
-- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr)
-- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
-- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr)
-- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting)
-- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
-- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr)
-- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
-- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
-- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
-- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr)
-- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
-- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
-
-## slog interoperability
-
-Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
-and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and
-`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`.
-As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
-slog API.
-
-### Using a `logr.LogSink` as backend for slog
-
-Ideally, a logr sink implementation should support both logr and slog by
-implementing both the normal logr interface(s) and `SlogSink`. Because
-of a conflict in the parameters of the common `Enabled` method, it is [not
-possible to implement both slog.Handler and logr.Sink in the same
-type](https://github.com/golang/go/issues/59110).
-
-If both are supported, log calls can go from the high-level APIs to the backend
-without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can
-convert back and forth without adding additional wrappers, with one exception:
-when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
-`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future
-log calls.
-
-Such an implementation should also support values that implement specific
-interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`,
-`slog.GroupValue`). logr does not convert those.
-
-Not supporting slog has several drawbacks:
-- Recording source code locations works correctly if the handler gets called
- through `slog.Logger`, but may be wrong in other cases. That's because a
- `logr.Sink` does its own stack unwinding instead of using the program counter
- provided by the high-level API.
-- slog levels <= 0 can be mapped to logr levels by negating the level without a
- loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as
- used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink
- because logr does not support "more important than info" levels.
-- The slog group concept is supported by prefixing each key in a key/value
- pair with the group names, separated by a dot. For structured output like
- JSON it would be better to group the key/value pairs inside an object.
-- Special slog values and interfaces don't work as expected.
-- The overhead is likely to be higher.
-
-These drawbacks are severe enough that applications using a mixture of slog and
-logr should switch to a different backend.
-
-### Using a `slog.Handler` as backend for logr
-
-Using a plain `slog.Handler` without support for logr works better than the
-other direction:
-- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
- by negating them.
-- Stack unwinding is done by the `SlogSink` and the resulting program
- counter is passed to the `slog.Handler`.
-- Names added via `Logger.WithName` are gathered and recorded in an additional
- attribute with `logger` as key and the names separated by slash as value.
-- `Logger.Error` is turned into a log record with `slog.LevelError` as level
- and an additional attribute with `err` as key, if an error was provided.
-
-The main drawback is that `logr.Marshaler` will not be supported. Types should
-ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility
-with logr implementations without slog support is not important, then
-`slog.Valuer` is sufficient.
-
-### Context support for slog
-
-Storing a logger in a `context.Context` is not supported by
-slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be
-used to fill this gap. They store and retrieve a `slog.Logger` pointer
-under the same context key that is also used by `NewContext` and
-`FromContext` for `logr.Logger` value.
-
-When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will
-automatically convert the `slog.Logger` to a
-`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction.
-
-With this approach, binaries which use either slog or logr are as efficient as
-possible with no unnecessary allocations. This is also why the API stores a
-`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger`
-on retrieval would need to allocate one.
-
-The downside is that switching back and forth needs more allocations. Because
-logr is the API that is already in use by different packages, in particular
-Kubernetes, the recommendation is to use the `logr.Logger` API in code which
-uses contextual logging.
-
-An alternative to adding values to a logger and storing that logger in the
-context is to store the values in the context and to configure a logging
-backend to extract those values when emitting log entries. This only works when
-log calls are passed the context, which is not supported by the logr API.
-
-With the slog API, it is possible, but not
-required. https://github.com/veqryn/slog-context is a package for slog which
-provides additional support code for this approach. It also contains wrappers
-for the context functions in logr, so developers who prefer to not use the logr
-APIs directly can use those instead and the resulting code will still be
-interoperable with logr.
-
-## FAQ
-
-### Conceptual
-
-#### Why structured logging?
-
-- **Structured logs are more easily queryable**: Since you've got
- key-value pairs, it's much easier to query your structured logs for
- particular values by filtering on the contents of a particular key --
- think searching request logs for error codes, Kubernetes reconcilers for
- the name and namespace of the reconciled object, etc.
-
-- **Structured logging makes it easier to have cross-referenceable logs**:
- Similarly to searchability, if you maintain conventions around your
- keys, it becomes easy to gather all log lines related to a particular
- concept.
-
-- **Structured logs allow better dimensions of filtering**: if you have
- structure to your logs, you've got more precise control over how much
- information is logged -- you might choose in a particular configuration
- to log certain keys but not others, only log lines where a certain key
- matches a certain value, etc., instead of just having v-levels and names
- to key off of.
-
-- **Structured logs better represent structured data**: sometimes, the
- data that you want to log is inherently structured (think tuple-link
- objects.) Structured logs allow you to preserve that structure when
- outputting.
-
-#### Why V-levels?
-
-**V-levels give operators an easy way to control the chattiness of log
-operations**. V-levels provide a way for a given package to distinguish
-the relative importance or verbosity of a given log message. Then, if
-a particular logger or package is logging too many messages, the user
-of the package can simply change the v-levels for that library.
-
-#### Why not named levels, like Info/Warning/Error?
-
-Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
-from Dave's ideas](#differences-from-daves-ideas).
-
-#### Why not allow format strings, too?
-
-**Format strings negate many of the benefits of structured logs**:
-
-- They're not easily searchable without resorting to fuzzy searching,
- regular expressions, etc.
-
-- They don't store structured data well, since contents are flattened into
- a string.
-
-- They're not cross-referenceable.
-
-- They don't compress easily, since the message is not constant.
-
-(Unless you turn positional parameters into key-value pairs with numerical
-keys, at which point you've gotten key-value logging with meaningless
-keys.)
-
-### Practical
-
-#### Why key-value pairs, and not a map?
-
-Key-value pairs are *much* easier to optimize, especially around
-allocations. Zap (a structured logger that inspired logr's interface) has
-[performance measurements](https://github.com/uber-go/zap#performance)
-that show this quite nicely.
-
-While the interface ends up being a little less obvious, you get
-potentially better performance, plus avoid making users type
-`map[string]string{}` every time they want to log.
-
-#### What if my V-levels differ between libraries?
-
-That's fine. Control your V-levels on a per-logger basis, and use the
-`WithName` method to pass different loggers to different libraries.
-
-Generally, you should take care to ensure that you have relatively
-consistent V-levels within a given logger, however, as this makes deciding
-on what verbosity of logs to request easier.
-
-#### But I really want to use a format string!
-
-That's not actually a question. Assuming your question is "how do
-I convert my mental model of logging with format strings to logging with
-constant messages":
-
-1. Figure out what the error actually is, as you'd write in a TL;DR style,
- and use that as a message.
-
-2. For every place you'd write a format specifier, look to the word before
- it, and add that as a key value pair.
-
-For instance, consider the following examples (all taken from spots in the
-Kubernetes codebase):
-
-- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
- responseCode, err)` becomes `logger.Error(err, "client returned an
- error", "code", responseCode)`
-
-- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
- seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
- response when requesting url", "attempt", retries, "after
- seconds", seconds, "url", url)`
-
-If you *really* must use a format string, use it in a key's value, and
-call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to
-reflect over type %T")` becomes `logger.Info("unable to reflect over
-type", "type", fmt.Sprintf("%T"))`. In general though, the cases where
-this is necessary should be few and far between.
-
-#### How do I choose my V-levels?
-
-This is basically the only hard constraint: increase V-levels to denote
-more verbose or more debug-y logs.
-
-Otherwise, you can start out with `0` as "you always want to see this",
-`1` as "common logging that you might *possibly* want to turn off", and
-`10` as "I would like to performance-test your log collection stack."
-
-Then gradually choose levels in between as you need them, working your way
-down from 10 (for debug and trace style logs) and up from 1 (for chattier
-info-type logs). For reference, slog pre-defines -4 for debug logs
-(corresponds to 4 in logr), which matches what is
-[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use).
-
-#### How do I choose my keys?
-
-Keys are fairly flexible, and can hold more or less any string
-value. For best compatibility with implementations and consistency
-with existing code in other projects, there are a few conventions you
-should consider.
-
-- Make your keys human-readable.
-- Constant keys are generally a good idea.
-- Be consistent across your codebase.
-- Keys should naturally match parts of the message string.
-- Use lower case for simple keys and
- [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for
- more complex ones. Kubernetes is one example of a project that has
- [adopted that
- convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments).
-
-While key names are mostly unrestricted (and spaces are acceptable),
-it's generally a good idea to stick to printable ascii characters, or at
-least match the general character set of your log lines.
-
-#### Why should keys be constant values?
-
-The point of structured logging is to make later log processing easier. Your
-keys are, effectively, the schema of each log message. If you use different
-keys across instances of the same log line, you will make your structured logs
-much harder to use. `Sprintf()` is for values, not for keys!
-
-#### Why is this not a pure interface?
-
-The Logger type is implemented as a struct in order to allow the Go compiler to
-optimize things like high-V `Info` logs that are not triggered. Not all of
-these implementations are implemented yet, but this structure was suggested as
-a way to ensure they *can* be implemented. All of the real work is behind the
-`LogSink` interface.
-
-[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging
diff --git a/vendor/github.com/go-logr/logr/SECURITY.md b/vendor/github.com/go-logr/logr/SECURITY.md
deleted file mode 100644
index 1ca756fc..00000000
--- a/vendor/github.com/go-logr/logr/SECURITY.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Security Policy
-
-If you have discovered a security vulnerability in this project, please report it
-privately. **Do not disclose it as a public issue.** This gives us time to work with you
-to fix the issue before public exposure, reducing the chance that the exploit will be
-used before a patch is released.
-
-You may submit the report in the following ways:
-
-- send an email to go-logr-security@googlegroups.com
-- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new)
-
-Please provide the following information in your report:
-
-- A description of the vulnerability and its impact
-- How to reproduce the issue
-
-We ask that you give us 90 days to work on a fix before public exposure.
diff --git a/vendor/github.com/go-logr/logr/context.go b/vendor/github.com/go-logr/logr/context.go
deleted file mode 100644
index de8bcc3a..00000000
--- a/vendor/github.com/go-logr/logr/context.go
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
-Copyright 2023 The logr Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package logr
-
-// contextKey is how we find Loggers in a context.Context. With Go < 1.21,
-// the value is always a Logger value. With Go >= 1.21, the value can be a
-// Logger value or a slog.Logger pointer.
-type contextKey struct{}
-
-// notFoundError exists to carry an IsNotFound method.
-type notFoundError struct{}
-
-func (notFoundError) Error() string {
- return "no logr.Logger was present"
-}
-
-func (notFoundError) IsNotFound() bool {
- return true
-}
diff --git a/vendor/github.com/go-logr/logr/context_noslog.go b/vendor/github.com/go-logr/logr/context_noslog.go
deleted file mode 100644
index f012f9a1..00000000
--- a/vendor/github.com/go-logr/logr/context_noslog.go
+++ /dev/null
@@ -1,49 +0,0 @@
-//go:build !go1.21
-// +build !go1.21
-
-/*
-Copyright 2019 The logr Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package logr
-
-import (
- "context"
-)
-
-// FromContext returns a Logger from ctx or an error if no Logger is found.
-func FromContext(ctx context.Context) (Logger, error) {
- if v, ok := ctx.Value(contextKey{}).(Logger); ok {
- return v, nil
- }
-
- return Logger{}, notFoundError{}
-}
-
-// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
-// returns a Logger that discards all log messages.
-func FromContextOrDiscard(ctx context.Context) Logger {
- if v, ok := ctx.Value(contextKey{}).(Logger); ok {
- return v
- }
-
- return Discard()
-}
-
-// NewContext returns a new Context, derived from ctx, which carries the
-// provided Logger.
-func NewContext(ctx context.Context, logger Logger) context.Context {
- return context.WithValue(ctx, contextKey{}, logger)
-}
diff --git a/vendor/github.com/go-logr/logr/context_slog.go b/vendor/github.com/go-logr/logr/context_slog.go
deleted file mode 100644
index 065ef0b8..00000000
--- a/vendor/github.com/go-logr/logr/context_slog.go
+++ /dev/null
@@ -1,83 +0,0 @@
-//go:build go1.21
-// +build go1.21
-
-/*
-Copyright 2019 The logr Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package logr
-
-import (
- "context"
- "fmt"
- "log/slog"
-)
-
-// FromContext returns a Logger from ctx or an error if no Logger is found.
-func FromContext(ctx context.Context) (Logger, error) {
- v := ctx.Value(contextKey{})
- if v == nil {
- return Logger{}, notFoundError{}
- }
-
- switch v := v.(type) {
- case Logger:
- return v, nil
- case *slog.Logger:
- return FromSlogHandler(v.Handler()), nil
- default:
- // Not reached.
- panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
- }
-}
-
-// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found.
-func FromContextAsSlogLogger(ctx context.Context) *slog.Logger {
- v := ctx.Value(contextKey{})
- if v == nil {
- return nil
- }
-
- switch v := v.(type) {
- case Logger:
- return slog.New(ToSlogHandler(v))
- case *slog.Logger:
- return v
- default:
- // Not reached.
- panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
- }
-}
-
-// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
-// returns a Logger that discards all log messages.
-func FromContextOrDiscard(ctx context.Context) Logger {
- if logger, err := FromContext(ctx); err == nil {
- return logger
- }
- return Discard()
-}
-
-// NewContext returns a new Context, derived from ctx, which carries the
-// provided Logger.
-func NewContext(ctx context.Context, logger Logger) context.Context {
- return context.WithValue(ctx, contextKey{}, logger)
-}
-
-// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the
-// provided slog.Logger.
-func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context {
- return context.WithValue(ctx, contextKey{}, logger)
-}
diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go
deleted file mode 100644
index 99fe8be9..00000000
--- a/vendor/github.com/go-logr/logr/discard.go
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
-Copyright 2020 The logr Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package logr
-
-// Discard returns a Logger that discards all messages logged to it. It can be
-// used whenever the caller is not interested in the logs. Logger instances
-// produced by this function always compare as equal.
-func Discard() Logger {
- return New(nil)
-}
diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go
deleted file mode 100644
index 30568e76..00000000
--- a/vendor/github.com/go-logr/logr/funcr/funcr.go
+++ /dev/null
@@ -1,914 +0,0 @@
-/*
-Copyright 2021 The logr Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package funcr implements formatting of structured log messages and
-// optionally captures the call site and timestamp.
-//
-// The simplest way to use it is via its implementation of a
-// github.com/go-logr/logr.LogSink with output through an arbitrary
-// "write" function. See New and NewJSON for details.
-//
-// # Custom LogSinks
-//
-// For users who need more control, a funcr.Formatter can be embedded inside
-// your own custom LogSink implementation. This is useful when the LogSink
-// needs to implement additional methods, for example.
-//
-// # Formatting
-//
-// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
-// values which are being logged. When rendering a struct, funcr will use Go's
-// standard JSON tags (all except "string").
-package funcr
-
-import (
- "bytes"
- "encoding"
- "encoding/json"
- "fmt"
- "path/filepath"
- "reflect"
- "runtime"
- "strconv"
- "strings"
- "time"
-
- "github.com/go-logr/logr"
-)
-
-// New returns a logr.Logger which is implemented by an arbitrary function.
-func New(fn func(prefix, args string), opts Options) logr.Logger {
- return logr.New(newSink(fn, NewFormatter(opts)))
-}
-
-// NewJSON returns a logr.Logger which is implemented by an arbitrary function
-// and produces JSON output.
-func NewJSON(fn func(obj string), opts Options) logr.Logger {
- fnWrapper := func(_, obj string) {
- fn(obj)
- }
- return logr.New(newSink(fnWrapper, NewFormatterJSON(opts)))
-}
-
-// Underlier exposes access to the underlying logging function. Since
-// callers only have a logr.Logger, they have to know which
-// implementation is in use, so this interface is less of an
-// abstraction and more of a way to test type conversion.
-type Underlier interface {
- GetUnderlying() func(prefix, args string)
-}
-
-func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
- l := &fnlogger{
- Formatter: formatter,
- write: fn,
- }
- // For skipping fnlogger.Info and fnlogger.Error.
- l.Formatter.AddCallDepth(1)
- return l
-}
-
-// Options carries parameters which influence the way logs are generated.
-type Options struct {
- // LogCaller tells funcr to add a "caller" key to some or all log lines.
- // This has some overhead, so some users might not want it.
- LogCaller MessageClass
-
- // LogCallerFunc tells funcr to also log the calling function name. This
- // has no effect if caller logging is not enabled (see Options.LogCaller).
- LogCallerFunc bool
-
- // LogTimestamp tells funcr to add a "ts" key to log lines. This has some
- // overhead, so some users might not want it.
- LogTimestamp bool
-
- // TimestampFormat tells funcr how to render timestamps when LogTimestamp
- // is enabled. If not specified, a default format will be used. For more
- // details, see docs for Go's time.Layout.
- TimestampFormat string
-
- // LogInfoLevel tells funcr what key to use to log the info level.
- // If not specified, the info level will be logged as "level".
- // If this is set to "", the info level will not be logged at all.
- LogInfoLevel *string
-
- // Verbosity tells funcr which V logs to produce. Higher values enable
- // more logs. Info logs at or below this level will be written, while logs
- // above this level will be discarded.
- Verbosity int
-
- // RenderBuiltinsHook allows users to mutate the list of key-value pairs
- // while a log line is being rendered. The kvList argument follows logr
- // conventions - each pair of slice elements is comprised of a string key
- // and an arbitrary value (verified and sanitized before calling this
- // hook). The value returned must follow the same conventions. This hook
- // can be used to audit or modify logged data. For example, you might want
- // to prefix all of funcr's built-in keys with some string. This hook is
- // only called for built-in (provided by funcr itself) key-value pairs.
- // Equivalent hooks are offered for key-value pairs saved via
- // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
- // for user-provided pairs (see RenderArgsHook).
- RenderBuiltinsHook func(kvList []any) []any
-
- // RenderValuesHook is the same as RenderBuiltinsHook, except that it is
- // only called for key-value pairs saved via logr.Logger.WithValues. See
- // RenderBuiltinsHook for more details.
- RenderValuesHook func(kvList []any) []any
-
- // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
- // called for key-value pairs passed directly to Info and Error. See
- // RenderBuiltinsHook for more details.
- RenderArgsHook func(kvList []any) []any
-
- // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
- // that contains a struct, etc.) it may log. Every time it finds a struct,
- // slice, array, or map the depth is increased by one. When the maximum is
- // reached, the value will be converted to a string indicating that the max
- // depth has been exceeded. If this field is not specified, a default
- // value will be used.
- MaxLogDepth int
-}
-
-// MessageClass indicates which category or categories of messages to consider.
-type MessageClass int
-
-const (
- // None ignores all message classes.
- None MessageClass = iota
- // All considers all message classes.
- All
- // Info only considers info messages.
- Info
- // Error only considers error messages.
- Error
-)
-
-// fnlogger inherits some of its LogSink implementation from Formatter
-// and just needs to add some glue code.
-type fnlogger struct {
- Formatter
- write func(prefix, args string)
-}
-
-func (l fnlogger) WithName(name string) logr.LogSink {
- l.Formatter.AddName(name)
- return &l
-}
-
-func (l fnlogger) WithValues(kvList ...any) logr.LogSink {
- l.Formatter.AddValues(kvList)
- return &l
-}
-
-func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
- l.Formatter.AddCallDepth(depth)
- return &l
-}
-
-func (l fnlogger) Info(level int, msg string, kvList ...any) {
- prefix, args := l.FormatInfo(level, msg, kvList)
- l.write(prefix, args)
-}
-
-func (l fnlogger) Error(err error, msg string, kvList ...any) {
- prefix, args := l.FormatError(err, msg, kvList)
- l.write(prefix, args)
-}
-
-func (l fnlogger) GetUnderlying() func(prefix, args string) {
- return l.write
-}
-
-// Assert conformance to the interfaces.
-var _ logr.LogSink = &fnlogger{}
-var _ logr.CallDepthLogSink = &fnlogger{}
-var _ Underlier = &fnlogger{}
-
-// NewFormatter constructs a Formatter which emits a JSON-like key=value format.
-func NewFormatter(opts Options) Formatter {
- return newFormatter(opts, outputKeyValue)
-}
-
-// NewFormatterJSON constructs a Formatter which emits strict JSON.
-func NewFormatterJSON(opts Options) Formatter {
- return newFormatter(opts, outputJSON)
-}
-
-// Defaults for Options.
-const defaultTimestampFormat = "2006-01-02 15:04:05.000000"
-const defaultMaxLogDepth = 16
-
-func newFormatter(opts Options, outfmt outputFormat) Formatter {
- if opts.TimestampFormat == "" {
- opts.TimestampFormat = defaultTimestampFormat
- }
- if opts.MaxLogDepth == 0 {
- opts.MaxLogDepth = defaultMaxLogDepth
- }
- if opts.LogInfoLevel == nil {
- opts.LogInfoLevel = new(string)
- *opts.LogInfoLevel = "level"
- }
- f := Formatter{
- outputFormat: outfmt,
- prefix: "",
- values: nil,
- depth: 0,
- opts: &opts,
- }
- return f
-}
-
-// Formatter is an opaque struct which can be embedded in a LogSink
-// implementation. It should be constructed with NewFormatter. Some of
-// its methods directly implement logr.LogSink.
-type Formatter struct {
- outputFormat outputFormat
- prefix string
- values []any
- valuesStr string
- depth int
- opts *Options
- groupName string // for slog groups
- groups []groupDef
-}
-
-// outputFormat indicates which outputFormat to use.
-type outputFormat int
-
-const (
- // outputKeyValue emits a JSON-like key=value format, but not strict JSON.
- outputKeyValue outputFormat = iota
- // outputJSON emits strict JSON.
- outputJSON
-)
-
-// groupDef represents a saved group. The values may be empty, but we don't
-// know if we need to render the group until the final record is rendered.
-type groupDef struct {
- name string
- values string
-}
-
-// PseudoStruct is a list of key-value pairs that gets logged as a struct.
-type PseudoStruct []any
-
-// render produces a log line, ready to use.
-func (f Formatter) render(builtins, args []any) string {
- // Empirically bytes.Buffer is faster than strings.Builder for this.
- buf := bytes.NewBuffer(make([]byte, 0, 1024))
-
- if f.outputFormat == outputJSON {
- buf.WriteByte('{') // for the whole record
- }
-
- // Render builtins
- vals := builtins
- if hook := f.opts.RenderBuiltinsHook; hook != nil {
- vals = hook(f.sanitize(vals))
- }
- f.flatten(buf, vals, false) // keys are ours, no need to escape
- continuing := len(builtins) > 0
-
- // Turn the inner-most group into a string
- argsStr := func() string {
- buf := bytes.NewBuffer(make([]byte, 0, 1024))
-
- vals = args
- if hook := f.opts.RenderArgsHook; hook != nil {
- vals = hook(f.sanitize(vals))
- }
- f.flatten(buf, vals, true) // escape user-provided keys
-
- return buf.String()
- }()
-
- // Render the stack of groups from the inside out.
- bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr)
- for i := len(f.groups) - 1; i >= 0; i-- {
- grp := &f.groups[i]
- if grp.values == "" && bodyStr == "" {
- // no contents, so we must elide the whole group
- continue
- }
- bodyStr = f.renderGroup(grp.name, grp.values, bodyStr)
- }
-
- if bodyStr != "" {
- if continuing {
- buf.WriteByte(f.comma())
- }
- buf.WriteString(bodyStr)
- }
-
- if f.outputFormat == outputJSON {
- buf.WriteByte('}') // for the whole record
- }
-
- return buf.String()
-}
-
-// renderGroup returns a string representation of the named group with rendered
-// values and args. If the name is empty, this will return the values and args,
-// joined. If the name is not empty, this will return a single key-value pair,
-// where the value is a grouping of the values and args. If the values and
-// args are both empty, this will return an empty string, even if the name was
-// specified.
-func (f Formatter) renderGroup(name string, values string, args string) string {
- buf := bytes.NewBuffer(make([]byte, 0, 1024))
-
- needClosingBrace := false
- if name != "" && (values != "" || args != "") {
- buf.WriteString(f.quoted(name, true)) // escape user-provided keys
- buf.WriteByte(f.colon())
- buf.WriteByte('{')
- needClosingBrace = true
- }
-
- continuing := false
- if values != "" {
- buf.WriteString(values)
- continuing = true
- }
-
- if args != "" {
- if continuing {
- buf.WriteByte(f.comma())
- }
- buf.WriteString(args)
- }
-
- if needClosingBrace {
- buf.WriteByte('}')
- }
-
- return buf.String()
-}
-
-// flatten renders a list of key-value pairs into a buffer. If escapeKeys is
-// true, the keys are assumed to have non-JSON-compatible characters in them
-// and must be evaluated for escapes.
-//
-// This function returns a potentially modified version of kvList, which
-// ensures that there is a value for every key (adding a value if needed) and
-// that each key is a string (substituting a key if needed).
-func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any {
- // This logic overlaps with sanitize() but saves one type-cast per key,
- // which can be measurable.
- if len(kvList)%2 != 0 {
- kvList = append(kvList, noValue)
- }
- copied := false
- for i := 0; i < len(kvList); i += 2 {
- k, ok := kvList[i].(string)
- if !ok {
- if !copied {
- newList := make([]any, len(kvList))
- copy(newList, kvList)
- kvList = newList
- copied = true
- }
- k = f.nonStringKey(kvList[i])
- kvList[i] = k
- }
- v := kvList[i+1]
-
- if i > 0 {
- if f.outputFormat == outputJSON {
- buf.WriteByte(f.comma())
- } else {
- // In theory the format could be something we don't understand. In
- // practice, we control it, so it won't be.
- buf.WriteByte(' ')
- }
- }
-
- buf.WriteString(f.quoted(k, escapeKeys))
- buf.WriteByte(f.colon())
- buf.WriteString(f.pretty(v))
- }
- return kvList
-}
-
-func (f Formatter) quoted(str string, escape bool) string {
- if escape {
- return prettyString(str)
- }
- // this is faster
- return `"` + str + `"`
-}
-
-func (f Formatter) comma() byte {
- if f.outputFormat == outputJSON {
- return ','
- }
- return ' '
-}
-
-func (f Formatter) colon() byte {
- if f.outputFormat == outputJSON {
- return ':'
- }
- return '='
-}
-
-func (f Formatter) pretty(value any) string {
- return f.prettyWithFlags(value, 0, 0)
-}
-
-const (
- flagRawStruct = 0x1 // do not print braces on structs
-)
-
-// TODO: This is not fast. Most of the overhead goes here.
-func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
- if depth > f.opts.MaxLogDepth {
- return `""`
- }
-
- // Handle types that take full control of logging.
- if v, ok := value.(logr.Marshaler); ok {
- // Replace the value with what the type wants to get logged.
- // That then gets handled below via reflection.
- value = invokeMarshaler(v)
- }
-
- // Handle types that want to format themselves.
- switch v := value.(type) {
- case fmt.Stringer:
- value = invokeStringer(v)
- case error:
- value = invokeError(v)
- }
-
- // Handling the most common types without reflect is a small perf win.
- switch v := value.(type) {
- case bool:
- return strconv.FormatBool(v)
- case string:
- return prettyString(v)
- case int:
- return strconv.FormatInt(int64(v), 10)
- case int8:
- return strconv.FormatInt(int64(v), 10)
- case int16:
- return strconv.FormatInt(int64(v), 10)
- case int32:
- return strconv.FormatInt(int64(v), 10)
- case int64:
- return strconv.FormatInt(int64(v), 10)
- case uint:
- return strconv.FormatUint(uint64(v), 10)
- case uint8:
- return strconv.FormatUint(uint64(v), 10)
- case uint16:
- return strconv.FormatUint(uint64(v), 10)
- case uint32:
- return strconv.FormatUint(uint64(v), 10)
- case uint64:
- return strconv.FormatUint(v, 10)
- case uintptr:
- return strconv.FormatUint(uint64(v), 10)
- case float32:
- return strconv.FormatFloat(float64(v), 'f', -1, 32)
- case float64:
- return strconv.FormatFloat(v, 'f', -1, 64)
- case complex64:
- return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
- case complex128:
- return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
- case PseudoStruct:
- buf := bytes.NewBuffer(make([]byte, 0, 1024))
- v = f.sanitize(v)
- if flags&flagRawStruct == 0 {
- buf.WriteByte('{')
- }
- for i := 0; i < len(v); i += 2 {
- if i > 0 {
- buf.WriteByte(f.comma())
- }
- k, _ := v[i].(string) // sanitize() above means no need to check success
- // arbitrary keys might need escaping
- buf.WriteString(prettyString(k))
- buf.WriteByte(f.colon())
- buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
- }
- if flags&flagRawStruct == 0 {
- buf.WriteByte('}')
- }
- return buf.String()
- }
-
- buf := bytes.NewBuffer(make([]byte, 0, 256))
- t := reflect.TypeOf(value)
- if t == nil {
- return "null"
- }
- v := reflect.ValueOf(value)
- switch t.Kind() {
- case reflect.Bool:
- return strconv.FormatBool(v.Bool())
- case reflect.String:
- return prettyString(v.String())
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return strconv.FormatInt(int64(v.Int()), 10)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return strconv.FormatUint(uint64(v.Uint()), 10)
- case reflect.Float32:
- return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
- case reflect.Float64:
- return strconv.FormatFloat(v.Float(), 'f', -1, 64)
- case reflect.Complex64:
- return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
- case reflect.Complex128:
- return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
- case reflect.Struct:
- if flags&flagRawStruct == 0 {
- buf.WriteByte('{')
- }
- printComma := false // testing i>0 is not enough because of JSON omitted fields
- for i := 0; i < t.NumField(); i++ {
- fld := t.Field(i)
- if fld.PkgPath != "" {
- // reflect says this field is only defined for non-exported fields.
- continue
- }
- if !v.Field(i).CanInterface() {
- // reflect isn't clear exactly what this means, but we can't use it.
- continue
- }
- name := ""
- omitempty := false
- if tag, found := fld.Tag.Lookup("json"); found {
- if tag == "-" {
- continue
- }
- if comma := strings.Index(tag, ","); comma != -1 {
- if n := tag[:comma]; n != "" {
- name = n
- }
- rest := tag[comma:]
- if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
- omitempty = true
- }
- } else {
- name = tag
- }
- }
- if omitempty && isEmpty(v.Field(i)) {
- continue
- }
- if printComma {
- buf.WriteByte(f.comma())
- }
- printComma = true // if we got here, we are rendering a field
- if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
- buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
- continue
- }
- if name == "" {
- name = fld.Name
- }
- // field names can't contain characters which need escaping
- buf.WriteString(f.quoted(name, false))
- buf.WriteByte(f.colon())
- buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
- }
- if flags&flagRawStruct == 0 {
- buf.WriteByte('}')
- }
- return buf.String()
- case reflect.Slice, reflect.Array:
- // If this is outputing as JSON make sure this isn't really a json.RawMessage.
- // If so just emit "as-is" and don't pretty it as that will just print
- // it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want.
- if f.outputFormat == outputJSON {
- if rm, ok := value.(json.RawMessage); ok {
- // If it's empty make sure we emit an empty value as the array style would below.
- if len(rm) > 0 {
- buf.Write(rm)
- } else {
- buf.WriteString("null")
- }
- return buf.String()
- }
- }
- buf.WriteByte('[')
- for i := 0; i < v.Len(); i++ {
- if i > 0 {
- buf.WriteByte(f.comma())
- }
- e := v.Index(i)
- buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
- }
- buf.WriteByte(']')
- return buf.String()
- case reflect.Map:
- buf.WriteByte('{')
- // This does not sort the map keys, for best perf.
- it := v.MapRange()
- i := 0
- for it.Next() {
- if i > 0 {
- buf.WriteByte(f.comma())
- }
- // If a map key supports TextMarshaler, use it.
- keystr := ""
- if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
- txt, err := m.MarshalText()
- if err != nil {
- keystr = fmt.Sprintf("", err.Error())
- } else {
- keystr = string(txt)
- }
- keystr = prettyString(keystr)
- } else {
- // prettyWithFlags will produce already-escaped values
- keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
- if t.Key().Kind() != reflect.String {
- // JSON only does string keys. Unlike Go's standard JSON, we'll
- // convert just about anything to a string.
- keystr = prettyString(keystr)
- }
- }
- buf.WriteString(keystr)
- buf.WriteByte(f.colon())
- buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
- i++
- }
- buf.WriteByte('}')
- return buf.String()
- case reflect.Ptr, reflect.Interface:
- if v.IsNil() {
- return "null"
- }
- return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
- }
- return fmt.Sprintf(`""`, t.Kind().String())
-}
-
-func prettyString(s string) string {
- // Avoid escaping (which does allocations) if we can.
- if needsEscape(s) {
- return strconv.Quote(s)
- }
- b := bytes.NewBuffer(make([]byte, 0, 1024))
- b.WriteByte('"')
- b.WriteString(s)
- b.WriteByte('"')
- return b.String()
-}
-
-// needsEscape determines whether the input string needs to be escaped or not,
-// without doing any allocations.
-func needsEscape(s string) bool {
- for _, r := range s {
- if !strconv.IsPrint(r) || r == '\\' || r == '"' {
- return true
- }
- }
- return false
-}
-
-func isEmpty(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
- return v.Len() == 0
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.Complex64, reflect.Complex128:
- return v.Complex() == 0
- case reflect.Interface, reflect.Ptr:
- return v.IsNil()
- }
- return false
-}
-
-func invokeMarshaler(m logr.Marshaler) (ret any) {
- defer func() {
- if r := recover(); r != nil {
- ret = fmt.Sprintf("", r)
- }
- }()
- return m.MarshalLog()
-}
-
-func invokeStringer(s fmt.Stringer) (ret string) {
- defer func() {
- if r := recover(); r != nil {
- ret = fmt.Sprintf("", r)
- }
- }()
- return s.String()
-}
-
-func invokeError(e error) (ret string) {
- defer func() {
- if r := recover(); r != nil {
- ret = fmt.Sprintf("", r)
- }
- }()
- return e.Error()
-}
-
-// Caller represents the original call site for a log line, after considering
-// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
-// Line fields will always be provided, while the Func field is optional.
-// Users can set the render hook fields in Options to examine logged key-value
-// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
-// field is enabled for the given MessageClass.
-type Caller struct {
- // File is the basename of the file for this call site.
- File string `json:"file"`
- // Line is the line number in the file for this call site.
- Line int `json:"line"`
- // Func is the function name for this call site, or empty if
- // Options.LogCallerFunc is not enabled.
- Func string `json:"function,omitempty"`
-}
-
-func (f Formatter) caller() Caller {
- // +1 for this frame, +1 for Info/Error.
- pc, file, line, ok := runtime.Caller(f.depth + 2)
- if !ok {
- return Caller{"", 0, ""}
- }
- fn := ""
- if f.opts.LogCallerFunc {
- if fp := runtime.FuncForPC(pc); fp != nil {
- fn = fp.Name()
- }
- }
-
- return Caller{filepath.Base(file), line, fn}
-}
-
-const noValue = ""
-
-func (f Formatter) nonStringKey(v any) string {
- return fmt.Sprintf("", f.snippet(v))
-}
-
-// snippet produces a short snippet string of an arbitrary value.
-func (f Formatter) snippet(v any) string {
- const snipLen = 16
-
- snip := f.pretty(v)
- if len(snip) > snipLen {
- snip = snip[:snipLen]
- }
- return snip
-}
-
-// sanitize ensures that a list of key-value pairs has a value for every key
-// (adding a value if needed) and that each key is a string (substituting a key
-// if needed).
-func (f Formatter) sanitize(kvList []any) []any {
- if len(kvList)%2 != 0 {
- kvList = append(kvList, noValue)
- }
- for i := 0; i < len(kvList); i += 2 {
- _, ok := kvList[i].(string)
- if !ok {
- kvList[i] = f.nonStringKey(kvList[i])
- }
- }
- return kvList
-}
-
-// startGroup opens a new group scope (basically a sub-struct), which locks all
-// the current saved values and starts them anew. This is needed to satisfy
-// slog.
-func (f *Formatter) startGroup(name string) {
- // Unnamed groups are just inlined.
- if name == "" {
- return
- }
-
- n := len(f.groups)
- f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr})
-
- // Start collecting new values.
- f.groupName = name
- f.valuesStr = ""
- f.values = nil
-}
-
-// Init configures this Formatter from runtime info, such as the call depth
-// imposed by logr itself.
-// Note that this receiver is a pointer, so depth can be saved.
-func (f *Formatter) Init(info logr.RuntimeInfo) {
- f.depth += info.CallDepth
-}
-
-// Enabled checks whether an info message at the given level should be logged.
-func (f Formatter) Enabled(level int) bool {
- return level <= f.opts.Verbosity
-}
-
-// GetDepth returns the current depth of this Formatter. This is useful for
-// implementations which do their own caller attribution.
-func (f Formatter) GetDepth() int {
- return f.depth
-}
-
-// FormatInfo renders an Info log message into strings. The prefix will be
-// empty when no names were set (via AddNames), or when the output is
-// configured for JSON.
-func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) {
- args := make([]any, 0, 64) // using a constant here impacts perf
- prefix = f.prefix
- if f.outputFormat == outputJSON {
- args = append(args, "logger", prefix)
- prefix = ""
- }
- if f.opts.LogTimestamp {
- args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
- }
- if policy := f.opts.LogCaller; policy == All || policy == Info {
- args = append(args, "caller", f.caller())
- }
- if key := *f.opts.LogInfoLevel; key != "" {
- args = append(args, key, level)
- }
- args = append(args, "msg", msg)
- return prefix, f.render(args, kvList)
-}
-
-// FormatError renders an Error log message into strings. The prefix will be
-// empty when no names were set (via AddNames), or when the output is
-// configured for JSON.
-func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) {
- args := make([]any, 0, 64) // using a constant here impacts perf
- prefix = f.prefix
- if f.outputFormat == outputJSON {
- args = append(args, "logger", prefix)
- prefix = ""
- }
- if f.opts.LogTimestamp {
- args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
- }
- if policy := f.opts.LogCaller; policy == All || policy == Error {
- args = append(args, "caller", f.caller())
- }
- args = append(args, "msg", msg)
- var loggableErr any
- if err != nil {
- loggableErr = err.Error()
- }
- args = append(args, "error", loggableErr)
- return prefix, f.render(args, kvList)
-}
-
-// AddName appends the specified name. funcr uses '/' characters to separate
-// name elements. Callers should not pass '/' in the provided name string, but
-// this library does not actually enforce that.
-func (f *Formatter) AddName(name string) {
- if len(f.prefix) > 0 {
- f.prefix += "/"
- }
- f.prefix += name
-}
-
-// AddValues adds key-value pairs to the set of saved values to be logged with
-// each log line.
-func (f *Formatter) AddValues(kvList []any) {
- // Three slice args forces a copy.
- n := len(f.values)
- f.values = append(f.values[:n:n], kvList...)
-
- vals := f.values
- if hook := f.opts.RenderValuesHook; hook != nil {
- vals = hook(f.sanitize(vals))
- }
-
- // Pre-render values, so we don't have to do it on each Info/Error call.
- buf := bytes.NewBuffer(make([]byte, 0, 1024))
- f.flatten(buf, vals, true) // escape user-provided keys
- f.valuesStr = buf.String()
-}
-
-// AddCallDepth increases the number of stack-frames to skip when attributing
-// the log line to a file and line.
-func (f *Formatter) AddCallDepth(depth int) {
- f.depth += depth
-}
diff --git a/vendor/github.com/go-logr/logr/funcr/slogsink.go b/vendor/github.com/go-logr/logr/funcr/slogsink.go
deleted file mode 100644
index 7bd84761..00000000
--- a/vendor/github.com/go-logr/logr/funcr/slogsink.go
+++ /dev/null
@@ -1,105 +0,0 @@
-//go:build go1.21
-// +build go1.21
-
-/*
-Copyright 2023 The logr Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package funcr
-
-import (
- "context"
- "log/slog"
-
- "github.com/go-logr/logr"
-)
-
-var _ logr.SlogSink = &fnlogger{}
-
-const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
-
-func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
- kvList := make([]any, 0, 2*record.NumAttrs())
- record.Attrs(func(attr slog.Attr) bool {
- kvList = attrToKVs(attr, kvList)
- return true
- })
-
- if record.Level >= slog.LevelError {
- l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
- } else {
- level := l.levelFromSlog(record.Level)
- l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
- }
- return nil
-}
-
-func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
- kvList := make([]any, 0, 2*len(attrs))
- for _, attr := range attrs {
- kvList = attrToKVs(attr, kvList)
- }
- l.AddValues(kvList)
- return &l
-}
-
-func (l fnlogger) WithGroup(name string) logr.SlogSink {
- l.startGroup(name)
- return &l
-}
-
-// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups
-// and other details of slog.
-func attrToKVs(attr slog.Attr, kvList []any) []any {
- attrVal := attr.Value.Resolve()
- if attrVal.Kind() == slog.KindGroup {
- groupVal := attrVal.Group()
- grpKVs := make([]any, 0, 2*len(groupVal))
- for _, attr := range groupVal {
- grpKVs = attrToKVs(attr, grpKVs)
- }
- if attr.Key == "" {
- // slog says we have to inline these
- kvList = append(kvList, grpKVs...)
- } else {
- kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
- }
- } else if attr.Key != "" {
- kvList = append(kvList, attr.Key, attrVal.Any())
- }
-
- return kvList
-}
-
-// levelFromSlog adjusts the level by the logger's verbosity and negates it.
-// It ensures that the result is >= 0. This is necessary because the result is
-// passed to a LogSink and that API did not historically document whether
-// levels could be negative or what that meant.
-//
-// Some example usage:
-//
-// logrV0 := getMyLogger()
-// logrV2 := logrV0.V(2)
-// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
-// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
-// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
-// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
-func (l fnlogger) levelFromSlog(level slog.Level) int {
- result := -level
- if result < 0 {
- result = 0 // because LogSink doesn't expect negative V levels
- }
- return int(result)
-}
diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go
deleted file mode 100644
index b4428e10..00000000
--- a/vendor/github.com/go-logr/logr/logr.go
+++ /dev/null
@@ -1,520 +0,0 @@
-/*
-Copyright 2019 The logr Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// This design derives from Dave Cheney's blog:
-// http://dave.cheney.net/2015/11/05/lets-talk-about-logging
-
-// Package logr defines a general-purpose logging API and abstract interfaces
-// to back that API. Packages in the Go ecosystem can depend on this package,
-// while callers can implement logging with whatever backend is appropriate.
-//
-// # Usage
-//
-// Logging is done using a Logger instance. Logger is a concrete type with
-// methods, which defers the actual logging to a LogSink interface. The main
-// methods of Logger are Info() and Error(). Arguments to Info() and Error()
-// are key/value pairs rather than printf-style formatted strings, emphasizing
-// "structured logging".
-//
-// With Go's standard log package, we might write:
-//
-// log.Printf("setting target value %s", targetValue)
-//
-// With logr's structured logging, we'd write:
-//
-// logger.Info("setting target", "value", targetValue)
-//
-// Errors are much the same. Instead of:
-//
-// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
-//
-// We'd write:
-//
-// logger.Error(err, "failed to open the pod bay door", "user", user)
-//
-// Info() and Error() are very similar, but they are separate methods so that
-// LogSink implementations can choose to do things like attach additional
-// information (such as stack traces) on calls to Error(). Error() messages are
-// always logged, regardless of the current verbosity. If there is no error
-// instance available, passing nil is valid.
-//
-// # Verbosity
-//
-// Often we want to log information only when the application in "verbose
-// mode". To write log lines that are more verbose, Logger has a V() method.
-// The higher the V-level of a log line, the less critical it is considered.
-// Log-lines with V-levels that are not enabled (as per the LogSink) will not
-// be written. Level V(0) is the default, and logger.V(0).Info() has the same
-// meaning as logger.Info(). Negative V-levels have the same meaning as V(0).
-// Error messages do not have a verbosity level and are always logged.
-//
-// Where we might have written:
-//
-// if flVerbose >= 2 {
-// log.Printf("an unusual thing happened")
-// }
-//
-// We can write:
-//
-// logger.V(2).Info("an unusual thing happened")
-//
-// # Logger Names
-//
-// Logger instances can have name strings so that all messages logged through
-// that instance have additional context. For example, you might want to add
-// a subsystem name:
-//
-// logger.WithName("compactor").Info("started", "time", time.Now())
-//
-// The WithName() method returns a new Logger, which can be passed to
-// constructors or other functions for further use. Repeated use of WithName()
-// will accumulate name "segments". These name segments will be joined in some
-// way by the LogSink implementation. It is strongly recommended that name
-// segments contain simple identifiers (letters, digits, and hyphen), and do
-// not contain characters that could muddle the log output or confuse the
-// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
-// quotes, etc).
-//
-// # Saved Values
-//
-// Logger instances can store any number of key/value pairs, which will be
-// logged alongside all messages logged through that instance. For example,
-// you might want to create a Logger instance per managed object:
-//
-// With the standard log package, we might write:
-//
-// log.Printf("decided to set field foo to value %q for object %s/%s",
-// targetValue, object.Namespace, object.Name)
-//
-// With logr we'd write:
-//
-// // Elsewhere: set up the logger to log the object name.
-// obj.logger = mainLogger.WithValues(
-// "name", obj.name, "namespace", obj.namespace)
-//
-// // later on...
-// obj.logger.Info("setting foo", "value", targetValue)
-//
-// # Best Practices
-//
-// Logger has very few hard rules, with the goal that LogSink implementations
-// might have a lot of freedom to differentiate. There are, however, some
-// things to consider.
-//
-// The log message consists of a constant message attached to the log line.
-// This should generally be a simple description of what's occurring, and should
-// never be a format string. Variable information can then be attached using
-// named values.
-//
-// Keys are arbitrary strings, but should generally be constant values. Values
-// may be any Go value, but how the value is formatted is determined by the
-// LogSink implementation.
-//
-// Logger instances are meant to be passed around by value. Code that receives
-// such a value can call its methods without having to check whether the
-// instance is ready for use.
-//
-// The zero logger (= Logger{}) is identical to Discard() and discards all log
-// entries. Code that receives a Logger by value can simply call it, the methods
-// will never crash. For cases where passing a logger is optional, a pointer to Logger
-// should be used.
-//
-// # Key Naming Conventions
-//
-// Keys are not strictly required to conform to any specification or regex, but
-// it is recommended that they:
-// - be human-readable and meaningful (not auto-generated or simple ordinals)
-// - be constant (not dependent on input data)
-// - contain only printable characters
-// - not contain whitespace or punctuation
-// - use lower case for simple keys and lowerCamelCase for more complex ones
-//
-// These guidelines help ensure that log data is processed properly regardless
-// of the log implementation. For example, log implementations will try to
-// output JSON data or will store data for later database (e.g. SQL) queries.
-//
-// While users are generally free to use key names of their choice, it's
-// generally best to avoid using the following keys, as they're frequently used
-// by implementations:
-// - "caller": the calling information (file/line) of a particular log line
-// - "error": the underlying error value in the `Error` method
-// - "level": the log level
-// - "logger": the name of the associated logger
-// - "msg": the log message
-// - "stacktrace": the stack trace associated with a particular log line or
-// error (often from the `Error` message)
-// - "ts": the timestamp for a log line
-//
-// Implementations are encouraged to make use of these keys to represent the
-// above concepts, when necessary (for example, in a pure-JSON output form, it
-// would be necessary to represent at least message and timestamp as ordinary
-// named values).
-//
-// # Break Glass
-//
-// Implementations may choose to give callers access to the underlying
-// logging implementation. The recommended pattern for this is:
-//
-// // Underlier exposes access to the underlying logging implementation.
-// // Since callers only have a logr.Logger, they have to know which
-// // implementation is in use, so this interface is less of an abstraction
-// // and more of way to test type conversion.
-// type Underlier interface {
-// GetUnderlying()
-// }
-//
-// Logger grants access to the sink to enable type assertions like this:
-//
-// func DoSomethingWithImpl(log logr.Logger) {
-// if underlier, ok := log.GetSink().(impl.Underlier); ok {
-// implLogger := underlier.GetUnderlying()
-// ...
-// }
-// }
-//
-// Custom `With*` functions can be implemented by copying the complete
-// Logger struct and replacing the sink in the copy:
-//
-// // WithFooBar changes the foobar parameter in the log sink and returns a
-// // new logger with that modified sink. It does nothing for loggers where
-// // the sink doesn't support that parameter.
-// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
-// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
-// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
-// }
-// return log
-// }
-//
-// Don't use New to construct a new Logger with a LogSink retrieved from an
-// existing Logger. Source code attribution might not work correctly and
-// unexported fields in Logger get lost.
-//
-// Beware that the same LogSink instance may be shared by different logger
-// instances. Calling functions that modify the LogSink will affect all of
-// those.
-package logr
-
-// New returns a new Logger instance. This is primarily used by libraries
-// implementing LogSink, rather than end users. Passing a nil sink will create
-// a Logger which discards all log lines.
-func New(sink LogSink) Logger {
- logger := Logger{}
- logger.setSink(sink)
- if sink != nil {
- sink.Init(runtimeInfo)
- }
- return logger
-}
-
-// setSink stores the sink and updates any related fields. It mutates the
-// logger and thus is only safe to use for loggers that are not currently being
-// used concurrently.
-func (l *Logger) setSink(sink LogSink) {
- l.sink = sink
-}
-
-// GetSink returns the stored sink.
-func (l Logger) GetSink() LogSink {
- return l.sink
-}
-
-// WithSink returns a copy of the logger with the new sink.
-func (l Logger) WithSink(sink LogSink) Logger {
- l.setSink(sink)
- return l
-}
-
-// Logger is an interface to an abstract logging implementation. This is a
-// concrete type for performance reasons, but all the real work is passed on to
-// a LogSink. Implementations of LogSink should provide their own constructors
-// that return Logger, not LogSink.
-//
-// The underlying sink can be accessed through GetSink and be modified through
-// WithSink. This enables the implementation of custom extensions (see "Break
-// Glass" in the package documentation). Normally the sink should be used only
-// indirectly.
-type Logger struct {
- sink LogSink
- level int
-}
-
-// Enabled tests whether this Logger is enabled. For example, commandline
-// flags might be used to set the logging verbosity and disable some info logs.
-func (l Logger) Enabled() bool {
- // Some implementations of LogSink look at the caller in Enabled (e.g.
- // different verbosity levels per package or file), but we only pass one
- // CallDepth in (via Init). This means that all calls from Logger to the
- // LogSink's Enabled, Info, and Error methods must have the same number of
- // frames. In other words, Logger methods can't call other Logger methods
- // which call these LogSink methods unless we do it the same in all paths.
- return l.sink != nil && l.sink.Enabled(l.level)
-}
-
-// Info logs a non-error message with the given key/value pairs as context.
-//
-// The msg argument should be used to add some constant description to the log
-// line. The key/value pairs can then be used to add additional variable
-// information. The key/value pairs must alternate string keys and arbitrary
-// values.
-func (l Logger) Info(msg string, keysAndValues ...any) {
- if l.sink == nil {
- return
- }
- if l.sink.Enabled(l.level) { // see comment in Enabled
- if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
- withHelper.GetCallStackHelper()()
- }
- l.sink.Info(l.level, msg, keysAndValues...)
- }
-}
-
-// Error logs an error, with the given message and key/value pairs as context.
-// It functions similarly to Info, but may have unique behavior, and should be
-// preferred for logging errors (see the package documentations for more
-// information). The log message will always be emitted, regardless of
-// verbosity level.
-//
-// The msg argument should be used to add context to any underlying error,
-// while the err argument should be used to attach the actual error that
-// triggered this log line, if present. The err parameter is optional
-// and nil may be passed instead of an error instance.
-func (l Logger) Error(err error, msg string, keysAndValues ...any) {
- if l.sink == nil {
- return
- }
- if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
- withHelper.GetCallStackHelper()()
- }
- l.sink.Error(err, msg, keysAndValues...)
-}
-
-// V returns a new Logger instance for a specific verbosity level, relative to
-// this Logger. In other words, V-levels are additive. A higher verbosity
-// level means a log message is less important. Negative V-levels are treated
-// as 0.
-func (l Logger) V(level int) Logger {
- if l.sink == nil {
- return l
- }
- if level < 0 {
- level = 0
- }
- l.level += level
- return l
-}
-
-// GetV returns the verbosity level of the logger. If the logger's LogSink is
-// nil as in the Discard logger, this will always return 0.
-func (l Logger) GetV() int {
- // 0 if l.sink nil because of the if check in V above.
- return l.level
-}
-
-// WithValues returns a new Logger instance with additional key/value pairs.
-// See Info for documentation on how key/value pairs work.
-func (l Logger) WithValues(keysAndValues ...any) Logger {
- if l.sink == nil {
- return l
- }
- l.setSink(l.sink.WithValues(keysAndValues...))
- return l
-}
-
-// WithName returns a new Logger instance with the specified name element added
-// to the Logger's name. Successive calls with WithName append additional
-// suffixes to the Logger's name. It's strongly recommended that name segments
-// contain only letters, digits, and hyphens (see the package documentation for
-// more information).
-func (l Logger) WithName(name string) Logger {
- if l.sink == nil {
- return l
- }
- l.setSink(l.sink.WithName(name))
- return l
-}
-
-// WithCallDepth returns a Logger instance that offsets the call stack by the
-// specified number of frames when logging call site information, if possible.
-// This is useful for users who have helper functions between the "real" call
-// site and the actual calls to Logger methods. If depth is 0 the attribution
-// should be to the direct caller of this function. If depth is 1 the
-// attribution should skip 1 call frame, and so on. Successive calls to this
-// are additive.
-//
-// If the underlying log implementation supports a WithCallDepth(int) method,
-// it will be called and the result returned. If the implementation does not
-// support CallDepthLogSink, the original Logger will be returned.
-//
-// To skip one level, WithCallStackHelper() should be used instead of
-// WithCallDepth(1) because it works with implementions that support the
-// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
-func (l Logger) WithCallDepth(depth int) Logger {
- if l.sink == nil {
- return l
- }
- if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
- l.setSink(withCallDepth.WithCallDepth(depth))
- }
- return l
-}
-
-// WithCallStackHelper returns a new Logger instance that skips the direct
-// caller when logging call site information, if possible. This is useful for
-// users who have helper functions between the "real" call site and the actual
-// calls to Logger methods and want to support loggers which depend on marking
-// each individual helper function, like loggers based on testing.T.
-//
-// In addition to using that new logger instance, callers also must call the
-// returned function.
-//
-// If the underlying log implementation supports a WithCallDepth(int) method,
-// WithCallDepth(1) will be called to produce a new logger. If it supports a
-// WithCallStackHelper() method, that will be also called. If the
-// implementation does not support either of these, the original Logger will be
-// returned.
-func (l Logger) WithCallStackHelper() (func(), Logger) {
- if l.sink == nil {
- return func() {}, l
- }
- var helper func()
- if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
- l.setSink(withCallDepth.WithCallDepth(1))
- }
- if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
- helper = withHelper.GetCallStackHelper()
- } else {
- helper = func() {}
- }
- return helper, l
-}
-
-// IsZero returns true if this logger is an uninitialized zero value
-func (l Logger) IsZero() bool {
- return l.sink == nil
-}
-
-// RuntimeInfo holds information that the logr "core" library knows which
-// LogSinks might want to know.
-type RuntimeInfo struct {
- // CallDepth is the number of call frames the logr library adds between the
- // end-user and the LogSink. LogSink implementations which choose to print
- // the original logging site (e.g. file & line) should climb this many
- // additional frames to find it.
- CallDepth int
-}
-
-// runtimeInfo is a static global. It must not be changed at run time.
-var runtimeInfo = RuntimeInfo{
- CallDepth: 1,
-}
-
-// LogSink represents a logging implementation. End-users will generally not
-// interact with this type.
-type LogSink interface {
- // Init receives optional information about the logr library for LogSink
- // implementations that need it.
- Init(info RuntimeInfo)
-
- // Enabled tests whether this LogSink is enabled at the specified V-level.
- // For example, commandline flags might be used to set the logging
- // verbosity and disable some info logs.
- Enabled(level int) bool
-
- // Info logs a non-error message with the given key/value pairs as context.
- // The level argument is provided for optional logging. This method will
- // only be called when Enabled(level) is true. See Logger.Info for more
- // details.
- Info(level int, msg string, keysAndValues ...any)
-
- // Error logs an error, with the given message and key/value pairs as
- // context. See Logger.Error for more details.
- Error(err error, msg string, keysAndValues ...any)
-
- // WithValues returns a new LogSink with additional key/value pairs. See
- // Logger.WithValues for more details.
- WithValues(keysAndValues ...any) LogSink
-
- // WithName returns a new LogSink with the specified name appended. See
- // Logger.WithName for more details.
- WithName(name string) LogSink
-}
-
-// CallDepthLogSink represents a LogSink that knows how to climb the call stack
-// to identify the original call site and can offset the depth by a specified
-// number of frames. This is useful for users who have helper functions
-// between the "real" call site and the actual calls to Logger methods.
-// Implementations that log information about the call site (such as file,
-// function, or line) would otherwise log information about the intermediate
-// helper functions.
-//
-// This is an optional interface and implementations are not required to
-// support it.
-type CallDepthLogSink interface {
- // WithCallDepth returns a LogSink that will offset the call
- // stack by the specified number of frames when logging call
- // site information.
- //
- // If depth is 0, the LogSink should skip exactly the number
- // of call frames defined in RuntimeInfo.CallDepth when Info
- // or Error are called, i.e. the attribution should be to the
- // direct caller of Logger.Info or Logger.Error.
- //
- // If depth is 1 the attribution should skip 1 call frame, and so on.
- // Successive calls to this are additive.
- WithCallDepth(depth int) LogSink
-}
-
-// CallStackHelperLogSink represents a LogSink that knows how to climb
-// the call stack to identify the original call site and can skip
-// intermediate helper functions if they mark themselves as
-// helper. Go's testing package uses that approach.
-//
-// This is useful for users who have helper functions between the
-// "real" call site and the actual calls to Logger methods.
-// Implementations that log information about the call site (such as
-// file, function, or line) would otherwise log information about the
-// intermediate helper functions.
-//
-// This is an optional interface and implementations are not required
-// to support it. Implementations that choose to support this must not
-// simply implement it as WithCallDepth(1), because
-// Logger.WithCallStackHelper will call both methods if they are
-// present. This should only be implemented for LogSinks that actually
-// need it, as with testing.T.
-type CallStackHelperLogSink interface {
- // GetCallStackHelper returns a function that must be called
- // to mark the direct caller as helper function when logging
- // call site information.
- GetCallStackHelper() func()
-}
-
-// Marshaler is an optional interface that logged values may choose to
-// implement. Loggers with structured output, such as JSON, should
-// log the object return by the MarshalLog method instead of the
-// original value.
-type Marshaler interface {
- // MarshalLog can be used to:
- // - ensure that structs are not logged as strings when the original
- // value has a String method: return a different type without a
- // String method
- // - select which fields of a complex type should get logged:
- // return a simpler struct with fewer fields
- // - log unexported fields: return a different struct
- // with exported fields
- //
- // It may return any value of any type.
- MarshalLog() any
-}
diff --git a/vendor/github.com/go-logr/logr/sloghandler.go b/vendor/github.com/go-logr/logr/sloghandler.go
deleted file mode 100644
index 82d1ba49..00000000
--- a/vendor/github.com/go-logr/logr/sloghandler.go
+++ /dev/null
@@ -1,192 +0,0 @@
-//go:build go1.21
-// +build go1.21
-
-/*
-Copyright 2023 The logr Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package logr
-
-import (
- "context"
- "log/slog"
-)
-
-type slogHandler struct {
- // May be nil, in which case all logs get discarded.
- sink LogSink
- // Non-nil if sink is non-nil and implements SlogSink.
- slogSink SlogSink
-
- // groupPrefix collects values from WithGroup calls. It gets added as
- // prefix to value keys when handling a log record.
- groupPrefix string
-
- // levelBias can be set when constructing the handler to influence the
- // slog.Level of log records. A positive levelBias reduces the
- // slog.Level value. slog has no API to influence this value after the
- // handler got created, so it can only be set indirectly through
- // Logger.V.
- levelBias slog.Level
-}
-
-var _ slog.Handler = &slogHandler{}
-
-// groupSeparator is used to concatenate WithGroup names and attribute keys.
-const groupSeparator = "."
-
-// GetLevel is used for black box unit testing.
-func (l *slogHandler) GetLevel() slog.Level {
- return l.levelBias
-}
-
-func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool {
- return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level)))
-}
-
-func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
- if l.slogSink != nil {
- // Only adjust verbosity level of log entries < slog.LevelError.
- if record.Level < slog.LevelError {
- record.Level -= l.levelBias
- }
- return l.slogSink.Handle(ctx, record)
- }
-
- // No need to check for nil sink here because Handle will only be called
- // when Enabled returned true.
-
- kvList := make([]any, 0, 2*record.NumAttrs())
- record.Attrs(func(attr slog.Attr) bool {
- kvList = attrToKVs(attr, l.groupPrefix, kvList)
- return true
- })
- if record.Level >= slog.LevelError {
- l.sinkWithCallDepth().Error(nil, record.Message, kvList...)
- } else {
- level := l.levelFromSlog(record.Level)
- l.sinkWithCallDepth().Info(level, record.Message, kvList...)
- }
- return nil
-}
-
-// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info
-// are called by Handle, code in slog gets skipped.
-//
-// This offset currently (Go 1.21.0) works for calls through
-// slog.New(ToSlogHandler(...)). There's no guarantee that the call
-// chain won't change. Wrapping the handler will also break unwinding. It's
-// still better than not adjusting at all....
-//
-// This cannot be done when constructing the handler because FromSlogHandler needs
-// access to the original sink without this adjustment. A second copy would
-// work, but then WithAttrs would have to be called for both of them.
-func (l *slogHandler) sinkWithCallDepth() LogSink {
- if sink, ok := l.sink.(CallDepthLogSink); ok {
- return sink.WithCallDepth(2)
- }
- return l.sink
-}
-
-func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
- if l.sink == nil || len(attrs) == 0 {
- return l
- }
-
- clone := *l
- if l.slogSink != nil {
- clone.slogSink = l.slogSink.WithAttrs(attrs)
- clone.sink = clone.slogSink
- } else {
- kvList := make([]any, 0, 2*len(attrs))
- for _, attr := range attrs {
- kvList = attrToKVs(attr, l.groupPrefix, kvList)
- }
- clone.sink = l.sink.WithValues(kvList...)
- }
- return &clone
-}
-
-func (l *slogHandler) WithGroup(name string) slog.Handler {
- if l.sink == nil {
- return l
- }
- if name == "" {
- // slog says to inline empty groups
- return l
- }
- clone := *l
- if l.slogSink != nil {
- clone.slogSink = l.slogSink.WithGroup(name)
- clone.sink = clone.slogSink
- } else {
- clone.groupPrefix = addPrefix(clone.groupPrefix, name)
- }
- return &clone
-}
-
-// attrToKVs appends a slog.Attr to a logr-style kvList. It handle slog Groups
-// and other details of slog.
-func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
- attrVal := attr.Value.Resolve()
- if attrVal.Kind() == slog.KindGroup {
- groupVal := attrVal.Group()
- grpKVs := make([]any, 0, 2*len(groupVal))
- prefix := groupPrefix
- if attr.Key != "" {
- prefix = addPrefix(groupPrefix, attr.Key)
- }
- for _, attr := range groupVal {
- grpKVs = attrToKVs(attr, prefix, grpKVs)
- }
- kvList = append(kvList, grpKVs...)
- } else if attr.Key != "" {
- kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
- }
-
- return kvList
-}
-
-func addPrefix(prefix, name string) string {
- if prefix == "" {
- return name
- }
- if name == "" {
- return prefix
- }
- return prefix + groupSeparator + name
-}
-
-// levelFromSlog adjusts the level by the logger's verbosity and negates it.
-// It ensures that the result is >= 0. This is necessary because the result is
-// passed to a LogSink and that API did not historically document whether
-// levels could be negative or what that meant.
-//
-// Some example usage:
-//
-// logrV0 := getMyLogger()
-// logrV2 := logrV0.V(2)
-// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
-// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
-// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
-// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
-func (l *slogHandler) levelFromSlog(level slog.Level) int {
- result := -level
- result += l.levelBias // in case the original Logger had a V level
- if result < 0 {
- result = 0 // because LogSink doesn't expect negative V levels
- }
- return int(result)
-}
diff --git a/vendor/github.com/go-logr/logr/slogr.go b/vendor/github.com/go-logr/logr/slogr.go
deleted file mode 100644
index 28a83d02..00000000
--- a/vendor/github.com/go-logr/logr/slogr.go
+++ /dev/null
@@ -1,100 +0,0 @@
-//go:build go1.21
-// +build go1.21
-
-/*
-Copyright 2023 The logr Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package logr
-
-import (
- "context"
- "log/slog"
-)
-
-// FromSlogHandler returns a Logger which writes to the slog.Handler.
-//
-// The logr verbosity level is mapped to slog levels such that V(0) becomes
-// slog.LevelInfo and V(4) becomes slog.LevelDebug.
-func FromSlogHandler(handler slog.Handler) Logger {
- if handler, ok := handler.(*slogHandler); ok {
- if handler.sink == nil {
- return Discard()
- }
- return New(handler.sink).V(int(handler.levelBias))
- }
- return New(&slogSink{handler: handler})
-}
-
-// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger.
-//
-// The returned logger writes all records with level >= slog.LevelError as
-// error log entries with LogSink.Error, regardless of the verbosity level of
-// the Logger:
-//
-// logger :=
-// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
-//
-// The level of all other records gets reduced by the verbosity
-// level of the Logger and the result is negated. If it happens
-// to be negative, then it gets replaced by zero because a LogSink
-// is not expected to handled negative levels:
-//
-// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
-// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...)
-// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
-// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
-func ToSlogHandler(logger Logger) slog.Handler {
- if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
- return sink.handler
- }
-
- handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
- if slogSink, ok := handler.sink.(SlogSink); ok {
- handler.slogSink = slogSink
- }
- return handler
-}
-
-// SlogSink is an optional interface that a LogSink can implement to support
-// logging through the slog.Logger or slog.Handler APIs better. It then should
-// also support special slog values like slog.Group. When used as a
-// slog.Handler, the advantages are:
-//
-// - stack unwinding gets avoided in favor of logging the pre-recorded PC,
-// as intended by slog
-// - proper grouping of key/value pairs via WithGroup
-// - verbosity levels > slog.LevelInfo can be recorded
-// - less overhead
-//
-// Both APIs (Logger and slog.Logger/Handler) then are supported equally
-// well. Developers can pick whatever API suits them better and/or mix
-// packages which use either API in the same binary with a common logging
-// implementation.
-//
-// This interface is necessary because the type implementing the LogSink
-// interface cannot also implement the slog.Handler interface due to the
-// different prototype of the common Enabled method.
-//
-// An implementation could support both interfaces in two different types, but then
-// additional interfaces would be needed to convert between those types in FromSlogHandler
-// and ToSlogHandler.
-type SlogSink interface {
- LogSink
-
- Handle(ctx context.Context, record slog.Record) error
- WithAttrs(attrs []slog.Attr) SlogSink
- WithGroup(name string) SlogSink
-}
diff --git a/vendor/github.com/go-logr/logr/slogsink.go b/vendor/github.com/go-logr/logr/slogsink.go
deleted file mode 100644
index 4060fcbc..00000000
--- a/vendor/github.com/go-logr/logr/slogsink.go
+++ /dev/null
@@ -1,120 +0,0 @@
-//go:build go1.21
-// +build go1.21
-
-/*
-Copyright 2023 The logr Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package logr
-
-import (
- "context"
- "log/slog"
- "runtime"
- "time"
-)
-
-var (
- _ LogSink = &slogSink{}
- _ CallDepthLogSink = &slogSink{}
- _ Underlier = &slogSink{}
-)
-
-// Underlier is implemented by the LogSink returned by NewFromLogHandler.
-type Underlier interface {
- // GetUnderlying returns the Handler used by the LogSink.
- GetUnderlying() slog.Handler
-}
-
-const (
- // nameKey is used to log the `WithName` values as an additional attribute.
- nameKey = "logger"
-
- // errKey is used to log the error parameter of Error as an additional attribute.
- errKey = "err"
-)
-
-type slogSink struct {
- callDepth int
- name string
- handler slog.Handler
-}
-
-func (l *slogSink) Init(info RuntimeInfo) {
- l.callDepth = info.CallDepth
-}
-
-func (l *slogSink) GetUnderlying() slog.Handler {
- return l.handler
-}
-
-func (l *slogSink) WithCallDepth(depth int) LogSink {
- newLogger := *l
- newLogger.callDepth += depth
- return &newLogger
-}
-
-func (l *slogSink) Enabled(level int) bool {
- return l.handler.Enabled(context.Background(), slog.Level(-level))
-}
-
-func (l *slogSink) Info(level int, msg string, kvList ...interface{}) {
- l.log(nil, msg, slog.Level(-level), kvList...)
-}
-
-func (l *slogSink) Error(err error, msg string, kvList ...interface{}) {
- l.log(err, msg, slog.LevelError, kvList...)
-}
-
-func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) {
- var pcs [1]uintptr
- // skip runtime.Callers, this function, Info/Error, and all helper functions above that.
- runtime.Callers(3+l.callDepth, pcs[:])
-
- record := slog.NewRecord(time.Now(), level, msg, pcs[0])
- if l.name != "" {
- record.AddAttrs(slog.String(nameKey, l.name))
- }
- if err != nil {
- record.AddAttrs(slog.Any(errKey, err))
- }
- record.Add(kvList...)
- _ = l.handler.Handle(context.Background(), record)
-}
-
-func (l slogSink) WithName(name string) LogSink {
- if l.name != "" {
- l.name += "/"
- }
- l.name += name
- return &l
-}
-
-func (l slogSink) WithValues(kvList ...interface{}) LogSink {
- l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...))
- return &l
-}
-
-func kvListToAttrs(kvList ...interface{}) []slog.Attr {
- // We don't need the record itself, only its Add method.
- record := slog.NewRecord(time.Time{}, 0, "", 0)
- record.Add(kvList...)
- attrs := make([]slog.Attr, 0, record.NumAttrs())
- record.Attrs(func(attr slog.Attr) bool {
- attrs = append(attrs, attr)
- return true
- })
- return attrs
-}
diff --git a/vendor/github.com/go-logr/stdr/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE
deleted file mode 100644
index 261eeb9e..00000000
--- a/vendor/github.com/go-logr/stdr/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md
deleted file mode 100644
index 51586678..00000000
--- a/vendor/github.com/go-logr/stdr/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-# Minimal Go logging using logr and Go's standard library
-
-[](https://pkg.go.dev/github.com/go-logr/stdr)
-
-This package implements the [logr interface](https://github.com/go-logr/logr)
-in terms of Go's standard log package(https://pkg.go.dev/log).
diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go
deleted file mode 100644
index 93a8aab5..00000000
--- a/vendor/github.com/go-logr/stdr/stdr.go
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
-Copyright 2019 The logr Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package stdr implements github.com/go-logr/logr.Logger in terms of
-// Go's standard log package.
-package stdr
-
-import (
- "log"
- "os"
-
- "github.com/go-logr/logr"
- "github.com/go-logr/logr/funcr"
-)
-
-// The global verbosity level. See SetVerbosity().
-var globalVerbosity int
-
-// SetVerbosity sets the global level against which all info logs will be
-// compared. If this is greater than or equal to the "V" of the logger, the
-// message will be logged. A higher value here means more logs will be written.
-// The previous verbosity value is returned. This is not concurrent-safe -
-// callers must be sure to call it from only one goroutine.
-func SetVerbosity(v int) int {
- old := globalVerbosity
- globalVerbosity = v
- return old
-}
-
-// New returns a logr.Logger which is implemented by Go's standard log package,
-// or something like it. If std is nil, this will use a default logger
-// instead.
-//
-// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
-func New(std StdLogger) logr.Logger {
- return NewWithOptions(std, Options{})
-}
-
-// NewWithOptions returns a logr.Logger which is implemented by Go's standard
-// log package, or something like it. See New for details.
-func NewWithOptions(std StdLogger, opts Options) logr.Logger {
- if std == nil {
- // Go's log.Default() is only available in 1.16 and higher.
- std = log.New(os.Stderr, "", log.LstdFlags)
- }
-
- if opts.Depth < 0 {
- opts.Depth = 0
- }
-
- fopts := funcr.Options{
- LogCaller: funcr.MessageClass(opts.LogCaller),
- }
-
- sl := &logger{
- Formatter: funcr.NewFormatter(fopts),
- std: std,
- }
-
- // For skipping our own logger.Info/Error.
- sl.Formatter.AddCallDepth(1 + opts.Depth)
-
- return logr.New(sl)
-}
-
-// Options carries parameters which influence the way logs are generated.
-type Options struct {
- // Depth biases the assumed number of call frames to the "true" caller.
- // This is useful when the calling code calls a function which then calls
- // stdr (e.g. a logging shim to another API). Values less than zero will
- // be treated as zero.
- Depth int
-
- // LogCaller tells stdr to add a "caller" key to some or all log lines.
- // Go's log package has options to log this natively, too.
- LogCaller MessageClass
-
- // TODO: add an option to log the date/time
-}
-
-// MessageClass indicates which category or categories of messages to consider.
-type MessageClass int
-
-const (
- // None ignores all message classes.
- None MessageClass = iota
- // All considers all message classes.
- All
- // Info only considers info messages.
- Info
- // Error only considers error messages.
- Error
-)
-
-// StdLogger is the subset of the Go stdlib log.Logger API that is needed for
-// this adapter.
-type StdLogger interface {
- // Output is the same as log.Output and log.Logger.Output.
- Output(calldepth int, logline string) error
-}
-
-type logger struct {
- funcr.Formatter
- std StdLogger
-}
-
-var _ logr.LogSink = &logger{}
-var _ logr.CallDepthLogSink = &logger{}
-
-func (l logger) Enabled(level int) bool {
- return globalVerbosity >= level
-}
-
-func (l logger) Info(level int, msg string, kvList ...interface{}) {
- prefix, args := l.FormatInfo(level, msg, kvList)
- if prefix != "" {
- args = prefix + ": " + args
- }
- _ = l.std.Output(l.Formatter.GetDepth()+1, args)
-}
-
-func (l logger) Error(err error, msg string, kvList ...interface{}) {
- prefix, args := l.FormatError(err, msg, kvList)
- if prefix != "" {
- args = prefix + ": " + args
- }
- _ = l.std.Output(l.Formatter.GetDepth()+1, args)
-}
-
-func (l logger) WithName(name string) logr.LogSink {
- l.Formatter.AddName(name)
- return &l
-}
-
-func (l logger) WithValues(kvList ...interface{}) logr.LogSink {
- l.Formatter.AddValues(kvList)
- return &l
-}
-
-func (l logger) WithCallDepth(depth int) logr.LogSink {
- l.Formatter.AddCallDepth(depth)
- return &l
-}
-
-// Underlier exposes access to the underlying logging implementation. Since
-// callers only have a logr.Logger, they have to know which implementation is
-// in use, so this interface is less of an abstraction and more of way to test
-// type conversion.
-type Underlier interface {
- GetUnderlying() StdLogger
-}
-
-// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger
-// is itself an interface, the result may or may not be a Go log.Logger.
-func (l logger) GetUnderlying() StdLogger {
- return l.std
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/.gitignore b/vendor/github.com/go-sql-driver/mysql/.gitignore
deleted file mode 100644
index 2de28da1..00000000
--- a/vendor/github.com/go-sql-driver/mysql/.gitignore
+++ /dev/null
@@ -1,9 +0,0 @@
-.DS_Store
-.DS_Store?
-._*
-.Spotlight-V100
-.Trashes
-Icon?
-ehthumbs.db
-Thumbs.db
-.idea
diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS
deleted file mode 100644
index 4021b96c..00000000
--- a/vendor/github.com/go-sql-driver/mysql/AUTHORS
+++ /dev/null
@@ -1,142 +0,0 @@
-# This is the official list of Go-MySQL-Driver authors for copyright purposes.
-
-# If you are submitting a patch, please add your name or the name of the
-# organization which holds the copyright to this list in alphabetical order.
-
-# Names should be added to this file as
-# Name
-# The email address is not required for organizations.
-# Please keep the list sorted.
-
-
-# Individual Persons
-
-Aaron Hopkins
-Achille Roussel
-Aidan
-Alex Snast
-Alexey Palazhchenko
-Andrew Reid
-Animesh Ray
-Arne Hormann
-Ariel Mashraki
-Asta Xie
-Brian Hendriks
-Bulat Gaifullin
-Caine Jette
-Carlos Nieto
-Chris Kirkland
-Chris Moos
-Craig Wilson
-Daemonxiao <735462752 at qq.com>
-Daniel Montoya
-Daniel Nichter
-Daniël van Eeden
-Dave Protasowski
-DisposaBoy
-Egor Smolyakov
-Erwan Martin
-Evan Elias
-Evan Shaw
-Frederick Mayle
-Gustavo Kristic
-Gusted
-Hajime Nakagami
-Hanno Braun
-Henri Yandell
-Hirotaka Yamamoto
-Huyiguang
-ICHINOSE Shogo
-Ilia Cimpoes
-INADA Naoki
-Jacek Szwec
-James Harr
-Janek Vedock
-Jason Ng
-Jean-Yves Pellé
-Jeff Hodges
-Jeffrey Charles
-Jennifer Purevsuren
-Jerome Meyer
-Jiajia Zhong
-Jian Zhen
-Joshua Prunier
-Julien Lefevre
-Julien Schmidt
-Justin Li
-Justin Nuß
-Kamil Dziedzic
-Kei Kamikawa
-Kevin Malachowski
-Kieron Woodhouse
-Lance Tian
-Lennart Rudolph
-Leonardo YongUk Kim
-Linh Tran Tuan
-Lion Yang
-Luca Looz
-Lucas Liu
-Lunny Xiao
-Luke Scott
-Maciej Zimnoch
-Michael Woolnough
-Nathanial Murphy
-Nicola Peduzzi
-Oliver Bone
-Olivier Mengué
-oscarzhao
-Paul Bonser
-Paulius Lozys
-Peter Schultz
-Phil Porada
-Rebecca Chin
-Reed Allman
-Richard Wilkes
-Robert Russell
-Runrioter Wung
-Samantha Frank
-Santhosh Kumar Tekuri
-Sho Iizuka
-Sho Ikeda
-Shuode Li
-Simon J Mudd
-Soroush Pour
-Stan Putrya
-Stanley Gunawan
-Steven Hartland
-Tan Jinhua <312841925 at qq.com>
-Tetsuro Aoki
-Thomas Wodarek
-Tim Ruffles
-Tom Jenkinson
-Vladimir Kovpak
-Vladyslav Zhelezniak
-Xiangyu Hu
-Xiaobing Jiang
-Xiuming Chen
-Xuehong Chan
-Zhang Xiang
-Zhenye Xie
-Zhixin Wen
-Ziheng Lyu
-
-# Organizations
-
-Barracuda Networks, Inc.
-Counting Ltd.
-DigitalOcean Inc.
-Dolthub Inc.
-dyves labs AG
-Facebook Inc.
-GitHub Inc.
-Google Inc.
-InfoSum Ltd.
-Keybase Inc.
-Microsoft Corp.
-Multiplay Ltd.
-Percona LLC
-PingCAP Inc.
-Pivotal Inc.
-Shattered Silicon Ltd.
-Stripe Inc.
-Zendesk Inc.
diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
deleted file mode 100644
index 0c9bd9b1..00000000
--- a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md
+++ /dev/null
@@ -1,308 +0,0 @@
-## Version 1.8.1 (2024-03-26)
-
-Bugfixes:
-
-- fix race condition when context is canceled in [#1562](https://github.com/go-sql-driver/mysql/pull/1562) and [#1570](https://github.com/go-sql-driver/mysql/pull/1570)
-
-## Version 1.8.0 (2024-03-09)
-
-Major Changes:
-
-- Use `SET NAMES charset COLLATE collation`. by @methane in [#1437](https://github.com/go-sql-driver/mysql/pull/1437)
- - Older go-mysql-driver used `collation_id` in the handshake packet. But it caused collation mismatch in some situation.
- - If you don't specify charset nor collation, go-mysql-driver sends `SET NAMES utf8mb4` for new connection. This uses server's default collation for utf8mb4.
- - If you specify charset, go-mysql-driver sends `SET NAMES `. This uses the server's default collation for ``.
- - If you specify collation and/or charset, go-mysql-driver sends `SET NAMES charset COLLATE collation`.
-- PathEscape dbname in DSN. by @methane in [#1432](https://github.com/go-sql-driver/mysql/pull/1432)
- - This is backward incompatible in rare case. Check your DSN.
-- Drop Go 1.13-17 support by @methane in [#1420](https://github.com/go-sql-driver/mysql/pull/1420)
- - Use Go 1.18+
-- Parse numbers on text protocol too by @methane in [#1452](https://github.com/go-sql-driver/mysql/pull/1452)
- - When text protocol is used, go-mysql-driver passed bare `[]byte` to database/sql for avoid unnecessary allocation and conversion.
- - If user specified `*any` to `Scan()`, database/sql passed the `[]byte` into the target variable.
- - This confused users because most user doesn't know when text/binary protocol used.
- - go-mysql-driver 1.8 converts integer/float values into int64/double even in text protocol. This doesn't increase allocation compared to `[]byte` and conversion cost is negatable.
-- New options start using the Functional Option Pattern to avoid increasing technical debt in the Config object. Future version may introduce Functional Option for existing options, but not for now.
- - Make TimeTruncate functional option by @methane in [1552](https://github.com/go-sql-driver/mysql/pull/1552)
- - Add BeforeConnect callback to configuration object by @ItalyPaleAle in [#1469](https://github.com/go-sql-driver/mysql/pull/1469)
-
-
-Other changes:
-
-- Adding DeregisterDialContext to prevent memory leaks with dialers we don't need anymore by @jypelle in https://github.com/go-sql-driver/mysql/pull/1422
-- Make logger configurable per connection by @frozenbonito in https://github.com/go-sql-driver/mysql/pull/1408
-- Fix ColumnType.DatabaseTypeName for mediumint unsigned by @evanelias in https://github.com/go-sql-driver/mysql/pull/1428
-- Add connection attributes by @Daemonxiao in https://github.com/go-sql-driver/mysql/pull/1389
-- Stop `ColumnTypeScanType()` from returning `sql.RawBytes` by @methane in https://github.com/go-sql-driver/mysql/pull/1424
-- Exec() now provides access to status of multiple statements. by @mherr-google in https://github.com/go-sql-driver/mysql/pull/1309
-- Allow to change (or disable) the default driver name for registration by @dolmen in https://github.com/go-sql-driver/mysql/pull/1499
-- Add default connection attribute '_server_host' by @oblitorum in https://github.com/go-sql-driver/mysql/pull/1506
-- QueryUnescape DSN ConnectionAttribute value by @zhangyangyu in https://github.com/go-sql-driver/mysql/pull/1470
-- Add client_ed25519 authentication by @Gusted in https://github.com/go-sql-driver/mysql/pull/1518
-
-## Version 1.7.1 (2023-04-25)
-
-Changes:
-
- - bump actions/checkout@v3 and actions/setup-go@v3 (#1375)
- - Add go1.20 and mariadb10.11 to the testing matrix (#1403)
- - Increase default maxAllowedPacket size. (#1411)
-
-Bugfixes:
-
- - Use SET syntax as specified in the MySQL documentation (#1402)
-
-
-## Version 1.7 (2022-11-29)
-
-Changes:
-
- - Drop support of Go 1.12 (#1211)
- - Refactoring `(*textRows).readRow` in a more clear way (#1230)
- - util: Reduce boundary check in escape functions. (#1316)
- - enhancement for mysqlConn handleAuthResult (#1250)
-
-New Features:
-
- - support Is comparison on MySQLError (#1210)
- - return unsigned in database type name when necessary (#1238)
- - Add API to express like a --ssl-mode=PREFERRED MySQL client (#1370)
- - Add SQLState to MySQLError (#1321)
-
-Bugfixes:
-
- - Fix parsing 0 year. (#1257)
-
-
-## Version 1.6 (2021-04-01)
-
-Changes:
-
- - Migrate the CI service from travis-ci to GitHub Actions (#1176, #1183, #1190)
- - `NullTime` is deprecated (#960, #1144)
- - Reduce allocations when building SET command (#1111)
- - Performance improvement for time formatting (#1118)
- - Performance improvement for time parsing (#1098, #1113)
-
-New Features:
-
- - Implement `driver.Validator` interface (#1106, #1174)
- - Support returning `uint64` from `Valuer` in `ConvertValue` (#1143)
- - Add `json.RawMessage` for converter and prepared statement (#1059)
- - Interpolate `json.RawMessage` as `string` (#1058)
- - Implements `CheckNamedValue` (#1090)
-
-Bugfixes:
-
- - Stop rounding times (#1121, #1172)
- - Put zero filler into the SSL handshake packet (#1066)
- - Fix checking cancelled connections back into the connection pool (#1095)
- - Fix remove last 0 byte for mysql_old_password when password is empty (#1133)
-
-
-## Version 1.5 (2020-01-07)
-
-Changes:
-
- - Dropped support Go 1.9 and lower (#823, #829, #886, #1016, #1017)
- - Improve buffer handling (#890)
- - Document potentially insecure TLS configs (#901)
- - Use a double-buffering scheme to prevent data races (#943)
- - Pass uint64 values without converting them to string (#838, #955)
- - Update collations and make utf8mb4 default (#877, #1054)
- - Make NullTime compatible with sql.NullTime in Go 1.13+ (#995)
- - Removed CloudSQL support (#993, #1007)
- - Add Go Module support (#1003)
-
-New Features:
-
- - Implement support of optional TLS (#900)
- - Check connection liveness (#934, #964, #997, #1048, #1051, #1052)
- - Implement Connector Interface (#941, #958, #1020, #1035)
-
-Bugfixes:
-
- - Mark connections as bad on error during ping (#875)
- - Mark connections as bad on error during dial (#867)
- - Fix connection leak caused by rapid context cancellation (#1024)
- - Mark connections as bad on error during Conn.Prepare (#1030)
-
-
-## Version 1.4.1 (2018-11-14)
-
-Bugfixes:
-
- - Fix TIME format for binary columns (#818)
- - Fix handling of empty auth plugin names (#835)
- - Fix caching_sha2_password with empty password (#826)
- - Fix canceled context broke mysqlConn (#862)
- - Fix OldAuthSwitchRequest support (#870)
- - Fix Auth Response packet for cleartext password (#887)
-
-## Version 1.4 (2018-06-03)
-
-Changes:
-
- - Documentation fixes (#530, #535, #567)
- - Refactoring (#575, #579, #580, #581, #603, #615, #704)
- - Cache column names (#444)
- - Sort the DSN parameters in DSNs generated from a config (#637)
- - Allow native password authentication by default (#644)
- - Use the default port if it is missing in the DSN (#668)
- - Removed the `strict` mode (#676)
- - Do not query `max_allowed_packet` by default (#680)
- - Dropped support Go 1.6 and lower (#696)
- - Updated `ConvertValue()` to match the database/sql/driver implementation (#760)
- - Document the usage of `0000-00-00T00:00:00` as the time.Time zero value (#783)
- - Improved the compatibility of the authentication system (#807)
-
-New Features:
-
- - Multi-Results support (#537)
- - `rejectReadOnly` DSN option (#604)
- - `context.Context` support (#608, #612, #627, #761)
- - Transaction isolation level support (#619, #744)
- - Read-Only transactions support (#618, #634)
- - `NewConfig` function which initializes a config with default values (#679)
- - Implemented the `ColumnType` interfaces (#667, #724)
- - Support for custom string types in `ConvertValue` (#623)
- - Implemented `NamedValueChecker`, improving support for uint64 with high bit set (#690, #709, #710)
- - `caching_sha2_password` authentication plugin support (#794, #800, #801, #802)
- - Implemented `driver.SessionResetter` (#779)
- - `sha256_password` authentication plugin support (#808)
-
-Bugfixes:
-
- - Use the DSN hostname as TLS default ServerName if `tls=true` (#564, #718)
- - Fixed LOAD LOCAL DATA INFILE for empty files (#590)
- - Removed columns definition cache since it sometimes cached invalid data (#592)
- - Don't mutate registered TLS configs (#600)
- - Make RegisterTLSConfig concurrency-safe (#613)
- - Handle missing auth data in the handshake packet correctly (#646)
- - Do not retry queries when data was written to avoid data corruption (#302, #736)
- - Cache the connection pointer for error handling before invalidating it (#678)
- - Fixed imports for appengine/cloudsql (#700)
- - Fix sending STMT_LONG_DATA for 0 byte data (#734)
- - Set correct capacity for []bytes read from length-encoded strings (#766)
- - Make RegisterDial concurrency-safe (#773)
-
-
-## Version 1.3 (2016-12-01)
-
-Changes:
-
- - Go 1.1 is no longer supported
- - Use decimals fields in MySQL to format time types (#249)
- - Buffer optimizations (#269)
- - TLS ServerName defaults to the host (#283)
- - Refactoring (#400, #410, #437)
- - Adjusted documentation for second generation CloudSQL (#485)
- - Documented DSN system var quoting rules (#502)
- - Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512)
-
-New Features:
-
- - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
- - Support for returning table alias on Columns() (#289, #359, #382)
- - Placeholder interpolation, can be activated with the DSN parameter `interpolateParams=true` (#309, #318, #490)
- - Support for uint64 parameters with high bit set (#332, #345)
- - Cleartext authentication plugin support (#327)
- - Exported ParseDSN function and the Config struct (#403, #419, #429)
- - Read / Write timeouts (#401)
- - Support for JSON field type (#414)
- - Support for multi-statements and multi-results (#411, #431)
- - DSN parameter to set the driver-side max_allowed_packet value manually (#489)
- - Native password authentication plugin support (#494, #524)
-
-Bugfixes:
-
- - Fixed handling of queries without columns and rows (#255)
- - Fixed a panic when SetKeepAlive() failed (#298)
- - Handle ERR packets while reading rows (#321)
- - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349)
- - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356)
- - Actually zero out bytes in handshake response (#378)
- - Fixed race condition in registering LOAD DATA INFILE handler (#383)
- - Fixed tests with MySQL 5.7.9+ (#380)
- - QueryUnescape TLS config names (#397)
- - Fixed "broken pipe" error by writing to closed socket (#390)
- - Fixed LOAD LOCAL DATA INFILE buffering (#424)
- - Fixed parsing of floats into float64 when placeholders are used (#434)
- - Fixed DSN tests with Go 1.7+ (#459)
- - Handle ERR packets while waiting for EOF (#473)
- - Invalidate connection on error while discarding additional results (#513)
- - Allow terminating packets of length 0 (#516)
-
-
-## Version 1.2 (2014-06-03)
-
-Changes:
-
- - We switched back to a "rolling release". `go get` installs the current master branch again
- - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver
- - Exported errors to allow easy checking from application code
- - Enabled TCP Keepalives on TCP connections
- - Optimized INFILE handling (better buffer size calculation, lazy init, ...)
- - The DSN parser also checks for a missing separating slash
- - Faster binary date / datetime to string formatting
- - Also exported the MySQLWarning type
- - mysqlConn.Close returns the first error encountered instead of ignoring all errors
- - writePacket() automatically writes the packet size to the header
- - readPacket() uses an iterative approach instead of the recursive approach to merge split packets
-
-New Features:
-
- - `RegisterDial` allows the usage of a custom dial function to establish the network connection
- - Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter
- - Logging of critical errors is configurable with `SetLogger`
- - Google CloudSQL support
-
-Bugfixes:
-
- - Allow more than 32 parameters in prepared statements
- - Various old_password fixes
- - Fixed TestConcurrent test to pass Go's race detection
- - Fixed appendLengthEncodedInteger for large numbers
- - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo)
-
-
-## Version 1.1 (2013-11-02)
-
-Changes:
-
- - Go-MySQL-Driver now requires Go 1.1
- - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore
- - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors
- - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")`
- - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'.
- - Use the IO buffer also for writing. This results in zero allocations (by the driver) for most queries
- - Optimized the buffer for reading
- - stmt.Query now caches column metadata
- - New Logo
- - Changed the copyright header to include all contributors
- - Improved the LOAD INFILE documentation
- - The driver struct is now exported to make the driver directly accessible
- - Refactored the driver tests
- - Added more benchmarks and moved all to a separate file
- - Other small refactoring
-
-New Features:
-
- - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure
- - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs
- - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used
-
-Bugfixes:
-
- - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification
- - Convert to DB timezone when inserting `time.Time`
- - Split packets (more than 16MB) are now merged correctly
- - Fixed false positive `io.EOF` errors when the data was fully read
- - Avoid panics on reuse of closed connections
- - Fixed empty string producing false nil values
- - Fixed sign byte for positive TIME fields
-
-
-## Version 1.0 (2013-05-14)
-
-Initial Release
diff --git a/vendor/github.com/go-sql-driver/mysql/LICENSE b/vendor/github.com/go-sql-driver/mysql/LICENSE
deleted file mode 100644
index 14e2f777..00000000
--- a/vendor/github.com/go-sql-driver/mysql/LICENSE
+++ /dev/null
@@ -1,373 +0,0 @@
-Mozilla Public License Version 2.0
-==================================
-
-1. Definitions
---------------
-
-1.1. "Contributor"
- means each individual or legal entity that creates, contributes to
- the creation of, or owns Covered Software.
-
-1.2. "Contributor Version"
- means the combination of the Contributions of others (if any) used
- by a Contributor and that particular Contributor's Contribution.
-
-1.3. "Contribution"
- means Covered Software of a particular Contributor.
-
-1.4. "Covered Software"
- means Source Code Form to which the initial Contributor has attached
- the notice in Exhibit A, the Executable Form of such Source Code
- Form, and Modifications of such Source Code Form, in each case
- including portions thereof.
-
-1.5. "Incompatible With Secondary Licenses"
- means
-
- (a) that the initial Contributor has attached the notice described
- in Exhibit B to the Covered Software; or
-
- (b) that the Covered Software was made available under the terms of
- version 1.1 or earlier of the License, but not also under the
- terms of a Secondary License.
-
-1.6. "Executable Form"
- means any form of the work other than Source Code Form.
-
-1.7. "Larger Work"
- means a work that combines Covered Software with other material, in
- a separate file or files, that is not Covered Software.
-
-1.8. "License"
- means this document.
-
-1.9. "Licensable"
- means having the right to grant, to the maximum extent possible,
- whether at the time of the initial grant or subsequently, any and
- all of the rights conveyed by this License.
-
-1.10. "Modifications"
- means any of the following:
-
- (a) any file in Source Code Form that results from an addition to,
- deletion from, or modification of the contents of Covered
- Software; or
-
- (b) any new file in Source Code Form that contains any Covered
- Software.
-
-1.11. "Patent Claims" of a Contributor
- means any patent claim(s), including without limitation, method,
- process, and apparatus claims, in any patent Licensable by such
- Contributor that would be infringed, but for the grant of the
- License, by the making, using, selling, offering for sale, having
- made, import, or transfer of either its Contributions or its
- Contributor Version.
-
-1.12. "Secondary License"
- means either the GNU General Public License, Version 2.0, the GNU
- Lesser General Public License, Version 2.1, the GNU Affero General
- Public License, Version 3.0, or any later versions of those
- licenses.
-
-1.13. "Source Code Form"
- means the form of the work preferred for making modifications.
-
-1.14. "You" (or "Your")
- means an individual or a legal entity exercising rights under this
- License. For legal entities, "You" includes any entity that
- controls, is controlled by, or is under common control with You. For
- purposes of this definition, "control" means (a) the power, direct
- or indirect, to cause the direction or management of such entity,
- whether by contract or otherwise, or (b) ownership of more than
- fifty percent (50%) of the outstanding shares or beneficial
- ownership of such entity.
-
-2. License Grants and Conditions
---------------------------------
-
-2.1. Grants
-
-Each Contributor hereby grants You a world-wide, royalty-free,
-non-exclusive license:
-
-(a) under intellectual property rights (other than patent or trademark)
- Licensable by such Contributor to use, reproduce, make available,
- modify, display, perform, distribute, and otherwise exploit its
- Contributions, either on an unmodified basis, with Modifications, or
- as part of a Larger Work; and
-
-(b) under Patent Claims of such Contributor to make, use, sell, offer
- for sale, have made, import, and otherwise transfer either its
- Contributions or its Contributor Version.
-
-2.2. Effective Date
-
-The licenses granted in Section 2.1 with respect to any Contribution
-become effective for each Contribution on the date the Contributor first
-distributes such Contribution.
-
-2.3. Limitations on Grant Scope
-
-The licenses granted in this Section 2 are the only rights granted under
-this License. No additional rights or licenses will be implied from the
-distribution or licensing of Covered Software under this License.
-Notwithstanding Section 2.1(b) above, no patent license is granted by a
-Contributor:
-
-(a) for any code that a Contributor has removed from Covered Software;
- or
-
-(b) for infringements caused by: (i) Your and any other third party's
- modifications of Covered Software, or (ii) the combination of its
- Contributions with other software (except as part of its Contributor
- Version); or
-
-(c) under Patent Claims infringed by Covered Software in the absence of
- its Contributions.
-
-This License does not grant any rights in the trademarks, service marks,
-or logos of any Contributor (except as may be necessary to comply with
-the notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
-No Contributor makes additional grants as a result of Your choice to
-distribute the Covered Software under a subsequent version of this
-License (see Section 10.2) or under the terms of a Secondary License (if
-permitted under the terms of Section 3.3).
-
-2.5. Representation
-
-Each Contributor represents that the Contributor believes its
-Contributions are its original creation(s) or it has sufficient rights
-to grant the rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
-This License is not intended to limit any rights You have under
-applicable copyright doctrines of fair use, fair dealing, or other
-equivalents.
-
-2.7. Conditions
-
-Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
-in Section 2.1.
-
-3. Responsibilities
--------------------
-
-3.1. Distribution of Source Form
-
-All distribution of Covered Software in Source Code Form, including any
-Modifications that You create or to which You contribute, must be under
-the terms of this License. You must inform recipients that the Source
-Code Form of the Covered Software is governed by the terms of this
-License, and how they can obtain a copy of this License. You may not
-attempt to alter or restrict the recipients' rights in the Source Code
-Form.
-
-3.2. Distribution of Executable Form
-
-If You distribute Covered Software in Executable Form then:
-
-(a) such Covered Software must also be made available in Source Code
- Form, as described in Section 3.1, and You must inform recipients of
- the Executable Form how they can obtain a copy of such Source Code
- Form by reasonable means in a timely manner, at a charge no more
- than the cost of distribution to the recipient; and
-
-(b) You may distribute such Executable Form under the terms of this
- License, or sublicense it under different terms, provided that the
- license for the Executable Form does not attempt to limit or alter
- the recipients' rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
-You may create and distribute a Larger Work under terms of Your choice,
-provided that You also comply with the requirements of this License for
-the Covered Software. If the Larger Work is a combination of Covered
-Software with a work governed by one or more Secondary Licenses, and the
-Covered Software is not Incompatible With Secondary Licenses, this
-License permits You to additionally distribute such Covered Software
-under the terms of such Secondary License(s), so that the recipient of
-the Larger Work may, at their option, further distribute the Covered
-Software under the terms of either this License or such Secondary
-License(s).
-
-3.4. Notices
-
-You may not remove or alter the substance of any license notices
-(including copyright notices, patent notices, disclaimers of warranty,
-or limitations of liability) contained within the Source Code Form of
-the Covered Software, except that You may alter any license notices to
-the extent required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
-You may choose to offer, and to charge a fee for, warranty, support,
-indemnity or liability obligations to one or more recipients of Covered
-Software. However, You may do so only on Your own behalf, and not on
-behalf of any Contributor. You must make it absolutely clear that any
-such warranty, support, indemnity, or liability obligation is offered by
-You alone, and You hereby agree to indemnify every Contributor for any
-liability incurred by such Contributor as a result of warranty, support,
-indemnity or liability terms You offer. You may include additional
-disclaimers of warranty and limitations of liability specific to any
-jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
----------------------------------------------------
-
-If it is impossible for You to comply with any of the terms of this
-License with respect to some or all of the Covered Software due to
-statute, judicial order, or regulation then You must: (a) comply with
-the terms of this License to the maximum extent possible; and (b)
-describe the limitations and the code they affect. Such description must
-be placed in a text file included with all distributions of the Covered
-Software under this License. Except to the extent prohibited by statute
-or regulation, such description must be sufficiently detailed for a
-recipient of ordinary skill to be able to understand it.
-
-5. Termination
---------------
-
-5.1. The rights granted under this License will terminate automatically
-if You fail to comply with any of its terms. However, if You become
-compliant, then the rights granted under this License from a particular
-Contributor are reinstated (a) provisionally, unless and until such
-Contributor explicitly and finally terminates Your grants, and (b) on an
-ongoing basis, if such Contributor fails to notify You of the
-non-compliance by some reasonable means prior to 60 days after You have
-come back into compliance. Moreover, Your grants from a particular
-Contributor are reinstated on an ongoing basis if such Contributor
-notifies You of the non-compliance by some reasonable means, this is the
-first time You have received notice of non-compliance with this License
-from such Contributor, and You become compliant prior to 30 days after
-Your receipt of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
-infringement claim (excluding declaratory judgment actions,
-counter-claims, and cross-claims) alleging that a Contributor Version
-directly or indirectly infringes any patent, then the rights granted to
-You by any and all Contributors for the Covered Software under Section
-2.1 of this License shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all
-end user license agreements (excluding distributors and resellers) which
-have been validly granted by You or Your distributors under this License
-prior to termination shall survive termination.
-
-************************************************************************
-* *
-* 6. Disclaimer of Warranty *
-* ------------------------- *
-* *
-* Covered Software is provided under this License on an "as is" *
-* basis, without warranty of any kind, either expressed, implied, or *
-* statutory, including, without limitation, warranties that the *
-* Covered Software is free of defects, merchantable, fit for a *
-* particular purpose or non-infringing. The entire risk as to the *
-* quality and performance of the Covered Software is with You. *
-* Should any Covered Software prove defective in any respect, You *
-* (not any Contributor) assume the cost of any necessary servicing, *
-* repair, or correction. This disclaimer of warranty constitutes an *
-* essential part of this License. No use of any Covered Software is *
-* authorized under this License except under this disclaimer. *
-* *
-************************************************************************
-
-************************************************************************
-* *
-* 7. Limitation of Liability *
-* -------------------------- *
-* *
-* Under no circumstances and under no legal theory, whether tort *
-* (including negligence), contract, or otherwise, shall any *
-* Contributor, or anyone who distributes Covered Software as *
-* permitted above, be liable to You for any direct, indirect, *
-* special, incidental, or consequential damages of any character *
-* including, without limitation, damages for lost profits, loss of *
-* goodwill, work stoppage, computer failure or malfunction, or any *
-* and all other commercial damages or losses, even if such party *
-* shall have been informed of the possibility of such damages. This *
-* limitation of liability shall not apply to liability for death or *
-* personal injury resulting from such party's negligence to the *
-* extent applicable law prohibits such limitation. Some *
-* jurisdictions do not allow the exclusion or limitation of *
-* incidental or consequential damages, so this exclusion and *
-* limitation may not apply to You. *
-* *
-************************************************************************
-
-8. Litigation
--------------
-
-Any litigation relating to this License may be brought only in the
-courts of a jurisdiction where the defendant maintains its principal
-place of business and such litigation shall be governed by laws of that
-jurisdiction, without reference to its conflict-of-law provisions.
-Nothing in this Section shall prevent a party's ability to bring
-cross-claims or counter-claims.
-
-9. Miscellaneous
-----------------
-
-This License represents the complete agreement concerning the subject
-matter hereof. If any provision of this License is held to be
-unenforceable, such provision shall be reformed only to the extent
-necessary to make it enforceable. Any law or regulation which provides
-that the language of a contract shall be construed against the drafter
-shall not be used to construe this License against a Contributor.
-
-10. Versions of the License
----------------------------
-
-10.1. New Versions
-
-Mozilla Foundation is the license steward. Except as provided in Section
-10.3, no one other than the license steward has the right to modify or
-publish new versions of this License. Each version will be given a
-distinguishing version number.
-
-10.2. Effect of New Versions
-
-You may distribute the Covered Software under the terms of the version
-of the License under which You originally received the Covered Software,
-or under the terms of any subsequent version published by the license
-steward.
-
-10.3. Modified Versions
-
-If you create software not governed by this License, and you want to
-create a new license for such software, you may create and use a
-modified version of this License if you rename the license and remove
-any references to the name of the license steward (except to note that
-such modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary
-Licenses
-
-If You choose to distribute Source Code Form that is Incompatible With
-Secondary Licenses under the terms of this version of the License, the
-notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
--------------------------------------------
-
- This Source Code Form is subject to the terms of the Mozilla Public
- License, v. 2.0. If a copy of the MPL was not distributed with this
- file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular
-file, then You may include the notice in a location (such as a LICENSE
-file in a relevant directory) where a recipient would be likely to look
-for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - "Incompatible With Secondary Licenses" Notice
----------------------------------------------------------
-
- This Source Code Form is "Incompatible With Secondary Licenses", as
- defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
deleted file mode 100644
index 4968cb06..00000000
--- a/vendor/github.com/go-sql-driver/mysql/README.md
+++ /dev/null
@@ -1,581 +0,0 @@
-# Go-MySQL-Driver
-
-A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package
-
-
-
----------------------------------------
- * [Features](#features)
- * [Requirements](#requirements)
- * [Installation](#installation)
- * [Usage](#usage)
- * [DSN (Data Source Name)](#dsn-data-source-name)
- * [Password](#password)
- * [Protocol](#protocol)
- * [Address](#address)
- * [Parameters](#parameters)
- * [Examples](#examples)
- * [Connection pool and timeouts](#connection-pool-and-timeouts)
- * [context.Context Support](#contextcontext-support)
- * [ColumnType Support](#columntype-support)
- * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
- * [time.Time support](#timetime-support)
- * [Unicode support](#unicode-support)
- * [Testing / Development](#testing--development)
- * [License](#license)
-
----------------------------------------
-
-## Features
- * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance")
- * Native Go implementation. No C-bindings, just pure Go
- * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](https://godoc.org/github.com/go-sql-driver/mysql#DialFunc)
- * Automatic handling of broken connections
- * Automatic Connection Pooling *(by database/sql package)*
- * Supports queries larger than 16MB
- * Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support.
- * Intelligent `LONG DATA` handling in prepared statements
- * Secure `LOAD DATA LOCAL INFILE` support with file allowlisting and `io.Reader` support
- * Optional `time.Time` parsing
- * Optional placeholder interpolation
-
-## Requirements
-
-* Go 1.19 or higher. We aim to support the 3 latest versions of Go.
-* MySQL (5.7+) and MariaDB (10.3+) are supported.
-* [TiDB](https://github.com/pingcap/tidb) is supported by PingCAP.
- * Do not ask questions about TiDB in our issue tracker or forum.
- * [Document](https://docs.pingcap.com/tidb/v6.1/dev-guide-sample-application-golang)
- * [Forum](https://ask.pingcap.com/)
-* go-mysql would work with Percona Server, Google CloudSQL or Sphinx (2.2.3+).
- * Maintainers won't support them. Do not expect issues are investigated and resolved by maintainers.
- * Investigate issues yourself and please send a pull request to fix it.
-
----------------------------------------
-
-## Installation
-Simple install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
-```bash
-go get -u github.com/go-sql-driver/mysql
-```
-Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
-
-## Usage
-_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](https://golang.org/pkg/database/sql/) API then.
-
-Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`:
-
-```go
-import (
- "database/sql"
- "time"
-
- _ "github.com/go-sql-driver/mysql"
-)
-
-// ...
-
-db, err := sql.Open("mysql", "user:password@/dbname")
-if err != nil {
- panic(err)
-}
-// See "Important settings" section.
-db.SetConnMaxLifetime(time.Minute * 3)
-db.SetMaxOpenConns(10)
-db.SetMaxIdleConns(10)
-```
-
-[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples").
-
-### Important settings
-
-`db.SetConnMaxLifetime()` is required to ensure connections are closed by the driver safely before connection is closed by MySQL server, OS, or other middlewares. Since some middlewares close idle connections by 5 minutes, we recommend timeout shorter than 5 minutes. This setting helps load balancing and changing system variables too.
-
-`db.SetMaxOpenConns()` is highly recommended to limit the number of connection used by the application. There is no recommended limit number because it depends on application and MySQL server.
-
-`db.SetMaxIdleConns()` is recommended to be set same to `db.SetMaxOpenConns()`. When it is smaller than `SetMaxOpenConns()`, connections can be opened and closed much more frequently than you expect. Idle connections can be closed by the `db.SetConnMaxLifetime()`. If you want to close idle connections more rapidly, you can use `db.SetConnMaxIdleTime()` since Go 1.15.
-
-
-### DSN (Data Source Name)
-
-The Data Source Name has a common format, like e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php) uses it, but without type-prefix (optional parts marked by squared brackets):
-```
-[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN]
-```
-
-A DSN in its fullest form:
-```
-username:password@protocol(address)/dbname?param=value
-```
-
-Except for the databasename, all values are optional. So the minimal DSN is:
-```
-/dbname
-```
-
-If you do not want to preselect a database, leave `dbname` empty:
-```
-/
-```
-This has the same effect as an empty DSN string:
-```
-
-```
-
-`dbname` is escaped by [PathEscape()](https://pkg.go.dev/net/url#PathEscape) since v1.8.0. If your database name is `dbname/withslash`, it becomes:
-
-```
-/dbname%2Fwithslash
-```
-
-Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct.
-
-#### Password
-Passwords can consist of any character. Escaping is **not** necessary.
-
-#### Protocol
-See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which networks are available.
-In general you should use a Unix domain socket if available and TCP otherwise for best performance.
-
-#### Address
-For TCP and UDP networks, addresses have the form `host[:port]`.
-If `port` is omitted, the default port will be used.
-If `host` is a literal IPv6 address, it must be enclosed in square brackets.
-The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
-
-For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`.
-
-#### Parameters
-*Parameters are case-sensitive!*
-
-Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`.
-
-##### `allowAllFiles`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-`allowAllFiles=true` disables the file allowlist for `LOAD DATA LOCAL INFILE` and allows *all* files.
-[*Might be insecure!*](https://dev.mysql.com/doc/refman/8.0/en/load-data.html#load-data-local)
-
-##### `allowCleartextPasswords`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-`allowCleartextPasswords=true` allows using the [cleartext client side plugin](https://dev.mysql.com/doc/en/cleartext-pluggable-authentication.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
-
-
-##### `allowFallbackToPlaintext`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-`allowFallbackToPlaintext=true` acts like a `--ssl-mode=PREFERRED` MySQL client as described in [Command Options for Connecting to the Server](https://dev.mysql.com/doc/refman/5.7/en/connection-options.html#option_general_ssl-mode)
-
-##### `allowNativePasswords`
-
-```
-Type: bool
-Valid Values: true, false
-Default: true
-```
-`allowNativePasswords=false` disallows the usage of MySQL native password method.
-
-##### `allowOldPasswords`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords).
-
-##### `charset`
-
-```
-Type: string
-Valid Values:
-Default: none
-```
-
-Sets the charset used for client-server interaction (`"SET NAMES "`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset fails. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`).
-
-See also [Unicode Support](#unicode-support).
-
-##### `checkConnLiveness`
-
-```
-Type: bool
-Valid Values: true, false
-Default: true
-```
-
-On supported platforms connections retrieved from the connection pool are checked for liveness before using them. If the check fails, the respective connection is marked as bad and the query retried with another connection.
-`checkConnLiveness=false` disables this liveness check of connections.
-
-##### `collation`
-
-```
-Type: string
-Valid Values:
-Default: utf8mb4_general_ci
-```
-
-Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail.
-
-A list of valid charsets for a server is retrievable with `SHOW COLLATION`.
-
-The default collation (`utf8mb4_general_ci`) is supported from MySQL 5.5. You should use an older collation (e.g. `utf8_general_ci`) for older MySQL.
-
-Collations for charset "ucs2", "utf16", "utf16le", and "utf32" can not be used ([ref](https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset)).
-
-See also [Unicode Support](#unicode-support).
-
-##### `clientFoundRows`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed.
-
-##### `columnsWithAlias`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example:
-
-```
-SELECT u.id FROM users as u
-```
-
-will return `u.id` instead of just `id` if `columnsWithAlias=true`.
-
-##### `interpolateParams`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with given parameters. This reduces the number of roundtrips, since the driver has to prepare a statement, execute it with given parameters and close the statement again with `interpolateParams=false`.
-
-*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are rejected as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!*
-
-##### `loc`
-
-```
-Type: string
-Valid Values:
-Default: UTC
-```
-
-Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](https://golang.org/pkg/time/#LoadLocation) for details.
-
-Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter.
-
-Please keep in mind, that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
-
-##### `timeTruncate`
-
-```
-Type: duration
-Default: 0
-```
-
-[Truncate time values](https://pkg.go.dev/time#Duration.Truncate) to the specified duration. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
-
-##### `maxAllowedPacket`
-```
-Type: decimal number
-Default: 64*1024*1024
-```
-
-Max packet size allowed in bytes. The default value is 64 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*.
-
-##### `multiStatements`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-Allow multiple statements in one query. This can be used to bach multiple queries. Use [Rows.NextResultSet()](https://pkg.go.dev/database/sql#Rows.NextResultSet) to get result of the second and subsequent queries.
-
-When `multiStatements` is used, `?` parameters must only be used in the first statement. [interpolateParams](#interpolateparams) can be used to avoid this limitation unless prepared statement is used explicitly.
-
-It's possible to access the last inserted ID and number of affected rows for multiple statements by using `sql.Conn.Raw()` and the `mysql.Result`. For example:
-
-```go
-conn, _ := db.Conn(ctx)
-conn.Raw(func(conn any) error {
- ex := conn.(driver.Execer)
- res, err := ex.Exec(`
- UPDATE point SET x = 1 WHERE y = 2;
- UPDATE point SET x = 2 WHERE y = 3;
- `, nil)
- // Both slices have 2 elements.
- log.Print(res.(mysql.Result).AllRowsAffected())
- log.Print(res.(mysql.Result).AllLastInsertIds())
-})
-```
-
-##### `parseTime`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string`
-The date or datetime like `0000-00-00 00:00:00` is converted into zero value of `time.Time`.
-
-
-##### `readTimeout`
-
-```
-Type: duration
-Default: 0
-```
-
-I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
-
-##### `rejectReadOnly`
-
-```
-Type: bool
-Valid Values: true, false
-Default: false
-```
-
-
-`rejectReadOnly=true` causes the driver to reject read-only connections. This
-is for a possible race condition during an automatic failover, where the mysql
-client gets connected to a read-only replica after the failover.
-
-Note that this should be a fairly rare case, as an automatic failover normally
-happens when the primary is down, and the race condition shouldn't happen
-unless it comes back up online as soon as the failover is kicked off. On the
-other hand, when this happens, a MySQL application can get stuck on a
-read-only connection until restarted. It is however fairly easy to reproduce,
-for example, using a manual failover on AWS Aurora's MySQL-compatible cluster.
-
-If you are not relying on read-only transactions to reject writes that aren't
-supposed to happen, setting this on some MySQL providers (such as AWS Aurora)
-is safer for failovers.
-
-Note that ERROR 1290 can be returned for a `read-only` server and this option will
-cause a retry for that error. However the same error number is used for some
-other cases. You should ensure your application will never cause an ERROR 1290
-except for `read-only` mode when enabling this option.
-
-
-##### `serverPubKey`
-
-```
-Type: string
-Valid Values:
-Default: none
-```
-
-Server public keys can be registered with [`mysql.RegisterServerPubKey`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterServerPubKey), which can then be used by the assigned name in the DSN.
-Public keys are used to transmit encrypted data, e.g. for authentication.
-If the server's public key is known, it should be set manually to avoid expensive and potentially insecure transmissions of the public key from the server to the client each time it is required.
-
-
-##### `timeout`
-
-```
-Type: duration
-Default: OS default
-```
-
-Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
-
-
-##### `tls`
-
-```
-Type: bool / string
-Valid Values: true, false, skip-verify, preferred,
-Default: false
-```
-
-`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side) or use `preferred` to use TLS only when advertised by the server. This is similar to `skip-verify`, but additionally allows a fallback to a connection which is not encrypted. Neither `skip-verify` nor `preferred` add any reliable security. You can use a custom TLS config after registering it with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
-
-
-##### `writeTimeout`
-
-```
-Type: duration
-Default: 0
-```
-
-I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
-
-##### `connectionAttributes`
-
-```
-Type: comma-delimited string of user-defined "key:value" pairs
-Valid Values: (:,:,...)
-Default: none
-```
-
-[Connection attributes](https://dev.mysql.com/doc/refman/8.0/en/performance-schema-connection-attribute-tables.html) are key-value pairs that application programs can pass to the server at connect time.
-
-##### System Variables
-
-Any other parameters are interpreted as system variables:
- * `=`: `SET =`
- * `=`: `SET =`
- * `=%27%27`: `SET =''`
-
-Rules:
-* The values for string variables must be quoted with `'`.
-* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!
- (which implies values of string variables must be wrapped with `%27`).
-
-Examples:
- * `autocommit=1`: `SET autocommit=1`
- * [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'`
- * [`transaction_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_transaction_isolation): `SET transaction_isolation='REPEATABLE-READ'`
-
-
-#### Examples
-```
-user@unix(/path/to/socket)/dbname
-```
-
-```
-root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
-```
-
-```
-user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
-```
-
-Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html):
-```
-user:password@/dbname?sql_mode=TRADITIONAL
-```
-
-TCP via IPv6:
-```
-user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci
-```
-
-TCP on a remote host, e.g. Amazon RDS:
-```
-id:password@tcp(your-amazonaws-uri.com:3306)/dbname
-```
-
-Google Cloud SQL on App Engine:
-```
-user:password@unix(/cloudsql/project-id:region-name:instance-name)/dbname
-```
-
-TCP using default port (3306) on localhost:
-```
-user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped
-```
-
-Use the default protocol (tcp) and host (localhost:3306):
-```
-user:password@/dbname
-```
-
-No Database preselected:
-```
-user:password@/
-```
-
-
-### Connection pool and timeouts
-The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
-
-## `ColumnType` Support
-This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported. All Unsigned database type names will be returned `UNSIGNED ` with `INT`, `TINYINT`, `SMALLINT`, `MEDIUMINT`, `BIGINT`.
-
-## `context.Context` Support
-Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
-See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
-
-
-### `LOAD DATA LOCAL INFILE` support
-For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
-```go
-import "github.com/go-sql-driver/mysql"
-```
-
-Files must be explicitly allowed by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the allowlist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](https://dev.mysql.com/doc/refman/8.0/en/load-data.html#load-data-local)).
-
-To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::` then. Choose different names for different handlers and `DeregisterReaderHandler` when you don't need it anymore.
-
-See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details.
-
-
-### `time.Time` support
-The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
-
-However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
-
-**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes).
-
-
-### Unicode support
-Since version 1.5 Go-MySQL-Driver automatically uses the collation ` utf8mb4_general_ci` by default.
-
-Other charsets / collations can be set using the [`charset`](#charset) or [`collation`](#collation) DSN parameter.
-
-- When only the `charset` is specified, the `SET NAMES ` query is sent and the server's default collation is used.
-- When both the `charset` and `collation` are specified, the `SET NAMES COLLATE ` query is sent.
-- When only the `collation` is specified, the collation is specified in the protocol handshake and the `SET NAMES` query is not sent. This can save one roundtrip, but note that the server may ignore the specified collation silently and use the server's default charset/collation instead.
-
-See http://dev.mysql.com/doc/refman/8.0/en/charset-unicode.html for more details on MySQL's Unicode support.
-
-## Testing / Development
-To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
-
-Go-MySQL-Driver is not feature-complete yet. Your help is very appreciated.
-If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls).
-
-See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/.github/CONTRIBUTING.md) for details.
-
----------------------------------------
-
-## License
-Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
-
-Mozilla summarizes the license scope as follows:
-> MPL: The copyleft applies to any files containing MPLed code.
-
-
-That means:
- * You can **use** the **unchanged** source code both in private and commercially.
- * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0).
- * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**.
-
-Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license.
-
-You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE).
-
-
diff --git a/vendor/github.com/go-sql-driver/mysql/atomic_bool.go b/vendor/github.com/go-sql-driver/mysql/atomic_bool.go
deleted file mode 100644
index 1b7e19f3..00000000
--- a/vendor/github.com/go-sql-driver/mysql/atomic_bool.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
-//
-// Copyright 2022 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-//go:build go1.19
-// +build go1.19
-
-package mysql
-
-import "sync/atomic"
-
-/******************************************************************************
-* Sync utils *
-******************************************************************************/
-
-type atomicBool = atomic.Bool
diff --git a/vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go b/vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go
deleted file mode 100644
index 2e9a7f0b..00000000
--- a/vendor/github.com/go-sql-driver/mysql/atomic_bool_go118.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package.
-//
-// Copyright 2022 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-//go:build !go1.19
-// +build !go1.19
-
-package mysql
-
-import "sync/atomic"
-
-/******************************************************************************
-* Sync utils *
-******************************************************************************/
-
-// atomicBool is an implementation of atomic.Bool for older version of Go.
-// it is a wrapper around uint32 for usage as a boolean value with
-// atomic access.
-type atomicBool struct {
- _ noCopy
- value uint32
-}
-
-// Load returns whether the current boolean value is true
-func (ab *atomicBool) Load() bool {
- return atomic.LoadUint32(&ab.value) > 0
-}
-
-// Store sets the value of the bool regardless of the previous value
-func (ab *atomicBool) Store(value bool) {
- if value {
- atomic.StoreUint32(&ab.value, 1)
- } else {
- atomic.StoreUint32(&ab.value, 0)
- }
-}
-
-// Swap sets the value of the bool and returns the old value.
-func (ab *atomicBool) Swap(value bool) bool {
- if value {
- return atomic.SwapUint32(&ab.value, 1) > 0
- }
- return atomic.SwapUint32(&ab.value, 0) > 0
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/auth.go b/vendor/github.com/go-sql-driver/mysql/auth.go
deleted file mode 100644
index 74e1bd03..00000000
--- a/vendor/github.com/go-sql-driver/mysql/auth.go
+++ /dev/null
@@ -1,484 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "crypto/rand"
- "crypto/rsa"
- "crypto/sha1"
- "crypto/sha256"
- "crypto/sha512"
- "crypto/x509"
- "encoding/pem"
- "fmt"
- "sync"
-
- "filippo.io/edwards25519"
-)
-
-// server pub keys registry
-var (
- serverPubKeyLock sync.RWMutex
- serverPubKeyRegistry map[string]*rsa.PublicKey
-)
-
-// RegisterServerPubKey registers a server RSA public key which can be used to
-// send data in a secure manner to the server without receiving the public key
-// in a potentially insecure way from the server first.
-// Registered keys can afterwards be used adding serverPubKey= to the DSN.
-//
-// Note: The provided rsa.PublicKey instance is exclusively owned by the driver
-// after registering it and may not be modified.
-//
-// data, err := os.ReadFile("mykey.pem")
-// if err != nil {
-// log.Fatal(err)
-// }
-//
-// block, _ := pem.Decode(data)
-// if block == nil || block.Type != "PUBLIC KEY" {
-// log.Fatal("failed to decode PEM block containing public key")
-// }
-//
-// pub, err := x509.ParsePKIXPublicKey(block.Bytes)
-// if err != nil {
-// log.Fatal(err)
-// }
-//
-// if rsaPubKey, ok := pub.(*rsa.PublicKey); ok {
-// mysql.RegisterServerPubKey("mykey", rsaPubKey)
-// } else {
-// log.Fatal("not a RSA public key")
-// }
-func RegisterServerPubKey(name string, pubKey *rsa.PublicKey) {
- serverPubKeyLock.Lock()
- if serverPubKeyRegistry == nil {
- serverPubKeyRegistry = make(map[string]*rsa.PublicKey)
- }
-
- serverPubKeyRegistry[name] = pubKey
- serverPubKeyLock.Unlock()
-}
-
-// DeregisterServerPubKey removes the public key registered with the given name.
-func DeregisterServerPubKey(name string) {
- serverPubKeyLock.Lock()
- if serverPubKeyRegistry != nil {
- delete(serverPubKeyRegistry, name)
- }
- serverPubKeyLock.Unlock()
-}
-
-func getServerPubKey(name string) (pubKey *rsa.PublicKey) {
- serverPubKeyLock.RLock()
- if v, ok := serverPubKeyRegistry[name]; ok {
- pubKey = v
- }
- serverPubKeyLock.RUnlock()
- return
-}
-
-// Hash password using pre 4.1 (old password) method
-// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c
-type myRnd struct {
- seed1, seed2 uint32
-}
-
-const myRndMaxVal = 0x3FFFFFFF
-
-// Pseudo random number generator
-func newMyRnd(seed1, seed2 uint32) *myRnd {
- return &myRnd{
- seed1: seed1 % myRndMaxVal,
- seed2: seed2 % myRndMaxVal,
- }
-}
-
-// Tested to be equivalent to MariaDB's floating point variant
-// http://play.golang.org/p/QHvhd4qved
-// http://play.golang.org/p/RG0q4ElWDx
-func (r *myRnd) NextByte() byte {
- r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal
- r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal
-
- return byte(uint64(r.seed1) * 31 / myRndMaxVal)
-}
-
-// Generate binary hash from byte string using insecure pre 4.1 method
-func pwHash(password []byte) (result [2]uint32) {
- var add uint32 = 7
- var tmp uint32
-
- result[0] = 1345345333
- result[1] = 0x12345671
-
- for _, c := range password {
- // skip spaces and tabs in password
- if c == ' ' || c == '\t' {
- continue
- }
-
- tmp = uint32(c)
- result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8)
- result[1] += (result[1] << 8) ^ result[0]
- add += tmp
- }
-
- // Remove sign bit (1<<31)-1)
- result[0] &= 0x7FFFFFFF
- result[1] &= 0x7FFFFFFF
-
- return
-}
-
-// Hash password using insecure pre 4.1 method
-func scrambleOldPassword(scramble []byte, password string) []byte {
- scramble = scramble[:8]
-
- hashPw := pwHash([]byte(password))
- hashSc := pwHash(scramble)
-
- r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1])
-
- var out [8]byte
- for i := range out {
- out[i] = r.NextByte() + 64
- }
-
- mask := r.NextByte()
- for i := range out {
- out[i] ^= mask
- }
-
- return out[:]
-}
-
-// Hash password using 4.1+ method (SHA1)
-func scramblePassword(scramble []byte, password string) []byte {
- if len(password) == 0 {
- return nil
- }
-
- // stage1Hash = SHA1(password)
- crypt := sha1.New()
- crypt.Write([]byte(password))
- stage1 := crypt.Sum(nil)
-
- // scrambleHash = SHA1(scramble + SHA1(stage1Hash))
- // inner Hash
- crypt.Reset()
- crypt.Write(stage1)
- hash := crypt.Sum(nil)
-
- // outer Hash
- crypt.Reset()
- crypt.Write(scramble)
- crypt.Write(hash)
- scramble = crypt.Sum(nil)
-
- // token = scrambleHash XOR stage1Hash
- for i := range scramble {
- scramble[i] ^= stage1[i]
- }
- return scramble
-}
-
-// Hash password using MySQL 8+ method (SHA256)
-func scrambleSHA256Password(scramble []byte, password string) []byte {
- if len(password) == 0 {
- return nil
- }
-
- // XOR(SHA256(password), SHA256(SHA256(SHA256(password)), scramble))
-
- crypt := sha256.New()
- crypt.Write([]byte(password))
- message1 := crypt.Sum(nil)
-
- crypt.Reset()
- crypt.Write(message1)
- message1Hash := crypt.Sum(nil)
-
- crypt.Reset()
- crypt.Write(message1Hash)
- crypt.Write(scramble)
- message2 := crypt.Sum(nil)
-
- for i := range message1 {
- message1[i] ^= message2[i]
- }
-
- return message1
-}
-
-func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte, error) {
- plain := make([]byte, len(password)+1)
- copy(plain, password)
- for i := range plain {
- j := i % len(seed)
- plain[i] ^= seed[j]
- }
- sha1 := sha1.New()
- return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil)
-}
-
-// authEd25519 does ed25519 authentication used by MariaDB.
-func authEd25519(scramble []byte, password string) ([]byte, error) {
- // Derived from https://github.com/MariaDB/server/blob/d8e6bb00888b1f82c031938f4c8ac5d97f6874c3/plugin/auth_ed25519/ref10/sign.c
- // Code style is from https://cs.opensource.google/go/go/+/refs/tags/go1.21.5:src/crypto/ed25519/ed25519.go;l=207
- h := sha512.Sum512([]byte(password))
-
- s, err := edwards25519.NewScalar().SetBytesWithClamping(h[:32])
- if err != nil {
- return nil, err
- }
- A := (&edwards25519.Point{}).ScalarBaseMult(s)
-
- mh := sha512.New()
- mh.Write(h[32:])
- mh.Write(scramble)
- messageDigest := mh.Sum(nil)
- r, err := edwards25519.NewScalar().SetUniformBytes(messageDigest)
- if err != nil {
- return nil, err
- }
-
- R := (&edwards25519.Point{}).ScalarBaseMult(r)
-
- kh := sha512.New()
- kh.Write(R.Bytes())
- kh.Write(A.Bytes())
- kh.Write(scramble)
- hramDigest := kh.Sum(nil)
- k, err := edwards25519.NewScalar().SetUniformBytes(hramDigest)
- if err != nil {
- return nil, err
- }
-
- S := k.MultiplyAdd(k, s, r)
-
- return append(R.Bytes(), S.Bytes()...), nil
-}
-
-func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error {
- enc, err := encryptPassword(mc.cfg.Passwd, seed, pub)
- if err != nil {
- return err
- }
- return mc.writeAuthSwitchPacket(enc)
-}
-
-func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) {
- switch plugin {
- case "caching_sha2_password":
- authResp := scrambleSHA256Password(authData, mc.cfg.Passwd)
- return authResp, nil
-
- case "mysql_old_password":
- if !mc.cfg.AllowOldPasswords {
- return nil, ErrOldPassword
- }
- if len(mc.cfg.Passwd) == 0 {
- return nil, nil
- }
- // Note: there are edge cases where this should work but doesn't;
- // this is currently "wontfix":
- // https://github.com/go-sql-driver/mysql/issues/184
- authResp := append(scrambleOldPassword(authData[:8], mc.cfg.Passwd), 0)
- return authResp, nil
-
- case "mysql_clear_password":
- if !mc.cfg.AllowCleartextPasswords {
- return nil, ErrCleartextPassword
- }
- // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
- // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
- return append([]byte(mc.cfg.Passwd), 0), nil
-
- case "mysql_native_password":
- if !mc.cfg.AllowNativePasswords {
- return nil, ErrNativePassword
- }
- // https://dev.mysql.com/doc/internals/en/secure-password-authentication.html
- // Native password authentication only need and will need 20-byte challenge.
- authResp := scramblePassword(authData[:20], mc.cfg.Passwd)
- return authResp, nil
-
- case "sha256_password":
- if len(mc.cfg.Passwd) == 0 {
- return []byte{0}, nil
- }
- // unlike caching_sha2_password, sha256_password does not accept
- // cleartext password on unix transport.
- if mc.cfg.TLS != nil {
- // write cleartext auth packet
- return append([]byte(mc.cfg.Passwd), 0), nil
- }
-
- pubKey := mc.cfg.pubKey
- if pubKey == nil {
- // request public key from server
- return []byte{1}, nil
- }
-
- // encrypted password
- enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey)
- return enc, err
-
- case "client_ed25519":
- if len(authData) != 32 {
- return nil, ErrMalformPkt
- }
- return authEd25519(authData, mc.cfg.Passwd)
-
- default:
- mc.log("unknown auth plugin:", plugin)
- return nil, ErrUnknownPlugin
- }
-}
-
-func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error {
- // Read Result Packet
- authData, newPlugin, err := mc.readAuthResult()
- if err != nil {
- return err
- }
-
- // handle auth plugin switch, if requested
- if newPlugin != "" {
- // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is
- // sent and we have to keep using the cipher sent in the init packet.
- if authData == nil {
- authData = oldAuthData
- } else {
- // copy data from read buffer to owned slice
- copy(oldAuthData, authData)
- }
-
- plugin = newPlugin
-
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- return err
- }
- if err = mc.writeAuthSwitchPacket(authResp); err != nil {
- return err
- }
-
- // Read Result Packet
- authData, newPlugin, err = mc.readAuthResult()
- if err != nil {
- return err
- }
-
- // Do not allow to change the auth plugin more than once
- if newPlugin != "" {
- return ErrMalformPkt
- }
- }
-
- switch plugin {
-
- // https://dev.mysql.com/blog-archive/preparing-your-community-connector-for-mysql-8-part-2-sha256/
- case "caching_sha2_password":
- switch len(authData) {
- case 0:
- return nil // auth successful
- case 1:
- switch authData[0] {
- case cachingSha2PasswordFastAuthSuccess:
- if err = mc.resultUnchanged().readResultOK(); err == nil {
- return nil // auth successful
- }
-
- case cachingSha2PasswordPerformFullAuthentication:
- if mc.cfg.TLS != nil || mc.cfg.Net == "unix" {
- // write cleartext auth packet
- err = mc.writeAuthSwitchPacket(append([]byte(mc.cfg.Passwd), 0))
- if err != nil {
- return err
- }
- } else {
- pubKey := mc.cfg.pubKey
- if pubKey == nil {
- // request public key from server
- data, err := mc.buf.takeSmallBuffer(4 + 1)
- if err != nil {
- return err
- }
- data[4] = cachingSha2PasswordRequestPublicKey
- err = mc.writePacket(data)
- if err != nil {
- return err
- }
-
- if data, err = mc.readPacket(); err != nil {
- return err
- }
-
- if data[0] != iAuthMoreData {
- return fmt.Errorf("unexpected resp from server for caching_sha2_password, perform full authentication")
- }
-
- // parse public key
- block, rest := pem.Decode(data[1:])
- if block == nil {
- return fmt.Errorf("no pem data found, data: %s", rest)
- }
- pkix, err := x509.ParsePKIXPublicKey(block.Bytes)
- if err != nil {
- return err
- }
- pubKey = pkix.(*rsa.PublicKey)
- }
-
- // send encrypted password
- err = mc.sendEncryptedPassword(oldAuthData, pubKey)
- if err != nil {
- return err
- }
- }
- return mc.resultUnchanged().readResultOK()
-
- default:
- return ErrMalformPkt
- }
- default:
- return ErrMalformPkt
- }
-
- case "sha256_password":
- switch len(authData) {
- case 0:
- return nil // auth successful
- default:
- block, _ := pem.Decode(authData)
- if block == nil {
- return fmt.Errorf("no Pem data found, data: %s", authData)
- }
-
- pub, err := x509.ParsePKIXPublicKey(block.Bytes)
- if err != nil {
- return err
- }
-
- // send encrypted password
- err = mc.sendEncryptedPassword(oldAuthData, pub.(*rsa.PublicKey))
- if err != nil {
- return err
- }
- return mc.resultUnchanged().readResultOK()
- }
-
- default:
- return nil // auth successful
- }
-
- return err
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go
deleted file mode 100644
index 0774c5c8..00000000
--- a/vendor/github.com/go-sql-driver/mysql/buffer.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "io"
- "net"
- "time"
-)
-
-const defaultBufSize = 4096
-const maxCachedBufSize = 256 * 1024
-
-// A buffer which is used for both reading and writing.
-// This is possible since communication on each connection is synchronous.
-// In other words, we can't write and read simultaneously on the same connection.
-// The buffer is similar to bufio.Reader / Writer but zero-copy-ish
-// Also highly optimized for this particular use case.
-// This buffer is backed by two byte slices in a double-buffering scheme
-type buffer struct {
- buf []byte // buf is a byte buffer who's length and capacity are equal.
- nc net.Conn
- idx int
- length int
- timeout time.Duration
- dbuf [2][]byte // dbuf is an array with the two byte slices that back this buffer
- flipcnt uint // flipccnt is the current buffer counter for double-buffering
-}
-
-// newBuffer allocates and returns a new buffer.
-func newBuffer(nc net.Conn) buffer {
- fg := make([]byte, defaultBufSize)
- return buffer{
- buf: fg,
- nc: nc,
- dbuf: [2][]byte{fg, nil},
- }
-}
-
-// flip replaces the active buffer with the background buffer
-// this is a delayed flip that simply increases the buffer counter;
-// the actual flip will be performed the next time we call `buffer.fill`
-func (b *buffer) flip() {
- b.flipcnt += 1
-}
-
-// fill reads into the buffer until at least _need_ bytes are in it
-func (b *buffer) fill(need int) error {
- n := b.length
- // fill data into its double-buffering target: if we've called
- // flip on this buffer, we'll be copying to the background buffer,
- // and then filling it with network data; otherwise we'll just move
- // the contents of the current buffer to the front before filling it
- dest := b.dbuf[b.flipcnt&1]
-
- // grow buffer if necessary to fit the whole packet.
- if need > len(dest) {
- // Round up to the next multiple of the default size
- dest = make([]byte, ((need/defaultBufSize)+1)*defaultBufSize)
-
- // if the allocated buffer is not too large, move it to backing storage
- // to prevent extra allocations on applications that perform large reads
- if len(dest) <= maxCachedBufSize {
- b.dbuf[b.flipcnt&1] = dest
- }
- }
-
- // if we're filling the fg buffer, move the existing data to the start of it.
- // if we're filling the bg buffer, copy over the data
- if n > 0 {
- copy(dest[:n], b.buf[b.idx:])
- }
-
- b.buf = dest
- b.idx = 0
-
- for {
- if b.timeout > 0 {
- if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil {
- return err
- }
- }
-
- nn, err := b.nc.Read(b.buf[n:])
- n += nn
-
- switch err {
- case nil:
- if n < need {
- continue
- }
- b.length = n
- return nil
-
- case io.EOF:
- if n >= need {
- b.length = n
- return nil
- }
- return io.ErrUnexpectedEOF
-
- default:
- return err
- }
- }
-}
-
-// returns next N bytes from buffer.
-// The returned slice is only guaranteed to be valid until the next read
-func (b *buffer) readNext(need int) ([]byte, error) {
- if b.length < need {
- // refill
- if err := b.fill(need); err != nil {
- return nil, err
- }
- }
-
- offset := b.idx
- b.idx += need
- b.length -= need
- return b.buf[offset:b.idx], nil
-}
-
-// takeBuffer returns a buffer with the requested size.
-// If possible, a slice from the existing buffer is returned.
-// Otherwise a bigger buffer is made.
-// Only one buffer (total) can be used at a time.
-func (b *buffer) takeBuffer(length int) ([]byte, error) {
- if b.length > 0 {
- return nil, ErrBusyBuffer
- }
-
- // test (cheap) general case first
- if length <= cap(b.buf) {
- return b.buf[:length], nil
- }
-
- if length < maxPacketSize {
- b.buf = make([]byte, length)
- return b.buf, nil
- }
-
- // buffer is larger than we want to store.
- return make([]byte, length), nil
-}
-
-// takeSmallBuffer is shortcut which can be used if length is
-// known to be smaller than defaultBufSize.
-// Only one buffer (total) can be used at a time.
-func (b *buffer) takeSmallBuffer(length int) ([]byte, error) {
- if b.length > 0 {
- return nil, ErrBusyBuffer
- }
- return b.buf[:length], nil
-}
-
-// takeCompleteBuffer returns the complete existing buffer.
-// This can be used if the necessary buffer size is unknown.
-// cap and len of the returned buffer will be equal.
-// Only one buffer (total) can be used at a time.
-func (b *buffer) takeCompleteBuffer() ([]byte, error) {
- if b.length > 0 {
- return nil, ErrBusyBuffer
- }
- return b.buf, nil
-}
-
-// store stores buf, an updated buffer, if its suitable to do so.
-func (b *buffer) store(buf []byte) error {
- if b.length > 0 {
- return ErrBusyBuffer
- } else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) {
- b.buf = buf[:cap(buf)]
- }
- return nil
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/collations.go b/vendor/github.com/go-sql-driver/mysql/collations.go
deleted file mode 100644
index 1cdf97b6..00000000
--- a/vendor/github.com/go-sql-driver/mysql/collations.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-const defaultCollation = "utf8mb4_general_ci"
-const binaryCollationID = 63
-
-// A list of available collations mapped to the internal ID.
-// To update this map use the following MySQL query:
-//
-// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS WHERE ID<256 ORDER BY ID
-//
-// Handshake packet have only 1 byte for collation_id. So we can't use collations with ID > 255.
-//
-// ucs2, utf16, and utf32 can't be used for connection charset.
-// https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset
-// They are commented out to reduce this map.
-var collations = map[string]byte{
- "big5_chinese_ci": 1,
- "latin2_czech_cs": 2,
- "dec8_swedish_ci": 3,
- "cp850_general_ci": 4,
- "latin1_german1_ci": 5,
- "hp8_english_ci": 6,
- "koi8r_general_ci": 7,
- "latin1_swedish_ci": 8,
- "latin2_general_ci": 9,
- "swe7_swedish_ci": 10,
- "ascii_general_ci": 11,
- "ujis_japanese_ci": 12,
- "sjis_japanese_ci": 13,
- "cp1251_bulgarian_ci": 14,
- "latin1_danish_ci": 15,
- "hebrew_general_ci": 16,
- "tis620_thai_ci": 18,
- "euckr_korean_ci": 19,
- "latin7_estonian_cs": 20,
- "latin2_hungarian_ci": 21,
- "koi8u_general_ci": 22,
- "cp1251_ukrainian_ci": 23,
- "gb2312_chinese_ci": 24,
- "greek_general_ci": 25,
- "cp1250_general_ci": 26,
- "latin2_croatian_ci": 27,
- "gbk_chinese_ci": 28,
- "cp1257_lithuanian_ci": 29,
- "latin5_turkish_ci": 30,
- "latin1_german2_ci": 31,
- "armscii8_general_ci": 32,
- "utf8_general_ci": 33,
- "cp1250_czech_cs": 34,
- //"ucs2_general_ci": 35,
- "cp866_general_ci": 36,
- "keybcs2_general_ci": 37,
- "macce_general_ci": 38,
- "macroman_general_ci": 39,
- "cp852_general_ci": 40,
- "latin7_general_ci": 41,
- "latin7_general_cs": 42,
- "macce_bin": 43,
- "cp1250_croatian_ci": 44,
- "utf8mb4_general_ci": 45,
- "utf8mb4_bin": 46,
- "latin1_bin": 47,
- "latin1_general_ci": 48,
- "latin1_general_cs": 49,
- "cp1251_bin": 50,
- "cp1251_general_ci": 51,
- "cp1251_general_cs": 52,
- "macroman_bin": 53,
- //"utf16_general_ci": 54,
- //"utf16_bin": 55,
- //"utf16le_general_ci": 56,
- "cp1256_general_ci": 57,
- "cp1257_bin": 58,
- "cp1257_general_ci": 59,
- //"utf32_general_ci": 60,
- //"utf32_bin": 61,
- //"utf16le_bin": 62,
- "binary": 63,
- "armscii8_bin": 64,
- "ascii_bin": 65,
- "cp1250_bin": 66,
- "cp1256_bin": 67,
- "cp866_bin": 68,
- "dec8_bin": 69,
- "greek_bin": 70,
- "hebrew_bin": 71,
- "hp8_bin": 72,
- "keybcs2_bin": 73,
- "koi8r_bin": 74,
- "koi8u_bin": 75,
- "utf8_tolower_ci": 76,
- "latin2_bin": 77,
- "latin5_bin": 78,
- "latin7_bin": 79,
- "cp850_bin": 80,
- "cp852_bin": 81,
- "swe7_bin": 82,
- "utf8_bin": 83,
- "big5_bin": 84,
- "euckr_bin": 85,
- "gb2312_bin": 86,
- "gbk_bin": 87,
- "sjis_bin": 88,
- "tis620_bin": 89,
- //"ucs2_bin": 90,
- "ujis_bin": 91,
- "geostd8_general_ci": 92,
- "geostd8_bin": 93,
- "latin1_spanish_ci": 94,
- "cp932_japanese_ci": 95,
- "cp932_bin": 96,
- "eucjpms_japanese_ci": 97,
- "eucjpms_bin": 98,
- "cp1250_polish_ci": 99,
- //"utf16_unicode_ci": 101,
- //"utf16_icelandic_ci": 102,
- //"utf16_latvian_ci": 103,
- //"utf16_romanian_ci": 104,
- //"utf16_slovenian_ci": 105,
- //"utf16_polish_ci": 106,
- //"utf16_estonian_ci": 107,
- //"utf16_spanish_ci": 108,
- //"utf16_swedish_ci": 109,
- //"utf16_turkish_ci": 110,
- //"utf16_czech_ci": 111,
- //"utf16_danish_ci": 112,
- //"utf16_lithuanian_ci": 113,
- //"utf16_slovak_ci": 114,
- //"utf16_spanish2_ci": 115,
- //"utf16_roman_ci": 116,
- //"utf16_persian_ci": 117,
- //"utf16_esperanto_ci": 118,
- //"utf16_hungarian_ci": 119,
- //"utf16_sinhala_ci": 120,
- //"utf16_german2_ci": 121,
- //"utf16_croatian_ci": 122,
- //"utf16_unicode_520_ci": 123,
- //"utf16_vietnamese_ci": 124,
- //"ucs2_unicode_ci": 128,
- //"ucs2_icelandic_ci": 129,
- //"ucs2_latvian_ci": 130,
- //"ucs2_romanian_ci": 131,
- //"ucs2_slovenian_ci": 132,
- //"ucs2_polish_ci": 133,
- //"ucs2_estonian_ci": 134,
- //"ucs2_spanish_ci": 135,
- //"ucs2_swedish_ci": 136,
- //"ucs2_turkish_ci": 137,
- //"ucs2_czech_ci": 138,
- //"ucs2_danish_ci": 139,
- //"ucs2_lithuanian_ci": 140,
- //"ucs2_slovak_ci": 141,
- //"ucs2_spanish2_ci": 142,
- //"ucs2_roman_ci": 143,
- //"ucs2_persian_ci": 144,
- //"ucs2_esperanto_ci": 145,
- //"ucs2_hungarian_ci": 146,
- //"ucs2_sinhala_ci": 147,
- //"ucs2_german2_ci": 148,
- //"ucs2_croatian_ci": 149,
- //"ucs2_unicode_520_ci": 150,
- //"ucs2_vietnamese_ci": 151,
- //"ucs2_general_mysql500_ci": 159,
- //"utf32_unicode_ci": 160,
- //"utf32_icelandic_ci": 161,
- //"utf32_latvian_ci": 162,
- //"utf32_romanian_ci": 163,
- //"utf32_slovenian_ci": 164,
- //"utf32_polish_ci": 165,
- //"utf32_estonian_ci": 166,
- //"utf32_spanish_ci": 167,
- //"utf32_swedish_ci": 168,
- //"utf32_turkish_ci": 169,
- //"utf32_czech_ci": 170,
- //"utf32_danish_ci": 171,
- //"utf32_lithuanian_ci": 172,
- //"utf32_slovak_ci": 173,
- //"utf32_spanish2_ci": 174,
- //"utf32_roman_ci": 175,
- //"utf32_persian_ci": 176,
- //"utf32_esperanto_ci": 177,
- //"utf32_hungarian_ci": 178,
- //"utf32_sinhala_ci": 179,
- //"utf32_german2_ci": 180,
- //"utf32_croatian_ci": 181,
- //"utf32_unicode_520_ci": 182,
- //"utf32_vietnamese_ci": 183,
- "utf8_unicode_ci": 192,
- "utf8_icelandic_ci": 193,
- "utf8_latvian_ci": 194,
- "utf8_romanian_ci": 195,
- "utf8_slovenian_ci": 196,
- "utf8_polish_ci": 197,
- "utf8_estonian_ci": 198,
- "utf8_spanish_ci": 199,
- "utf8_swedish_ci": 200,
- "utf8_turkish_ci": 201,
- "utf8_czech_ci": 202,
- "utf8_danish_ci": 203,
- "utf8_lithuanian_ci": 204,
- "utf8_slovak_ci": 205,
- "utf8_spanish2_ci": 206,
- "utf8_roman_ci": 207,
- "utf8_persian_ci": 208,
- "utf8_esperanto_ci": 209,
- "utf8_hungarian_ci": 210,
- "utf8_sinhala_ci": 211,
- "utf8_german2_ci": 212,
- "utf8_croatian_ci": 213,
- "utf8_unicode_520_ci": 214,
- "utf8_vietnamese_ci": 215,
- "utf8_general_mysql500_ci": 223,
- "utf8mb4_unicode_ci": 224,
- "utf8mb4_icelandic_ci": 225,
- "utf8mb4_latvian_ci": 226,
- "utf8mb4_romanian_ci": 227,
- "utf8mb4_slovenian_ci": 228,
- "utf8mb4_polish_ci": 229,
- "utf8mb4_estonian_ci": 230,
- "utf8mb4_spanish_ci": 231,
- "utf8mb4_swedish_ci": 232,
- "utf8mb4_turkish_ci": 233,
- "utf8mb4_czech_ci": 234,
- "utf8mb4_danish_ci": 235,
- "utf8mb4_lithuanian_ci": 236,
- "utf8mb4_slovak_ci": 237,
- "utf8mb4_spanish2_ci": 238,
- "utf8mb4_roman_ci": 239,
- "utf8mb4_persian_ci": 240,
- "utf8mb4_esperanto_ci": 241,
- "utf8mb4_hungarian_ci": 242,
- "utf8mb4_sinhala_ci": 243,
- "utf8mb4_german2_ci": 244,
- "utf8mb4_croatian_ci": 245,
- "utf8mb4_unicode_520_ci": 246,
- "utf8mb4_vietnamese_ci": 247,
- "gb18030_chinese_ci": 248,
- "gb18030_bin": 249,
- "gb18030_unicode_520_ci": 250,
- "utf8mb4_0900_ai_ci": 255,
-}
-
-// A denylist of collations which is unsafe to interpolate parameters.
-// These multibyte encodings may contains 0x5c (`\`) in their trailing bytes.
-var unsafeCollations = map[string]bool{
- "big5_chinese_ci": true,
- "sjis_japanese_ci": true,
- "gbk_chinese_ci": true,
- "big5_bin": true,
- "gb2312_bin": true,
- "gbk_bin": true,
- "sjis_bin": true,
- "cp932_japanese_ci": true,
- "cp932_bin": true,
- "gb18030_chinese_ci": true,
- "gb18030_bin": true,
- "gb18030_unicode_520_ci": true,
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/conncheck.go b/vendor/github.com/go-sql-driver/mysql/conncheck.go
deleted file mode 100644
index 0ea72172..00000000
--- a/vendor/github.com/go-sql-driver/mysql/conncheck.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-//go:build linux || darwin || dragonfly || freebsd || netbsd || openbsd || solaris || illumos
-// +build linux darwin dragonfly freebsd netbsd openbsd solaris illumos
-
-package mysql
-
-import (
- "errors"
- "io"
- "net"
- "syscall"
-)
-
-var errUnexpectedRead = errors.New("unexpected read from socket")
-
-func connCheck(conn net.Conn) error {
- var sysErr error
-
- sysConn, ok := conn.(syscall.Conn)
- if !ok {
- return nil
- }
- rawConn, err := sysConn.SyscallConn()
- if err != nil {
- return err
- }
-
- err = rawConn.Read(func(fd uintptr) bool {
- var buf [1]byte
- n, err := syscall.Read(int(fd), buf[:])
- switch {
- case n == 0 && err == nil:
- sysErr = io.EOF
- case n > 0:
- sysErr = errUnexpectedRead
- case err == syscall.EAGAIN || err == syscall.EWOULDBLOCK:
- sysErr = nil
- default:
- sysErr = err
- }
- return true
- })
- if err != nil {
- return err
- }
-
- return sysErr
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go b/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go
deleted file mode 100644
index a56c138f..00000000
--- a/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-//go:build !linux && !darwin && !dragonfly && !freebsd && !netbsd && !openbsd && !solaris && !illumos
-// +build !linux,!darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!illumos
-
-package mysql
-
-import "net"
-
-func connCheck(conn net.Conn) error {
- return nil
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go
deleted file mode 100644
index eff978d9..00000000
--- a/vendor/github.com/go-sql-driver/mysql/connection.go
+++ /dev/null
@@ -1,688 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "context"
- "database/sql"
- "database/sql/driver"
- "encoding/json"
- "io"
- "net"
- "strconv"
- "strings"
- "time"
-)
-
-type mysqlConn struct {
- buf buffer
- netConn net.Conn
- rawConn net.Conn // underlying connection when netConn is TLS connection.
- result mysqlResult // managed by clearResult() and handleOkPacket().
- cfg *Config
- connector *connector
- maxAllowedPacket int
- maxWriteSize int
- writeTimeout time.Duration
- flags clientFlag
- status statusFlag
- sequence uint8
- parseTime bool
-
- // for context support (Go 1.8+)
- watching bool
- watcher chan<- context.Context
- closech chan struct{}
- finished chan<- struct{}
- canceled atomicError // set non-nil if conn is canceled
- closed atomicBool // set when conn is closed, before closech is closed
-}
-
-// Helper function to call per-connection logger.
-func (mc *mysqlConn) log(v ...any) {
- mc.cfg.Logger.Print(v...)
-}
-
-// Handles parameters set in DSN after the connection is established
-func (mc *mysqlConn) handleParams() (err error) {
- var cmdSet strings.Builder
-
- for param, val := range mc.cfg.Params {
- switch param {
- // Charset: character_set_connection, character_set_client, character_set_results
- case "charset":
- charsets := strings.Split(val, ",")
- for _, cs := range charsets {
- // ignore errors here - a charset may not exist
- if mc.cfg.Collation != "" {
- err = mc.exec("SET NAMES " + cs + " COLLATE " + mc.cfg.Collation)
- } else {
- err = mc.exec("SET NAMES " + cs)
- }
- if err == nil {
- break
- }
- }
- if err != nil {
- return
- }
-
- // Other system vars accumulated in a single SET command
- default:
- if cmdSet.Len() == 0 {
- // Heuristic: 29 chars for each other key=value to reduce reallocations
- cmdSet.Grow(4 + len(param) + 3 + len(val) + 30*(len(mc.cfg.Params)-1))
- cmdSet.WriteString("SET ")
- } else {
- cmdSet.WriteString(", ")
- }
- cmdSet.WriteString(param)
- cmdSet.WriteString(" = ")
- cmdSet.WriteString(val)
- }
- }
-
- if cmdSet.Len() > 0 {
- err = mc.exec(cmdSet.String())
- if err != nil {
- return
- }
- }
-
- return
-}
-
-func (mc *mysqlConn) markBadConn(err error) error {
- if mc == nil {
- return err
- }
- if err != errBadConnNoWrite {
- return err
- }
- return driver.ErrBadConn
-}
-
-func (mc *mysqlConn) Begin() (driver.Tx, error) {
- return mc.begin(false)
-}
-
-func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) {
- if mc.closed.Load() {
- mc.log(ErrInvalidConn)
- return nil, driver.ErrBadConn
- }
- var q string
- if readOnly {
- q = "START TRANSACTION READ ONLY"
- } else {
- q = "START TRANSACTION"
- }
- err := mc.exec(q)
- if err == nil {
- return &mysqlTx{mc}, err
- }
- return nil, mc.markBadConn(err)
-}
-
-func (mc *mysqlConn) Close() (err error) {
- // Makes Close idempotent
- if !mc.closed.Load() {
- err = mc.writeCommandPacket(comQuit)
- }
-
- mc.cleanup()
- mc.clearResult()
- return
-}
-
-// Closes the network connection and unsets internal variables. Do not call this
-// function after successfully authentication, call Close instead. This function
-// is called before auth or on auth failure because MySQL will have already
-// closed the network connection.
-func (mc *mysqlConn) cleanup() {
- if mc.closed.Swap(true) {
- return
- }
-
- // Makes cleanup idempotent
- close(mc.closech)
- conn := mc.rawConn
- if conn == nil {
- return
- }
- if err := conn.Close(); err != nil {
- mc.log(err)
- }
- // This function can be called from multiple goroutines.
- // So we can not mc.clearResult() here.
- // Caller should do it if they are in safe goroutine.
-}
-
-func (mc *mysqlConn) error() error {
- if mc.closed.Load() {
- if err := mc.canceled.Value(); err != nil {
- return err
- }
- return ErrInvalidConn
- }
- return nil
-}
-
-func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
- if mc.closed.Load() {
- mc.log(ErrInvalidConn)
- return nil, driver.ErrBadConn
- }
- // Send command
- err := mc.writeCommandPacketStr(comStmtPrepare, query)
- if err != nil {
- // STMT_PREPARE is safe to retry. So we can return ErrBadConn here.
- mc.log(err)
- return nil, driver.ErrBadConn
- }
-
- stmt := &mysqlStmt{
- mc: mc,
- }
-
- // Read Result
- columnCount, err := stmt.readPrepareResultPacket()
- if err == nil {
- if stmt.paramCount > 0 {
- if err = mc.readUntilEOF(); err != nil {
- return nil, err
- }
- }
-
- if columnCount > 0 {
- err = mc.readUntilEOF()
- }
- }
-
- return stmt, err
-}
-
-func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) {
- // Number of ? should be same to len(args)
- if strings.Count(query, "?") != len(args) {
- return "", driver.ErrSkip
- }
-
- buf, err := mc.buf.takeCompleteBuffer()
- if err != nil {
- // can not take the buffer. Something must be wrong with the connection
- mc.log(err)
- return "", ErrInvalidConn
- }
- buf = buf[:0]
- argPos := 0
-
- for i := 0; i < len(query); i++ {
- q := strings.IndexByte(query[i:], '?')
- if q == -1 {
- buf = append(buf, query[i:]...)
- break
- }
- buf = append(buf, query[i:i+q]...)
- i += q
-
- arg := args[argPos]
- argPos++
-
- if arg == nil {
- buf = append(buf, "NULL"...)
- continue
- }
-
- switch v := arg.(type) {
- case int64:
- buf = strconv.AppendInt(buf, v, 10)
- case uint64:
- // Handle uint64 explicitly because our custom ConvertValue emits unsigned values
- buf = strconv.AppendUint(buf, v, 10)
- case float64:
- buf = strconv.AppendFloat(buf, v, 'g', -1, 64)
- case bool:
- if v {
- buf = append(buf, '1')
- } else {
- buf = append(buf, '0')
- }
- case time.Time:
- if v.IsZero() {
- buf = append(buf, "'0000-00-00'"...)
- } else {
- buf = append(buf, '\'')
- buf, err = appendDateTime(buf, v.In(mc.cfg.Loc), mc.cfg.timeTruncate)
- if err != nil {
- return "", err
- }
- buf = append(buf, '\'')
- }
- case json.RawMessage:
- buf = append(buf, '\'')
- if mc.status&statusNoBackslashEscapes == 0 {
- buf = escapeBytesBackslash(buf, v)
- } else {
- buf = escapeBytesQuotes(buf, v)
- }
- buf = append(buf, '\'')
- case []byte:
- if v == nil {
- buf = append(buf, "NULL"...)
- } else {
- buf = append(buf, "_binary'"...)
- if mc.status&statusNoBackslashEscapes == 0 {
- buf = escapeBytesBackslash(buf, v)
- } else {
- buf = escapeBytesQuotes(buf, v)
- }
- buf = append(buf, '\'')
- }
- case string:
- buf = append(buf, '\'')
- if mc.status&statusNoBackslashEscapes == 0 {
- buf = escapeStringBackslash(buf, v)
- } else {
- buf = escapeStringQuotes(buf, v)
- }
- buf = append(buf, '\'')
- default:
- return "", driver.ErrSkip
- }
-
- if len(buf)+4 > mc.maxAllowedPacket {
- return "", driver.ErrSkip
- }
- }
- if argPos != len(args) {
- return "", driver.ErrSkip
- }
- return string(buf), nil
-}
-
-func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
- if mc.closed.Load() {
- mc.log(ErrInvalidConn)
- return nil, driver.ErrBadConn
- }
- if len(args) != 0 {
- if !mc.cfg.InterpolateParams {
- return nil, driver.ErrSkip
- }
- // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement
- prepared, err := mc.interpolateParams(query, args)
- if err != nil {
- return nil, err
- }
- query = prepared
- }
-
- err := mc.exec(query)
- if err == nil {
- copied := mc.result
- return &copied, err
- }
- return nil, mc.markBadConn(err)
-}
-
-// Internal function to execute commands
-func (mc *mysqlConn) exec(query string) error {
- handleOk := mc.clearResult()
- // Send command
- if err := mc.writeCommandPacketStr(comQuery, query); err != nil {
- return mc.markBadConn(err)
- }
-
- // Read Result
- resLen, err := handleOk.readResultSetHeaderPacket()
- if err != nil {
- return err
- }
-
- if resLen > 0 {
- // columns
- if err := mc.readUntilEOF(); err != nil {
- return err
- }
-
- // rows
- if err := mc.readUntilEOF(); err != nil {
- return err
- }
- }
-
- return handleOk.discardResults()
-}
-
-func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
- return mc.query(query, args)
-}
-
-func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) {
- handleOk := mc.clearResult()
-
- if mc.closed.Load() {
- mc.log(ErrInvalidConn)
- return nil, driver.ErrBadConn
- }
- if len(args) != 0 {
- if !mc.cfg.InterpolateParams {
- return nil, driver.ErrSkip
- }
- // try client-side prepare to reduce roundtrip
- prepared, err := mc.interpolateParams(query, args)
- if err != nil {
- return nil, err
- }
- query = prepared
- }
- // Send command
- err := mc.writeCommandPacketStr(comQuery, query)
- if err == nil {
- // Read Result
- var resLen int
- resLen, err = handleOk.readResultSetHeaderPacket()
- if err == nil {
- rows := new(textRows)
- rows.mc = mc
-
- if resLen == 0 {
- rows.rs.done = true
-
- switch err := rows.NextResultSet(); err {
- case nil, io.EOF:
- return rows, nil
- default:
- return nil, err
- }
- }
-
- // Columns
- rows.rs.columns, err = mc.readColumns(resLen)
- return rows, err
- }
- }
- return nil, mc.markBadConn(err)
-}
-
-// Gets the value of the given MySQL System Variable
-// The returned byte slice is only valid until the next read
-func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
- // Send command
- handleOk := mc.clearResult()
- if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil {
- return nil, err
- }
-
- // Read Result
- resLen, err := handleOk.readResultSetHeaderPacket()
- if err == nil {
- rows := new(textRows)
- rows.mc = mc
- rows.rs.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
-
- if resLen > 0 {
- // Columns
- if err := mc.readUntilEOF(); err != nil {
- return nil, err
- }
- }
-
- dest := make([]driver.Value, resLen)
- if err = rows.readRow(dest); err == nil {
- return dest[0].([]byte), mc.readUntilEOF()
- }
- }
- return nil, err
-}
-
-// finish is called when the query has canceled.
-func (mc *mysqlConn) cancel(err error) {
- mc.canceled.Set(err)
- mc.cleanup()
-}
-
-// finish is called when the query has succeeded.
-func (mc *mysqlConn) finish() {
- if !mc.watching || mc.finished == nil {
- return
- }
- select {
- case mc.finished <- struct{}{}:
- mc.watching = false
- case <-mc.closech:
- }
-}
-
-// Ping implements driver.Pinger interface
-func (mc *mysqlConn) Ping(ctx context.Context) (err error) {
- if mc.closed.Load() {
- mc.log(ErrInvalidConn)
- return driver.ErrBadConn
- }
-
- if err = mc.watchCancel(ctx); err != nil {
- return
- }
- defer mc.finish()
-
- handleOk := mc.clearResult()
- if err = mc.writeCommandPacket(comPing); err != nil {
- return mc.markBadConn(err)
- }
-
- return handleOk.readResultOK()
-}
-
-// BeginTx implements driver.ConnBeginTx interface
-func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
- if mc.closed.Load() {
- return nil, driver.ErrBadConn
- }
-
- if err := mc.watchCancel(ctx); err != nil {
- return nil, err
- }
- defer mc.finish()
-
- if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault {
- level, err := mapIsolationLevel(opts.Isolation)
- if err != nil {
- return nil, err
- }
- err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level)
- if err != nil {
- return nil, err
- }
- }
-
- return mc.begin(opts.ReadOnly)
-}
-
-func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
- dargs, err := namedValueToValue(args)
- if err != nil {
- return nil, err
- }
-
- if err := mc.watchCancel(ctx); err != nil {
- return nil, err
- }
-
- rows, err := mc.query(query, dargs)
- if err != nil {
- mc.finish()
- return nil, err
- }
- rows.finish = mc.finish
- return rows, err
-}
-
-func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
- dargs, err := namedValueToValue(args)
- if err != nil {
- return nil, err
- }
-
- if err := mc.watchCancel(ctx); err != nil {
- return nil, err
- }
- defer mc.finish()
-
- return mc.Exec(query, dargs)
-}
-
-func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
- if err := mc.watchCancel(ctx); err != nil {
- return nil, err
- }
-
- stmt, err := mc.Prepare(query)
- mc.finish()
- if err != nil {
- return nil, err
- }
-
- select {
- default:
- case <-ctx.Done():
- stmt.Close()
- return nil, ctx.Err()
- }
- return stmt, nil
-}
-
-func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
- dargs, err := namedValueToValue(args)
- if err != nil {
- return nil, err
- }
-
- if err := stmt.mc.watchCancel(ctx); err != nil {
- return nil, err
- }
-
- rows, err := stmt.query(dargs)
- if err != nil {
- stmt.mc.finish()
- return nil, err
- }
- rows.finish = stmt.mc.finish
- return rows, err
-}
-
-func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
- dargs, err := namedValueToValue(args)
- if err != nil {
- return nil, err
- }
-
- if err := stmt.mc.watchCancel(ctx); err != nil {
- return nil, err
- }
- defer stmt.mc.finish()
-
- return stmt.Exec(dargs)
-}
-
-func (mc *mysqlConn) watchCancel(ctx context.Context) error {
- if mc.watching {
- // Reach here if canceled,
- // so the connection is already invalid
- mc.cleanup()
- return nil
- }
- // When ctx is already cancelled, don't watch it.
- if err := ctx.Err(); err != nil {
- return err
- }
- // When ctx is not cancellable, don't watch it.
- if ctx.Done() == nil {
- return nil
- }
- // When watcher is not alive, can't watch it.
- if mc.watcher == nil {
- return nil
- }
-
- mc.watching = true
- mc.watcher <- ctx
- return nil
-}
-
-func (mc *mysqlConn) startWatcher() {
- watcher := make(chan context.Context, 1)
- mc.watcher = watcher
- finished := make(chan struct{})
- mc.finished = finished
- go func() {
- for {
- var ctx context.Context
- select {
- case ctx = <-watcher:
- case <-mc.closech:
- return
- }
-
- select {
- case <-ctx.Done():
- mc.cancel(ctx.Err())
- case <-finished:
- case <-mc.closech:
- return
- }
- }
- }()
-}
-
-func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) {
- nv.Value, err = converter{}.ConvertValue(nv.Value)
- return
-}
-
-// ResetSession implements driver.SessionResetter.
-// (From Go 1.10)
-func (mc *mysqlConn) ResetSession(ctx context.Context) error {
- if mc.closed.Load() {
- return driver.ErrBadConn
- }
-
- // Perform a stale connection check. We only perform this check for
- // the first query on a connection that has been checked out of the
- // connection pool: a fresh connection from the pool is more likely
- // to be stale, and it has not performed any previous writes that
- // could cause data corruption, so it's safe to return ErrBadConn
- // if the check fails.
- if mc.cfg.CheckConnLiveness {
- conn := mc.netConn
- if mc.rawConn != nil {
- conn = mc.rawConn
- }
- var err error
- if mc.cfg.ReadTimeout != 0 {
- err = conn.SetReadDeadline(time.Now().Add(mc.cfg.ReadTimeout))
- }
- if err == nil {
- err = connCheck(conn)
- }
- if err != nil {
- mc.log("closing bad idle connection: ", err)
- return driver.ErrBadConn
- }
- }
-
- return nil
-}
-
-// IsValid implements driver.Validator interface
-// (From Go 1.15)
-func (mc *mysqlConn) IsValid() bool {
- return !mc.closed.Load()
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/connector.go b/vendor/github.com/go-sql-driver/mysql/connector.go
deleted file mode 100644
index b6707759..00000000
--- a/vendor/github.com/go-sql-driver/mysql/connector.go
+++ /dev/null
@@ -1,197 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "context"
- "database/sql/driver"
- "net"
- "os"
- "strconv"
- "strings"
-)
-
-type connector struct {
- cfg *Config // immutable private copy.
- encodedAttributes string // Encoded connection attributes.
-}
-
-func encodeConnectionAttributes(cfg *Config) string {
- connAttrsBuf := make([]byte, 0)
-
- // default connection attributes
- connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrClientName)
- connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrClientNameValue)
- connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrOS)
- connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrOSValue)
- connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrPlatform)
- connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrPlatformValue)
- connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrPid)
- connAttrsBuf = appendLengthEncodedString(connAttrsBuf, strconv.Itoa(os.Getpid()))
- serverHost, _, _ := net.SplitHostPort(cfg.Addr)
- if serverHost != "" {
- connAttrsBuf = appendLengthEncodedString(connAttrsBuf, connAttrServerHost)
- connAttrsBuf = appendLengthEncodedString(connAttrsBuf, serverHost)
- }
-
- // user-defined connection attributes
- for _, connAttr := range strings.Split(cfg.ConnectionAttributes, ",") {
- k, v, found := strings.Cut(connAttr, ":")
- if !found {
- continue
- }
- connAttrsBuf = appendLengthEncodedString(connAttrsBuf, k)
- connAttrsBuf = appendLengthEncodedString(connAttrsBuf, v)
- }
-
- return string(connAttrsBuf)
-}
-
-func newConnector(cfg *Config) *connector {
- encodedAttributes := encodeConnectionAttributes(cfg)
- return &connector{
- cfg: cfg,
- encodedAttributes: encodedAttributes,
- }
-}
-
-// Connect implements driver.Connector interface.
-// Connect returns a connection to the database.
-func (c *connector) Connect(ctx context.Context) (driver.Conn, error) {
- var err error
-
- // Invoke beforeConnect if present, with a copy of the configuration
- cfg := c.cfg
- if c.cfg.beforeConnect != nil {
- cfg = c.cfg.Clone()
- err = c.cfg.beforeConnect(ctx, cfg)
- if err != nil {
- return nil, err
- }
- }
-
- // New mysqlConn
- mc := &mysqlConn{
- maxAllowedPacket: maxPacketSize,
- maxWriteSize: maxPacketSize - 1,
- closech: make(chan struct{}),
- cfg: cfg,
- connector: c,
- }
- mc.parseTime = mc.cfg.ParseTime
-
- // Connect to Server
- dialsLock.RLock()
- dial, ok := dials[mc.cfg.Net]
- dialsLock.RUnlock()
- if ok {
- dctx := ctx
- if mc.cfg.Timeout > 0 {
- var cancel context.CancelFunc
- dctx, cancel = context.WithTimeout(ctx, c.cfg.Timeout)
- defer cancel()
- }
- mc.netConn, err = dial(dctx, mc.cfg.Addr)
- } else {
- nd := net.Dialer{Timeout: mc.cfg.Timeout}
- mc.netConn, err = nd.DialContext(ctx, mc.cfg.Net, mc.cfg.Addr)
- }
- if err != nil {
- return nil, err
- }
- mc.rawConn = mc.netConn
-
- // Enable TCP Keepalives on TCP connections
- if tc, ok := mc.netConn.(*net.TCPConn); ok {
- if err := tc.SetKeepAlive(true); err != nil {
- c.cfg.Logger.Print(err)
- }
- }
-
- // Call startWatcher for context support (From Go 1.8)
- mc.startWatcher()
- if err := mc.watchCancel(ctx); err != nil {
- mc.cleanup()
- return nil, err
- }
- defer mc.finish()
-
- mc.buf = newBuffer(mc.netConn)
-
- // Set I/O timeouts
- mc.buf.timeout = mc.cfg.ReadTimeout
- mc.writeTimeout = mc.cfg.WriteTimeout
-
- // Reading Handshake Initialization Packet
- authData, plugin, err := mc.readHandshakePacket()
- if err != nil {
- mc.cleanup()
- return nil, err
- }
-
- if plugin == "" {
- plugin = defaultAuthPlugin
- }
-
- // Send Client Authentication Packet
- authResp, err := mc.auth(authData, plugin)
- if err != nil {
- // try the default auth plugin, if using the requested plugin failed
- c.cfg.Logger.Print("could not use requested auth plugin '"+plugin+"': ", err.Error())
- plugin = defaultAuthPlugin
- authResp, err = mc.auth(authData, plugin)
- if err != nil {
- mc.cleanup()
- return nil, err
- }
- }
- if err = mc.writeHandshakeResponsePacket(authResp, plugin); err != nil {
- mc.cleanup()
- return nil, err
- }
-
- // Handle response to auth packet, switch methods if possible
- if err = mc.handleAuthResult(authData, plugin); err != nil {
- // Authentication failed and MySQL has already closed the connection
- // (https://dev.mysql.com/doc/internals/en/authentication-fails.html).
- // Do not send COM_QUIT, just cleanup and return the error.
- mc.cleanup()
- return nil, err
- }
-
- if mc.cfg.MaxAllowedPacket > 0 {
- mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket
- } else {
- // Get max allowed packet size
- maxap, err := mc.getSystemVar("max_allowed_packet")
- if err != nil {
- mc.Close()
- return nil, err
- }
- mc.maxAllowedPacket = stringToInt(maxap) - 1
- }
- if mc.maxAllowedPacket < maxPacketSize {
- mc.maxWriteSize = mc.maxAllowedPacket
- }
-
- // Handle DSN Params
- err = mc.handleParams()
- if err != nil {
- mc.Close()
- return nil, err
- }
-
- return mc, nil
-}
-
-// Driver implements driver.Connector interface.
-// Driver returns &MySQLDriver{}.
-func (c *connector) Driver() driver.Driver {
- return &MySQLDriver{}
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go
deleted file mode 100644
index 22526e03..00000000
--- a/vendor/github.com/go-sql-driver/mysql/const.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import "runtime"
-
-const (
- defaultAuthPlugin = "mysql_native_password"
- defaultMaxAllowedPacket = 64 << 20 // 64 MiB. See https://github.com/go-sql-driver/mysql/issues/1355
- minProtocolVersion = 10
- maxPacketSize = 1<<24 - 1
- timeFormat = "2006-01-02 15:04:05.999999"
-
- // Connection attributes
- // See https://dev.mysql.com/doc/refman/8.0/en/performance-schema-connection-attribute-tables.html#performance-schema-connection-attributes-available
- connAttrClientName = "_client_name"
- connAttrClientNameValue = "Go-MySQL-Driver"
- connAttrOS = "_os"
- connAttrOSValue = runtime.GOOS
- connAttrPlatform = "_platform"
- connAttrPlatformValue = runtime.GOARCH
- connAttrPid = "_pid"
- connAttrServerHost = "_server_host"
-)
-
-// MySQL constants documentation:
-// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
-
-const (
- iOK byte = 0x00
- iAuthMoreData byte = 0x01
- iLocalInFile byte = 0xfb
- iEOF byte = 0xfe
- iERR byte = 0xff
-)
-
-// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags
-type clientFlag uint32
-
-const (
- clientLongPassword clientFlag = 1 << iota
- clientFoundRows
- clientLongFlag
- clientConnectWithDB
- clientNoSchema
- clientCompress
- clientODBC
- clientLocalFiles
- clientIgnoreSpace
- clientProtocol41
- clientInteractive
- clientSSL
- clientIgnoreSIGPIPE
- clientTransactions
- clientReserved
- clientSecureConn
- clientMultiStatements
- clientMultiResults
- clientPSMultiResults
- clientPluginAuth
- clientConnectAttrs
- clientPluginAuthLenEncClientData
- clientCanHandleExpiredPasswords
- clientSessionTrack
- clientDeprecateEOF
-)
-
-const (
- comQuit byte = iota + 1
- comInitDB
- comQuery
- comFieldList
- comCreateDB
- comDropDB
- comRefresh
- comShutdown
- comStatistics
- comProcessInfo
- comConnect
- comProcessKill
- comDebug
- comPing
- comTime
- comDelayedInsert
- comChangeUser
- comBinlogDump
- comTableDump
- comConnectOut
- comRegisterSlave
- comStmtPrepare
- comStmtExecute
- comStmtSendLongData
- comStmtClose
- comStmtReset
- comSetOption
- comStmtFetch
-)
-
-// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType
-type fieldType byte
-
-const (
- fieldTypeDecimal fieldType = iota
- fieldTypeTiny
- fieldTypeShort
- fieldTypeLong
- fieldTypeFloat
- fieldTypeDouble
- fieldTypeNULL
- fieldTypeTimestamp
- fieldTypeLongLong
- fieldTypeInt24
- fieldTypeDate
- fieldTypeTime
- fieldTypeDateTime
- fieldTypeYear
- fieldTypeNewDate
- fieldTypeVarChar
- fieldTypeBit
-)
-const (
- fieldTypeJSON fieldType = iota + 0xf5
- fieldTypeNewDecimal
- fieldTypeEnum
- fieldTypeSet
- fieldTypeTinyBLOB
- fieldTypeMediumBLOB
- fieldTypeLongBLOB
- fieldTypeBLOB
- fieldTypeVarString
- fieldTypeString
- fieldTypeGeometry
-)
-
-type fieldFlag uint16
-
-const (
- flagNotNULL fieldFlag = 1 << iota
- flagPriKey
- flagUniqueKey
- flagMultipleKey
- flagBLOB
- flagUnsigned
- flagZeroFill
- flagBinary
- flagEnum
- flagAutoIncrement
- flagTimestamp
- flagSet
- flagUnknown1
- flagUnknown2
- flagUnknown3
- flagUnknown4
-)
-
-// http://dev.mysql.com/doc/internals/en/status-flags.html
-type statusFlag uint16
-
-const (
- statusInTrans statusFlag = 1 << iota
- statusInAutocommit
- statusReserved // Not in documentation
- statusMoreResultsExists
- statusNoGoodIndexUsed
- statusNoIndexUsed
- statusCursorExists
- statusLastRowSent
- statusDbDropped
- statusNoBackslashEscapes
- statusMetadataChanged
- statusQueryWasSlow
- statusPsOutParams
- statusInTransReadonly
- statusSessionStateChanged
-)
-
-const (
- cachingSha2PasswordRequestPublicKey = 2
- cachingSha2PasswordFastAuthSuccess = 3
- cachingSha2PasswordPerformFullAuthentication = 4
-)
diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go
deleted file mode 100644
index 105316b8..00000000
--- a/vendor/github.com/go-sql-driver/mysql/driver.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-// Package mysql provides a MySQL driver for Go's database/sql package.
-//
-// The driver should be used via the database/sql package:
-//
-// import "database/sql"
-// import _ "github.com/go-sql-driver/mysql"
-//
-// db, err := sql.Open("mysql", "user:password@/dbname")
-//
-// See https://github.com/go-sql-driver/mysql#usage for details
-package mysql
-
-import (
- "context"
- "database/sql"
- "database/sql/driver"
- "net"
- "sync"
-)
-
-// MySQLDriver is exported to make the driver directly accessible.
-// In general the driver is used via the database/sql package.
-type MySQLDriver struct{}
-
-// DialFunc is a function which can be used to establish the network connection.
-// Custom dial functions must be registered with RegisterDial
-//
-// Deprecated: users should register a DialContextFunc instead
-type DialFunc func(addr string) (net.Conn, error)
-
-// DialContextFunc is a function which can be used to establish the network connection.
-// Custom dial functions must be registered with RegisterDialContext
-type DialContextFunc func(ctx context.Context, addr string) (net.Conn, error)
-
-var (
- dialsLock sync.RWMutex
- dials map[string]DialContextFunc
-)
-
-// RegisterDialContext registers a custom dial function. It can then be used by the
-// network address mynet(addr), where mynet is the registered new network.
-// The current context for the connection and its address is passed to the dial function.
-func RegisterDialContext(net string, dial DialContextFunc) {
- dialsLock.Lock()
- defer dialsLock.Unlock()
- if dials == nil {
- dials = make(map[string]DialContextFunc)
- }
- dials[net] = dial
-}
-
-// DeregisterDialContext removes the custom dial function registered with the given net.
-func DeregisterDialContext(net string) {
- dialsLock.Lock()
- defer dialsLock.Unlock()
- if dials != nil {
- delete(dials, net)
- }
-}
-
-// RegisterDial registers a custom dial function. It can then be used by the
-// network address mynet(addr), where mynet is the registered new network.
-// addr is passed as a parameter to the dial function.
-//
-// Deprecated: users should call RegisterDialContext instead
-func RegisterDial(network string, dial DialFunc) {
- RegisterDialContext(network, func(_ context.Context, addr string) (net.Conn, error) {
- return dial(addr)
- })
-}
-
-// Open new Connection.
-// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how
-// the DSN string is formatted
-func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
- cfg, err := ParseDSN(dsn)
- if err != nil {
- return nil, err
- }
- c := newConnector(cfg)
- return c.Connect(context.Background())
-}
-
-// This variable can be replaced with -ldflags like below:
-// go build "-ldflags=-X github.com/go-sql-driver/mysql.driverName=custom"
-var driverName = "mysql"
-
-func init() {
- if driverName != "" {
- sql.Register(driverName, &MySQLDriver{})
- }
-}
-
-// NewConnector returns new driver.Connector.
-func NewConnector(cfg *Config) (driver.Connector, error) {
- cfg = cfg.Clone()
- // normalize the contents of cfg so calls to NewConnector have the same
- // behavior as MySQLDriver.OpenConnector
- if err := cfg.normalize(); err != nil {
- return nil, err
- }
- return newConnector(cfg), nil
-}
-
-// OpenConnector implements driver.DriverContext.
-func (d MySQLDriver) OpenConnector(dsn string) (driver.Connector, error) {
- cfg, err := ParseDSN(dsn)
- if err != nil {
- return nil, err
- }
- return newConnector(cfg), nil
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go
deleted file mode 100644
index 65f5a024..00000000
--- a/vendor/github.com/go-sql-driver/mysql/dsn.go
+++ /dev/null
@@ -1,653 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "bytes"
- "context"
- "crypto/rsa"
- "crypto/tls"
- "errors"
- "fmt"
- "math/big"
- "net"
- "net/url"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-var (
- errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?")
- errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)")
- errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name")
- errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations")
-)
-
-// Config is a configuration parsed from a DSN string.
-// If a new Config is created instead of being parsed from a DSN string,
-// the NewConfig function should be used, which sets default values.
-type Config struct {
- // non boolean fields
-
- User string // Username
- Passwd string // Password (requires User)
- Net string // Network (e.g. "tcp", "tcp6", "unix". default: "tcp")
- Addr string // Address (default: "127.0.0.1:3306" for "tcp" and "/tmp/mysql.sock" for "unix")
- DBName string // Database name
- Params map[string]string // Connection parameters
- ConnectionAttributes string // Connection Attributes, comma-delimited string of user-defined "key:value" pairs
- Collation string // Connection collation
- Loc *time.Location // Location for time.Time values
- MaxAllowedPacket int // Max packet size allowed
- ServerPubKey string // Server public key name
- TLSConfig string // TLS configuration name
- TLS *tls.Config // TLS configuration, its priority is higher than TLSConfig
- Timeout time.Duration // Dial timeout
- ReadTimeout time.Duration // I/O read timeout
- WriteTimeout time.Duration // I/O write timeout
- Logger Logger // Logger
-
- // boolean fields
-
- AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE
- AllowCleartextPasswords bool // Allows the cleartext client side plugin
- AllowFallbackToPlaintext bool // Allows fallback to unencrypted connection if server does not support TLS
- AllowNativePasswords bool // Allows the native password authentication method
- AllowOldPasswords bool // Allows the old insecure password method
- CheckConnLiveness bool // Check connections for liveness before using them
- ClientFoundRows bool // Return number of matching rows instead of rows changed
- ColumnsWithAlias bool // Prepend table alias to column names
- InterpolateParams bool // Interpolate placeholders into query string
- MultiStatements bool // Allow multiple statements in one query
- ParseTime bool // Parse time values to time.Time
- RejectReadOnly bool // Reject read-only connections
-
- // unexported fields. new options should be come here
-
- beforeConnect func(context.Context, *Config) error // Invoked before a connection is established
- pubKey *rsa.PublicKey // Server public key
- timeTruncate time.Duration // Truncate time.Time values to the specified duration
-}
-
-// Functional Options Pattern
-// https://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
-type Option func(*Config) error
-
-// NewConfig creates a new Config and sets default values.
-func NewConfig() *Config {
- cfg := &Config{
- Loc: time.UTC,
- MaxAllowedPacket: defaultMaxAllowedPacket,
- Logger: defaultLogger,
- AllowNativePasswords: true,
- CheckConnLiveness: true,
- }
-
- return cfg
-}
-
-// Apply applies the given options to the Config object.
-func (c *Config) Apply(opts ...Option) error {
- for _, opt := range opts {
- err := opt(c)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// TimeTruncate sets the time duration to truncate time.Time values in
-// query parameters.
-func TimeTruncate(d time.Duration) Option {
- return func(cfg *Config) error {
- cfg.timeTruncate = d
- return nil
- }
-}
-
-// BeforeConnect sets the function to be invoked before a connection is established.
-func BeforeConnect(fn func(context.Context, *Config) error) Option {
- return func(cfg *Config) error {
- cfg.beforeConnect = fn
- return nil
- }
-}
-
-func (cfg *Config) Clone() *Config {
- cp := *cfg
- if cp.TLS != nil {
- cp.TLS = cfg.TLS.Clone()
- }
- if len(cp.Params) > 0 {
- cp.Params = make(map[string]string, len(cfg.Params))
- for k, v := range cfg.Params {
- cp.Params[k] = v
- }
- }
- if cfg.pubKey != nil {
- cp.pubKey = &rsa.PublicKey{
- N: new(big.Int).Set(cfg.pubKey.N),
- E: cfg.pubKey.E,
- }
- }
- return &cp
-}
-
-func (cfg *Config) normalize() error {
- if cfg.InterpolateParams && cfg.Collation != "" && unsafeCollations[cfg.Collation] {
- return errInvalidDSNUnsafeCollation
- }
-
- // Set default network if empty
- if cfg.Net == "" {
- cfg.Net = "tcp"
- }
-
- // Set default address if empty
- if cfg.Addr == "" {
- switch cfg.Net {
- case "tcp":
- cfg.Addr = "127.0.0.1:3306"
- case "unix":
- cfg.Addr = "/tmp/mysql.sock"
- default:
- return errors.New("default addr for network '" + cfg.Net + "' unknown")
- }
- } else if cfg.Net == "tcp" {
- cfg.Addr = ensureHavePort(cfg.Addr)
- }
-
- if cfg.TLS == nil {
- switch cfg.TLSConfig {
- case "false", "":
- // don't set anything
- case "true":
- cfg.TLS = &tls.Config{}
- case "skip-verify":
- cfg.TLS = &tls.Config{InsecureSkipVerify: true}
- case "preferred":
- cfg.TLS = &tls.Config{InsecureSkipVerify: true}
- cfg.AllowFallbackToPlaintext = true
- default:
- cfg.TLS = getTLSConfigClone(cfg.TLSConfig)
- if cfg.TLS == nil {
- return errors.New("invalid value / unknown config name: " + cfg.TLSConfig)
- }
- }
- }
-
- if cfg.TLS != nil && cfg.TLS.ServerName == "" && !cfg.TLS.InsecureSkipVerify {
- host, _, err := net.SplitHostPort(cfg.Addr)
- if err == nil {
- cfg.TLS.ServerName = host
- }
- }
-
- if cfg.ServerPubKey != "" {
- cfg.pubKey = getServerPubKey(cfg.ServerPubKey)
- if cfg.pubKey == nil {
- return errors.New("invalid value / unknown server pub key name: " + cfg.ServerPubKey)
- }
- }
-
- if cfg.Logger == nil {
- cfg.Logger = defaultLogger
- }
-
- return nil
-}
-
-func writeDSNParam(buf *bytes.Buffer, hasParam *bool, name, value string) {
- buf.Grow(1 + len(name) + 1 + len(value))
- if !*hasParam {
- *hasParam = true
- buf.WriteByte('?')
- } else {
- buf.WriteByte('&')
- }
- buf.WriteString(name)
- buf.WriteByte('=')
- buf.WriteString(value)
-}
-
-// FormatDSN formats the given Config into a DSN string which can be passed to
-// the driver.
-//
-// Note: use [NewConnector] and [database/sql.OpenDB] to open a connection from a [*Config].
-func (cfg *Config) FormatDSN() string {
- var buf bytes.Buffer
-
- // [username[:password]@]
- if len(cfg.User) > 0 {
- buf.WriteString(cfg.User)
- if len(cfg.Passwd) > 0 {
- buf.WriteByte(':')
- buf.WriteString(cfg.Passwd)
- }
- buf.WriteByte('@')
- }
-
- // [protocol[(address)]]
- if len(cfg.Net) > 0 {
- buf.WriteString(cfg.Net)
- if len(cfg.Addr) > 0 {
- buf.WriteByte('(')
- buf.WriteString(cfg.Addr)
- buf.WriteByte(')')
- }
- }
-
- // /dbname
- buf.WriteByte('/')
- buf.WriteString(url.PathEscape(cfg.DBName))
-
- // [?param1=value1&...¶mN=valueN]
- hasParam := false
-
- if cfg.AllowAllFiles {
- hasParam = true
- buf.WriteString("?allowAllFiles=true")
- }
-
- if cfg.AllowCleartextPasswords {
- writeDSNParam(&buf, &hasParam, "allowCleartextPasswords", "true")
- }
-
- if cfg.AllowFallbackToPlaintext {
- writeDSNParam(&buf, &hasParam, "allowFallbackToPlaintext", "true")
- }
-
- if !cfg.AllowNativePasswords {
- writeDSNParam(&buf, &hasParam, "allowNativePasswords", "false")
- }
-
- if cfg.AllowOldPasswords {
- writeDSNParam(&buf, &hasParam, "allowOldPasswords", "true")
- }
-
- if !cfg.CheckConnLiveness {
- writeDSNParam(&buf, &hasParam, "checkConnLiveness", "false")
- }
-
- if cfg.ClientFoundRows {
- writeDSNParam(&buf, &hasParam, "clientFoundRows", "true")
- }
-
- if col := cfg.Collation; col != "" {
- writeDSNParam(&buf, &hasParam, "collation", col)
- }
-
- if cfg.ColumnsWithAlias {
- writeDSNParam(&buf, &hasParam, "columnsWithAlias", "true")
- }
-
- if cfg.InterpolateParams {
- writeDSNParam(&buf, &hasParam, "interpolateParams", "true")
- }
-
- if cfg.Loc != time.UTC && cfg.Loc != nil {
- writeDSNParam(&buf, &hasParam, "loc", url.QueryEscape(cfg.Loc.String()))
- }
-
- if cfg.MultiStatements {
- writeDSNParam(&buf, &hasParam, "multiStatements", "true")
- }
-
- if cfg.ParseTime {
- writeDSNParam(&buf, &hasParam, "parseTime", "true")
- }
-
- if cfg.timeTruncate > 0 {
- writeDSNParam(&buf, &hasParam, "timeTruncate", cfg.timeTruncate.String())
- }
-
- if cfg.ReadTimeout > 0 {
- writeDSNParam(&buf, &hasParam, "readTimeout", cfg.ReadTimeout.String())
- }
-
- if cfg.RejectReadOnly {
- writeDSNParam(&buf, &hasParam, "rejectReadOnly", "true")
- }
-
- if len(cfg.ServerPubKey) > 0 {
- writeDSNParam(&buf, &hasParam, "serverPubKey", url.QueryEscape(cfg.ServerPubKey))
- }
-
- if cfg.Timeout > 0 {
- writeDSNParam(&buf, &hasParam, "timeout", cfg.Timeout.String())
- }
-
- if len(cfg.TLSConfig) > 0 {
- writeDSNParam(&buf, &hasParam, "tls", url.QueryEscape(cfg.TLSConfig))
- }
-
- if cfg.WriteTimeout > 0 {
- writeDSNParam(&buf, &hasParam, "writeTimeout", cfg.WriteTimeout.String())
- }
-
- if cfg.MaxAllowedPacket != defaultMaxAllowedPacket {
- writeDSNParam(&buf, &hasParam, "maxAllowedPacket", strconv.Itoa(cfg.MaxAllowedPacket))
- }
-
- // other params
- if cfg.Params != nil {
- var params []string
- for param := range cfg.Params {
- params = append(params, param)
- }
- sort.Strings(params)
- for _, param := range params {
- writeDSNParam(&buf, &hasParam, param, url.QueryEscape(cfg.Params[param]))
- }
- }
-
- return buf.String()
-}
-
-// ParseDSN parses the DSN string to a Config
-func ParseDSN(dsn string) (cfg *Config, err error) {
- // New config with some default values
- cfg = NewConfig()
-
- // [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN]
- // Find the last '/' (since the password or the net addr might contain a '/')
- foundSlash := false
- for i := len(dsn) - 1; i >= 0; i-- {
- if dsn[i] == '/' {
- foundSlash = true
- var j, k int
-
- // left part is empty if i <= 0
- if i > 0 {
- // [username[:password]@][protocol[(address)]]
- // Find the last '@' in dsn[:i]
- for j = i; j >= 0; j-- {
- if dsn[j] == '@' {
- // username[:password]
- // Find the first ':' in dsn[:j]
- for k = 0; k < j; k++ {
- if dsn[k] == ':' {
- cfg.Passwd = dsn[k+1 : j]
- break
- }
- }
- cfg.User = dsn[:k]
-
- break
- }
- }
-
- // [protocol[(address)]]
- // Find the first '(' in dsn[j+1:i]
- for k = j + 1; k < i; k++ {
- if dsn[k] == '(' {
- // dsn[i-1] must be == ')' if an address is specified
- if dsn[i-1] != ')' {
- if strings.ContainsRune(dsn[k+1:i], ')') {
- return nil, errInvalidDSNUnescaped
- }
- return nil, errInvalidDSNAddr
- }
- cfg.Addr = dsn[k+1 : i-1]
- break
- }
- }
- cfg.Net = dsn[j+1 : k]
- }
-
- // dbname[?param1=value1&...¶mN=valueN]
- // Find the first '?' in dsn[i+1:]
- for j = i + 1; j < len(dsn); j++ {
- if dsn[j] == '?' {
- if err = parseDSNParams(cfg, dsn[j+1:]); err != nil {
- return
- }
- break
- }
- }
-
- dbname := dsn[i+1 : j]
- if cfg.DBName, err = url.PathUnescape(dbname); err != nil {
- return nil, fmt.Errorf("invalid dbname %q: %w", dbname, err)
- }
-
- break
- }
- }
-
- if !foundSlash && len(dsn) > 0 {
- return nil, errInvalidDSNNoSlash
- }
-
- if err = cfg.normalize(); err != nil {
- return nil, err
- }
- return
-}
-
-// parseDSNParams parses the DSN "query string"
-// Values must be url.QueryEscape'ed
-func parseDSNParams(cfg *Config, params string) (err error) {
- for _, v := range strings.Split(params, "&") {
- key, value, found := strings.Cut(v, "=")
- if !found {
- continue
- }
-
- // cfg params
- switch key {
- // Disable INFILE allowlist / enable all files
- case "allowAllFiles":
- var isBool bool
- cfg.AllowAllFiles, isBool = readBool(value)
- if !isBool {
- return errors.New("invalid bool value: " + value)
- }
-
- // Use cleartext authentication mode (MySQL 5.5.10+)
- case "allowCleartextPasswords":
- var isBool bool
- cfg.AllowCleartextPasswords, isBool = readBool(value)
- if !isBool {
- return errors.New("invalid bool value: " + value)
- }
-
- // Allow fallback to unencrypted connection if server does not support TLS
- case "allowFallbackToPlaintext":
- var isBool bool
- cfg.AllowFallbackToPlaintext, isBool = readBool(value)
- if !isBool {
- return errors.New("invalid bool value: " + value)
- }
-
- // Use native password authentication
- case "allowNativePasswords":
- var isBool bool
- cfg.AllowNativePasswords, isBool = readBool(value)
- if !isBool {
- return errors.New("invalid bool value: " + value)
- }
-
- // Use old authentication mode (pre MySQL 4.1)
- case "allowOldPasswords":
- var isBool bool
- cfg.AllowOldPasswords, isBool = readBool(value)
- if !isBool {
- return errors.New("invalid bool value: " + value)
- }
-
- // Check connections for Liveness before using them
- case "checkConnLiveness":
- var isBool bool
- cfg.CheckConnLiveness, isBool = readBool(value)
- if !isBool {
- return errors.New("invalid bool value: " + value)
- }
-
- // Switch "rowsAffected" mode
- case "clientFoundRows":
- var isBool bool
- cfg.ClientFoundRows, isBool = readBool(value)
- if !isBool {
- return errors.New("invalid bool value: " + value)
- }
-
- // Collation
- case "collation":
- cfg.Collation = value
-
- case "columnsWithAlias":
- var isBool bool
- cfg.ColumnsWithAlias, isBool = readBool(value)
- if !isBool {
- return errors.New("invalid bool value: " + value)
- }
-
- // Compression
- case "compress":
- return errors.New("compression not implemented yet")
-
- // Enable client side placeholder substitution
- case "interpolateParams":
- var isBool bool
- cfg.InterpolateParams, isBool = readBool(value)
- if !isBool {
- return errors.New("invalid bool value: " + value)
- }
-
- // Time Location
- case "loc":
- if value, err = url.QueryUnescape(value); err != nil {
- return
- }
- cfg.Loc, err = time.LoadLocation(value)
- if err != nil {
- return
- }
-
- // multiple statements in one query
- case "multiStatements":
- var isBool bool
- cfg.MultiStatements, isBool = readBool(value)
- if !isBool {
- return errors.New("invalid bool value: " + value)
- }
-
- // time.Time parsing
- case "parseTime":
- var isBool bool
- cfg.ParseTime, isBool = readBool(value)
- if !isBool {
- return errors.New("invalid bool value: " + value)
- }
-
- // time.Time truncation
- case "timeTruncate":
- cfg.timeTruncate, err = time.ParseDuration(value)
- if err != nil {
- return fmt.Errorf("invalid timeTruncate value: %v, error: %w", value, err)
- }
-
- // I/O read Timeout
- case "readTimeout":
- cfg.ReadTimeout, err = time.ParseDuration(value)
- if err != nil {
- return
- }
-
- // Reject read-only connections
- case "rejectReadOnly":
- var isBool bool
- cfg.RejectReadOnly, isBool = readBool(value)
- if !isBool {
- return errors.New("invalid bool value: " + value)
- }
-
- // Server public key
- case "serverPubKey":
- name, err := url.QueryUnescape(value)
- if err != nil {
- return fmt.Errorf("invalid value for server pub key name: %v", err)
- }
- cfg.ServerPubKey = name
-
- // Strict mode
- case "strict":
- panic("strict mode has been removed. See https://github.com/go-sql-driver/mysql/wiki/strict-mode")
-
- // Dial Timeout
- case "timeout":
- cfg.Timeout, err = time.ParseDuration(value)
- if err != nil {
- return
- }
-
- // TLS-Encryption
- case "tls":
- boolValue, isBool := readBool(value)
- if isBool {
- if boolValue {
- cfg.TLSConfig = "true"
- } else {
- cfg.TLSConfig = "false"
- }
- } else if vl := strings.ToLower(value); vl == "skip-verify" || vl == "preferred" {
- cfg.TLSConfig = vl
- } else {
- name, err := url.QueryUnescape(value)
- if err != nil {
- return fmt.Errorf("invalid value for TLS config name: %v", err)
- }
- cfg.TLSConfig = name
- }
-
- // I/O write Timeout
- case "writeTimeout":
- cfg.WriteTimeout, err = time.ParseDuration(value)
- if err != nil {
- return
- }
- case "maxAllowedPacket":
- cfg.MaxAllowedPacket, err = strconv.Atoi(value)
- if err != nil {
- return
- }
-
- // Connection attributes
- case "connectionAttributes":
- connectionAttributes, err := url.QueryUnescape(value)
- if err != nil {
- return fmt.Errorf("invalid connectionAttributes value: %v", err)
- }
- cfg.ConnectionAttributes = connectionAttributes
-
- default:
- // lazy init
- if cfg.Params == nil {
- cfg.Params = make(map[string]string)
- }
-
- if cfg.Params[key], err = url.QueryUnescape(value); err != nil {
- return
- }
- }
- }
-
- return
-}
-
-func ensureHavePort(addr string) string {
- if _, _, err := net.SplitHostPort(addr); err != nil {
- return net.JoinHostPort(addr, "3306")
- }
- return addr
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go
deleted file mode 100644
index a7ef8890..00000000
--- a/vendor/github.com/go-sql-driver/mysql/errors.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "errors"
- "fmt"
- "log"
- "os"
-)
-
-// Various errors the driver might return. Can change between driver versions.
-var (
- ErrInvalidConn = errors.New("invalid connection")
- ErrMalformPkt = errors.New("malformed packet")
- ErrNoTLS = errors.New("TLS requested but server does not support TLS")
- ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN")
- ErrNativePassword = errors.New("this user requires mysql native password authentication")
- ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
- ErrUnknownPlugin = errors.New("this authentication plugin is not supported")
- ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+")
- ErrPktSync = errors.New("commands out of sync. You can't run this command now")
- ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?")
- ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the `Config.MaxAllowedPacket`")
- ErrBusyBuffer = errors.New("busy buffer")
-
- // errBadConnNoWrite is used for connection errors where nothing was sent to the database yet.
- // If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn
- // to trigger a resend.
- // See https://github.com/go-sql-driver/mysql/pull/302
- errBadConnNoWrite = errors.New("bad connection")
-)
-
-var defaultLogger = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
-
-// Logger is used to log critical error messages.
-type Logger interface {
- Print(v ...any)
-}
-
-// NopLogger is a nop implementation of the Logger interface.
-type NopLogger struct{}
-
-// Print implements Logger interface.
-func (nl *NopLogger) Print(_ ...any) {}
-
-// SetLogger is used to set the default logger for critical errors.
-// The initial logger is os.Stderr.
-func SetLogger(logger Logger) error {
- if logger == nil {
- return errors.New("logger is nil")
- }
- defaultLogger = logger
- return nil
-}
-
-// MySQLError is an error type which represents a single MySQL error
-type MySQLError struct {
- Number uint16
- SQLState [5]byte
- Message string
-}
-
-func (me *MySQLError) Error() string {
- if me.SQLState != [5]byte{} {
- return fmt.Sprintf("Error %d (%s): %s", me.Number, me.SQLState, me.Message)
- }
-
- return fmt.Sprintf("Error %d: %s", me.Number, me.Message)
-}
-
-func (me *MySQLError) Is(err error) bool {
- if merr, ok := err.(*MySQLError); ok {
- return merr.Number == me.Number
- }
- return false
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go
deleted file mode 100644
index 28608424..00000000
--- a/vendor/github.com/go-sql-driver/mysql/fields.go
+++ /dev/null
@@ -1,222 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "database/sql"
- "reflect"
-)
-
-func (mf *mysqlField) typeDatabaseName() string {
- switch mf.fieldType {
- case fieldTypeBit:
- return "BIT"
- case fieldTypeBLOB:
- if mf.charSet != binaryCollationID {
- return "TEXT"
- }
- return "BLOB"
- case fieldTypeDate:
- return "DATE"
- case fieldTypeDateTime:
- return "DATETIME"
- case fieldTypeDecimal:
- return "DECIMAL"
- case fieldTypeDouble:
- return "DOUBLE"
- case fieldTypeEnum:
- return "ENUM"
- case fieldTypeFloat:
- return "FLOAT"
- case fieldTypeGeometry:
- return "GEOMETRY"
- case fieldTypeInt24:
- if mf.flags&flagUnsigned != 0 {
- return "UNSIGNED MEDIUMINT"
- }
- return "MEDIUMINT"
- case fieldTypeJSON:
- return "JSON"
- case fieldTypeLong:
- if mf.flags&flagUnsigned != 0 {
- return "UNSIGNED INT"
- }
- return "INT"
- case fieldTypeLongBLOB:
- if mf.charSet != binaryCollationID {
- return "LONGTEXT"
- }
- return "LONGBLOB"
- case fieldTypeLongLong:
- if mf.flags&flagUnsigned != 0 {
- return "UNSIGNED BIGINT"
- }
- return "BIGINT"
- case fieldTypeMediumBLOB:
- if mf.charSet != binaryCollationID {
- return "MEDIUMTEXT"
- }
- return "MEDIUMBLOB"
- case fieldTypeNewDate:
- return "DATE"
- case fieldTypeNewDecimal:
- return "DECIMAL"
- case fieldTypeNULL:
- return "NULL"
- case fieldTypeSet:
- return "SET"
- case fieldTypeShort:
- if mf.flags&flagUnsigned != 0 {
- return "UNSIGNED SMALLINT"
- }
- return "SMALLINT"
- case fieldTypeString:
- if mf.flags&flagEnum != 0 {
- return "ENUM"
- } else if mf.flags&flagSet != 0 {
- return "SET"
- }
- if mf.charSet == binaryCollationID {
- return "BINARY"
- }
- return "CHAR"
- case fieldTypeTime:
- return "TIME"
- case fieldTypeTimestamp:
- return "TIMESTAMP"
- case fieldTypeTiny:
- if mf.flags&flagUnsigned != 0 {
- return "UNSIGNED TINYINT"
- }
- return "TINYINT"
- case fieldTypeTinyBLOB:
- if mf.charSet != binaryCollationID {
- return "TINYTEXT"
- }
- return "TINYBLOB"
- case fieldTypeVarChar:
- if mf.charSet == binaryCollationID {
- return "VARBINARY"
- }
- return "VARCHAR"
- case fieldTypeVarString:
- if mf.charSet == binaryCollationID {
- return "VARBINARY"
- }
- return "VARCHAR"
- case fieldTypeYear:
- return "YEAR"
- default:
- return ""
- }
-}
-
-var (
- scanTypeFloat32 = reflect.TypeOf(float32(0))
- scanTypeFloat64 = reflect.TypeOf(float64(0))
- scanTypeInt8 = reflect.TypeOf(int8(0))
- scanTypeInt16 = reflect.TypeOf(int16(0))
- scanTypeInt32 = reflect.TypeOf(int32(0))
- scanTypeInt64 = reflect.TypeOf(int64(0))
- scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
- scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
- scanTypeNullTime = reflect.TypeOf(sql.NullTime{})
- scanTypeUint8 = reflect.TypeOf(uint8(0))
- scanTypeUint16 = reflect.TypeOf(uint16(0))
- scanTypeUint32 = reflect.TypeOf(uint32(0))
- scanTypeUint64 = reflect.TypeOf(uint64(0))
- scanTypeString = reflect.TypeOf("")
- scanTypeNullString = reflect.TypeOf(sql.NullString{})
- scanTypeBytes = reflect.TypeOf([]byte{})
- scanTypeUnknown = reflect.TypeOf(new(any))
-)
-
-type mysqlField struct {
- tableName string
- name string
- length uint32
- flags fieldFlag
- fieldType fieldType
- decimals byte
- charSet uint8
-}
-
-func (mf *mysqlField) scanType() reflect.Type {
- switch mf.fieldType {
- case fieldTypeTiny:
- if mf.flags&flagNotNULL != 0 {
- if mf.flags&flagUnsigned != 0 {
- return scanTypeUint8
- }
- return scanTypeInt8
- }
- return scanTypeNullInt
-
- case fieldTypeShort, fieldTypeYear:
- if mf.flags&flagNotNULL != 0 {
- if mf.flags&flagUnsigned != 0 {
- return scanTypeUint16
- }
- return scanTypeInt16
- }
- return scanTypeNullInt
-
- case fieldTypeInt24, fieldTypeLong:
- if mf.flags&flagNotNULL != 0 {
- if mf.flags&flagUnsigned != 0 {
- return scanTypeUint32
- }
- return scanTypeInt32
- }
- return scanTypeNullInt
-
- case fieldTypeLongLong:
- if mf.flags&flagNotNULL != 0 {
- if mf.flags&flagUnsigned != 0 {
- return scanTypeUint64
- }
- return scanTypeInt64
- }
- return scanTypeNullInt
-
- case fieldTypeFloat:
- if mf.flags&flagNotNULL != 0 {
- return scanTypeFloat32
- }
- return scanTypeNullFloat
-
- case fieldTypeDouble:
- if mf.flags&flagNotNULL != 0 {
- return scanTypeFloat64
- }
- return scanTypeNullFloat
-
- case fieldTypeBit, fieldTypeTinyBLOB, fieldTypeMediumBLOB, fieldTypeLongBLOB,
- fieldTypeBLOB, fieldTypeVarString, fieldTypeString, fieldTypeGeometry:
- if mf.charSet == binaryCollationID {
- return scanTypeBytes
- }
- fallthrough
- case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
- fieldTypeEnum, fieldTypeSet, fieldTypeJSON, fieldTypeTime:
- if mf.flags&flagNotNULL != 0 {
- return scanTypeString
- }
- return scanTypeNullString
-
- case fieldTypeDate, fieldTypeNewDate,
- fieldTypeTimestamp, fieldTypeDateTime:
- // NullTime is always returned for more consistent behavior as it can
- // handle both cases of parseTime regardless if the field is nullable.
- return scanTypeNullTime
-
- default:
- return scanTypeUnknown
- }
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go
deleted file mode 100644
index 0c8af9f1..00000000
--- a/vendor/github.com/go-sql-driver/mysql/infile.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "fmt"
- "io"
- "os"
- "strings"
- "sync"
-)
-
-var (
- fileRegister map[string]bool
- fileRegisterLock sync.RWMutex
- readerRegister map[string]func() io.Reader
- readerRegisterLock sync.RWMutex
-)
-
-// RegisterLocalFile adds the given file to the file allowlist,
-// so that it can be used by "LOAD DATA LOCAL INFILE ".
-// Alternatively you can allow the use of all local files with
-// the DSN parameter 'allowAllFiles=true'
-//
-// filePath := "/home/gopher/data.csv"
-// mysql.RegisterLocalFile(filePath)
-// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo")
-// if err != nil {
-// ...
-func RegisterLocalFile(filePath string) {
- fileRegisterLock.Lock()
- // lazy map init
- if fileRegister == nil {
- fileRegister = make(map[string]bool)
- }
-
- fileRegister[strings.Trim(filePath, `"`)] = true
- fileRegisterLock.Unlock()
-}
-
-// DeregisterLocalFile removes the given filepath from the allowlist.
-func DeregisterLocalFile(filePath string) {
- fileRegisterLock.Lock()
- delete(fileRegister, strings.Trim(filePath, `"`))
- fileRegisterLock.Unlock()
-}
-
-// RegisterReaderHandler registers a handler function which is used
-// to receive a io.Reader.
-// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::".
-// If the handler returns a io.ReadCloser Close() is called when the
-// request is finished.
-//
-// mysql.RegisterReaderHandler("data", func() io.Reader {
-// var csvReader io.Reader // Some Reader that returns CSV data
-// ... // Open Reader here
-// return csvReader
-// })
-// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo")
-// if err != nil {
-// ...
-func RegisterReaderHandler(name string, handler func() io.Reader) {
- readerRegisterLock.Lock()
- // lazy map init
- if readerRegister == nil {
- readerRegister = make(map[string]func() io.Reader)
- }
-
- readerRegister[name] = handler
- readerRegisterLock.Unlock()
-}
-
-// DeregisterReaderHandler removes the ReaderHandler function with
-// the given name from the registry.
-func DeregisterReaderHandler(name string) {
- readerRegisterLock.Lock()
- delete(readerRegister, name)
- readerRegisterLock.Unlock()
-}
-
-func deferredClose(err *error, closer io.Closer) {
- closeErr := closer.Close()
- if *err == nil {
- *err = closeErr
- }
-}
-
-const defaultPacketSize = 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP
-
-func (mc *okHandler) handleInFileRequest(name string) (err error) {
- var rdr io.Reader
- var data []byte
- packetSize := defaultPacketSize
- if mc.maxWriteSize < packetSize {
- packetSize = mc.maxWriteSize
- }
-
- if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader
- // The server might return an an absolute path. See issue #355.
- name = name[idx+8:]
-
- readerRegisterLock.RLock()
- handler, inMap := readerRegister[name]
- readerRegisterLock.RUnlock()
-
- if inMap {
- rdr = handler()
- if rdr != nil {
- if cl, ok := rdr.(io.Closer); ok {
- defer deferredClose(&err, cl)
- }
- } else {
- err = fmt.Errorf("reader '%s' is ", name)
- }
- } else {
- err = fmt.Errorf("reader '%s' is not registered", name)
- }
- } else { // File
- name = strings.Trim(name, `"`)
- fileRegisterLock.RLock()
- fr := fileRegister[name]
- fileRegisterLock.RUnlock()
- if mc.cfg.AllowAllFiles || fr {
- var file *os.File
- var fi os.FileInfo
-
- if file, err = os.Open(name); err == nil {
- defer deferredClose(&err, file)
-
- // get file size
- if fi, err = file.Stat(); err == nil {
- rdr = file
- if fileSize := int(fi.Size()); fileSize < packetSize {
- packetSize = fileSize
- }
- }
- }
- } else {
- err = fmt.Errorf("local file '%s' is not registered", name)
- }
- }
-
- // send content packets
- // if packetSize == 0, the Reader contains no data
- if err == nil && packetSize > 0 {
- data := make([]byte, 4+packetSize)
- var n int
- for err == nil {
- n, err = rdr.Read(data[4:])
- if n > 0 {
- if ioErr := mc.conn().writePacket(data[:4+n]); ioErr != nil {
- return ioErr
- }
- }
- }
- if err == io.EOF {
- err = nil
- }
- }
-
- // send empty packet (termination)
- if data == nil {
- data = make([]byte, 4)
- }
- if ioErr := mc.conn().writePacket(data[:4]); ioErr != nil {
- return ioErr
- }
-
- // read OK packet
- if err == nil {
- return mc.readResultOK()
- }
-
- mc.conn().readPacket()
- return err
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/nulltime.go b/vendor/github.com/go-sql-driver/mysql/nulltime.go
deleted file mode 100644
index 316a48aa..00000000
--- a/vendor/github.com/go-sql-driver/mysql/nulltime.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "database/sql"
- "database/sql/driver"
- "fmt"
- "time"
-)
-
-// NullTime represents a time.Time that may be NULL.
-// NullTime implements the Scanner interface so
-// it can be used as a scan destination:
-//
-// var nt NullTime
-// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt)
-// ...
-// if nt.Valid {
-// // use nt.Time
-// } else {
-// // NULL value
-// }
-//
-// # This NullTime implementation is not driver-specific
-//
-// Deprecated: NullTime doesn't honor the loc DSN parameter.
-// NullTime.Scan interprets a time as UTC, not the loc DSN parameter.
-// Use sql.NullTime instead.
-type NullTime sql.NullTime
-
-// Scan implements the Scanner interface.
-// The value type must be time.Time or string / []byte (formatted time-string),
-// otherwise Scan fails.
-func (nt *NullTime) Scan(value any) (err error) {
- if value == nil {
- nt.Time, nt.Valid = time.Time{}, false
- return
- }
-
- switch v := value.(type) {
- case time.Time:
- nt.Time, nt.Valid = v, true
- return
- case []byte:
- nt.Time, err = parseDateTime(v, time.UTC)
- nt.Valid = (err == nil)
- return
- case string:
- nt.Time, err = parseDateTime([]byte(v), time.UTC)
- nt.Valid = (err == nil)
- return
- }
-
- nt.Valid = false
- return fmt.Errorf("can't convert %T to time.Time", value)
-}
-
-// Value implements the driver Valuer interface.
-func (nt NullTime) Value() (driver.Value, error) {
- if !nt.Valid {
- return nil, nil
- }
- return nt.Time, nil
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go
deleted file mode 100644
index 90a34728..00000000
--- a/vendor/github.com/go-sql-driver/mysql/packets.go
+++ /dev/null
@@ -1,1406 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "bytes"
- "crypto/tls"
- "database/sql/driver"
- "encoding/binary"
- "encoding/json"
- "fmt"
- "io"
- "math"
- "strconv"
- "time"
-)
-
-// Packets documentation:
-// http://dev.mysql.com/doc/internals/en/client-server-protocol.html
-
-// Read packet to buffer 'data'
-func (mc *mysqlConn) readPacket() ([]byte, error) {
- var prevData []byte
- for {
- // read packet header
- data, err := mc.buf.readNext(4)
- if err != nil {
- if cerr := mc.canceled.Value(); cerr != nil {
- return nil, cerr
- }
- mc.log(err)
- mc.Close()
- return nil, ErrInvalidConn
- }
-
- // packet length [24 bit]
- pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16)
-
- // check packet sync [8 bit]
- if data[3] != mc.sequence {
- mc.Close()
- if data[3] > mc.sequence {
- return nil, ErrPktSyncMul
- }
- return nil, ErrPktSync
- }
- mc.sequence++
-
- // packets with length 0 terminate a previous packet which is a
- // multiple of (2^24)-1 bytes long
- if pktLen == 0 {
- // there was no previous packet
- if prevData == nil {
- mc.log(ErrMalformPkt)
- mc.Close()
- return nil, ErrInvalidConn
- }
-
- return prevData, nil
- }
-
- // read packet body [pktLen bytes]
- data, err = mc.buf.readNext(pktLen)
- if err != nil {
- if cerr := mc.canceled.Value(); cerr != nil {
- return nil, cerr
- }
- mc.log(err)
- mc.Close()
- return nil, ErrInvalidConn
- }
-
- // return data if this was the last packet
- if pktLen < maxPacketSize {
- // zero allocations for non-split packets
- if prevData == nil {
- return data, nil
- }
-
- return append(prevData, data...), nil
- }
-
- prevData = append(prevData, data...)
- }
-}
-
-// Write packet buffer 'data'
-func (mc *mysqlConn) writePacket(data []byte) error {
- pktLen := len(data) - 4
-
- if pktLen > mc.maxAllowedPacket {
- return ErrPktTooLarge
- }
-
- for {
- var size int
- if pktLen >= maxPacketSize {
- data[0] = 0xff
- data[1] = 0xff
- data[2] = 0xff
- size = maxPacketSize
- } else {
- data[0] = byte(pktLen)
- data[1] = byte(pktLen >> 8)
- data[2] = byte(pktLen >> 16)
- size = pktLen
- }
- data[3] = mc.sequence
-
- // Write packet
- if mc.writeTimeout > 0 {
- if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil {
- return err
- }
- }
-
- n, err := mc.netConn.Write(data[:4+size])
- if err == nil && n == 4+size {
- mc.sequence++
- if size != maxPacketSize {
- return nil
- }
- pktLen -= size
- data = data[size:]
- continue
- }
-
- // Handle error
- if err == nil { // n != len(data)
- mc.cleanup()
- mc.log(ErrMalformPkt)
- } else {
- if cerr := mc.canceled.Value(); cerr != nil {
- return cerr
- }
- if n == 0 && pktLen == len(data)-4 {
- // only for the first loop iteration when nothing was written yet
- return errBadConnNoWrite
- }
- mc.cleanup()
- mc.log(err)
- }
- return ErrInvalidConn
- }
-}
-
-/******************************************************************************
-* Initialization Process *
-******************************************************************************/
-
-// Handshake Initialization Packet
-// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake
-func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err error) {
- data, err = mc.readPacket()
- if err != nil {
- // for init we can rewrite this to ErrBadConn for sql.Driver to retry, since
- // in connection initialization we don't risk retrying non-idempotent actions.
- if err == ErrInvalidConn {
- return nil, "", driver.ErrBadConn
- }
- return
- }
-
- if data[0] == iERR {
- return nil, "", mc.handleErrorPacket(data)
- }
-
- // protocol version [1 byte]
- if data[0] < minProtocolVersion {
- return nil, "", fmt.Errorf(
- "unsupported protocol version %d. Version %d or higher is required",
- data[0],
- minProtocolVersion,
- )
- }
-
- // server version [null terminated string]
- // connection id [4 bytes]
- pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4
-
- // first part of the password cipher [8 bytes]
- authData := data[pos : pos+8]
-
- // (filler) always 0x00 [1 byte]
- pos += 8 + 1
-
- // capability flags (lower 2 bytes) [2 bytes]
- mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
- if mc.flags&clientProtocol41 == 0 {
- return nil, "", ErrOldProtocol
- }
- if mc.flags&clientSSL == 0 && mc.cfg.TLS != nil {
- if mc.cfg.AllowFallbackToPlaintext {
- mc.cfg.TLS = nil
- } else {
- return nil, "", ErrNoTLS
- }
- }
- pos += 2
-
- if len(data) > pos {
- // character set [1 byte]
- // status flags [2 bytes]
- // capability flags (upper 2 bytes) [2 bytes]
- // length of auth-plugin-data [1 byte]
- // reserved (all [00]) [10 bytes]
- pos += 1 + 2 + 2 + 1 + 10
-
- // second part of the password cipher [minimum 13 bytes],
- // where len=MAX(13, length of auth-plugin-data - 8)
- //
- // The web documentation is ambiguous about the length. However,
- // according to mysql-5.7/sql/auth/sql_authentication.cc line 538,
- // the 13th byte is "\0 byte, terminating the second part of
- // a scramble". So the second part of the password cipher is
- // a NULL terminated string that's at least 13 bytes with the
- // last byte being NULL.
- //
- // The official Python library uses the fixed length 12
- // which seems to work but technically could have a hidden bug.
- authData = append(authData, data[pos:pos+12]...)
- pos += 13
-
- // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2)
- // \NUL otherwise
- if end := bytes.IndexByte(data[pos:], 0x00); end != -1 {
- plugin = string(data[pos : pos+end])
- } else {
- plugin = string(data[pos:])
- }
-
- // make a memory safe copy of the cipher slice
- var b [20]byte
- copy(b[:], authData)
- return b[:], plugin, nil
- }
-
- // make a memory safe copy of the cipher slice
- var b [8]byte
- copy(b[:], authData)
- return b[:], plugin, nil
-}
-
-// Client Authentication Packet
-// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse
-func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string) error {
- // Adjust client flags based on server support
- clientFlags := clientProtocol41 |
- clientSecureConn |
- clientLongPassword |
- clientTransactions |
- clientLocalFiles |
- clientPluginAuth |
- clientMultiResults |
- clientConnectAttrs |
- mc.flags&clientLongFlag
-
- if mc.cfg.ClientFoundRows {
- clientFlags |= clientFoundRows
- }
-
- // To enable TLS / SSL
- if mc.cfg.TLS != nil {
- clientFlags |= clientSSL
- }
-
- if mc.cfg.MultiStatements {
- clientFlags |= clientMultiStatements
- }
-
- // encode length of the auth plugin data
- var authRespLEIBuf [9]byte
- authRespLen := len(authResp)
- authRespLEI := appendLengthEncodedInteger(authRespLEIBuf[:0], uint64(authRespLen))
- if len(authRespLEI) > 1 {
- // if the length can not be written in 1 byte, it must be written as a
- // length encoded integer
- clientFlags |= clientPluginAuthLenEncClientData
- }
-
- pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + len(authRespLEI) + len(authResp) + 21 + 1
-
- // To specify a db name
- if n := len(mc.cfg.DBName); n > 0 {
- clientFlags |= clientConnectWithDB
- pktLen += n + 1
- }
-
- // encode length of the connection attributes
- var connAttrsLEIBuf [9]byte
- connAttrsLen := len(mc.connector.encodedAttributes)
- connAttrsLEI := appendLengthEncodedInteger(connAttrsLEIBuf[:0], uint64(connAttrsLen))
- pktLen += len(connAttrsLEI) + len(mc.connector.encodedAttributes)
-
- // Calculate packet length and get buffer with that size
- data, err := mc.buf.takeBuffer(pktLen + 4)
- if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- mc.log(err)
- return errBadConnNoWrite
- }
-
- // ClientFlags [32 bit]
- data[4] = byte(clientFlags)
- data[5] = byte(clientFlags >> 8)
- data[6] = byte(clientFlags >> 16)
- data[7] = byte(clientFlags >> 24)
-
- // MaxPacketSize [32 bit] (none)
- data[8] = 0x00
- data[9] = 0x00
- data[10] = 0x00
- data[11] = 0x00
-
- // Collation ID [1 byte]
- cname := mc.cfg.Collation
- if cname == "" {
- cname = defaultCollation
- }
- var found bool
- data[12], found = collations[cname]
- if !found {
- // Note possibility for false negatives:
- // could be triggered although the collation is valid if the
- // collations map does not contain entries the server supports.
- return fmt.Errorf("unknown collation: %q", cname)
- }
-
- // Filler [23 bytes] (all 0x00)
- pos := 13
- for ; pos < 13+23; pos++ {
- data[pos] = 0
- }
-
- // SSL Connection Request Packet
- // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest
- if mc.cfg.TLS != nil {
- // Send TLS / SSL request packet
- if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil {
- return err
- }
-
- // Switch to TLS
- tlsConn := tls.Client(mc.netConn, mc.cfg.TLS)
- if err := tlsConn.Handshake(); err != nil {
- return err
- }
- mc.netConn = tlsConn
- mc.buf.nc = tlsConn
- }
-
- // User [null terminated string]
- if len(mc.cfg.User) > 0 {
- pos += copy(data[pos:], mc.cfg.User)
- }
- data[pos] = 0x00
- pos++
-
- // Auth Data [length encoded integer]
- pos += copy(data[pos:], authRespLEI)
- pos += copy(data[pos:], authResp)
-
- // Databasename [null terminated string]
- if len(mc.cfg.DBName) > 0 {
- pos += copy(data[pos:], mc.cfg.DBName)
- data[pos] = 0x00
- pos++
- }
-
- pos += copy(data[pos:], plugin)
- data[pos] = 0x00
- pos++
-
- // Connection Attributes
- pos += copy(data[pos:], connAttrsLEI)
- pos += copy(data[pos:], []byte(mc.connector.encodedAttributes))
-
- // Send Auth packet
- return mc.writePacket(data[:pos])
-}
-
-// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
-func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error {
- pktLen := 4 + len(authData)
- data, err := mc.buf.takeSmallBuffer(pktLen)
- if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- mc.log(err)
- return errBadConnNoWrite
- }
-
- // Add the auth data [EOF]
- copy(data[4:], authData)
- return mc.writePacket(data)
-}
-
-/******************************************************************************
-* Command Packets *
-******************************************************************************/
-
-func (mc *mysqlConn) writeCommandPacket(command byte) error {
- // Reset Packet Sequence
- mc.sequence = 0
-
- data, err := mc.buf.takeSmallBuffer(4 + 1)
- if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- mc.log(err)
- return errBadConnNoWrite
- }
-
- // Add command byte
- data[4] = command
-
- // Send CMD packet
- return mc.writePacket(data)
-}
-
-func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
- // Reset Packet Sequence
- mc.sequence = 0
-
- pktLen := 1 + len(arg)
- data, err := mc.buf.takeBuffer(pktLen + 4)
- if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- mc.log(err)
- return errBadConnNoWrite
- }
-
- // Add command byte
- data[4] = command
-
- // Add arg
- copy(data[5:], arg)
-
- // Send CMD packet
- return mc.writePacket(data)
-}
-
-func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
- // Reset Packet Sequence
- mc.sequence = 0
-
- data, err := mc.buf.takeSmallBuffer(4 + 1 + 4)
- if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- mc.log(err)
- return errBadConnNoWrite
- }
-
- // Add command byte
- data[4] = command
-
- // Add arg [32 bit]
- data[5] = byte(arg)
- data[6] = byte(arg >> 8)
- data[7] = byte(arg >> 16)
- data[8] = byte(arg >> 24)
-
- // Send CMD packet
- return mc.writePacket(data)
-}
-
-/******************************************************************************
-* Result Packets *
-******************************************************************************/
-
-func (mc *mysqlConn) readAuthResult() ([]byte, string, error) {
- data, err := mc.readPacket()
- if err != nil {
- return nil, "", err
- }
-
- // packet indicator
- switch data[0] {
-
- case iOK:
- // resultUnchanged, since auth happens before any queries or
- // commands have been executed.
- return nil, "", mc.resultUnchanged().handleOkPacket(data)
-
- case iAuthMoreData:
- return data[1:], "", err
-
- case iEOF:
- if len(data) == 1 {
- // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
- return nil, "mysql_old_password", nil
- }
- pluginEndIndex := bytes.IndexByte(data, 0x00)
- if pluginEndIndex < 0 {
- return nil, "", ErrMalformPkt
- }
- plugin := string(data[1:pluginEndIndex])
- authData := data[pluginEndIndex+1:]
- return authData, plugin, nil
-
- default: // Error otherwise
- return nil, "", mc.handleErrorPacket(data)
- }
-}
-
-// Returns error if Packet is not a 'Result OK'-Packet
-func (mc *okHandler) readResultOK() error {
- data, err := mc.conn().readPacket()
- if err != nil {
- return err
- }
-
- if data[0] == iOK {
- return mc.handleOkPacket(data)
- }
- return mc.conn().handleErrorPacket(data)
-}
-
-// Result Set Header Packet
-// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset
-func (mc *okHandler) readResultSetHeaderPacket() (int, error) {
- // handleOkPacket replaces both values; other cases leave the values unchanged.
- mc.result.affectedRows = append(mc.result.affectedRows, 0)
- mc.result.insertIds = append(mc.result.insertIds, 0)
-
- data, err := mc.conn().readPacket()
- if err == nil {
- switch data[0] {
-
- case iOK:
- return 0, mc.handleOkPacket(data)
-
- case iERR:
- return 0, mc.conn().handleErrorPacket(data)
-
- case iLocalInFile:
- return 0, mc.handleInFileRequest(string(data[1:]))
- }
-
- // column count
- num, _, _ := readLengthEncodedInteger(data)
- // ignore remaining data in the packet. see #1478.
- return int(num), nil
- }
- return 0, err
-}
-
-// Error Packet
-// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet
-func (mc *mysqlConn) handleErrorPacket(data []byte) error {
- if data[0] != iERR {
- return ErrMalformPkt
- }
-
- // 0xff [1 byte]
-
- // Error Number [16 bit uint]
- errno := binary.LittleEndian.Uint16(data[1:3])
-
- // 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION
- // 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover)
- if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly {
- // Oops; we are connected to a read-only connection, and won't be able
- // to issue any write statements. Since RejectReadOnly is configured,
- // we throw away this connection hoping this one would have write
- // permission. This is specifically for a possible race condition
- // during failover (e.g. on AWS Aurora). See README.md for more.
- //
- // We explicitly close the connection before returning
- // driver.ErrBadConn to ensure that `database/sql` purges this
- // connection and initiates a new one for next statement next time.
- mc.Close()
- return driver.ErrBadConn
- }
-
- me := &MySQLError{Number: errno}
-
- pos := 3
-
- // SQL State [optional: # + 5bytes string]
- if data[3] == 0x23 {
- copy(me.SQLState[:], data[4:4+5])
- pos = 9
- }
-
- // Error Message [string]
- me.Message = string(data[pos:])
-
- return me
-}
-
-func readStatus(b []byte) statusFlag {
- return statusFlag(b[0]) | statusFlag(b[1])<<8
-}
-
-// Returns an instance of okHandler for codepaths where mysqlConn.result doesn't
-// need to be cleared first (e.g. during authentication, or while additional
-// resultsets are being fetched.)
-func (mc *mysqlConn) resultUnchanged() *okHandler {
- return (*okHandler)(mc)
-}
-
-// okHandler represents the state of the connection when mysqlConn.result has
-// been prepared for processing of OK packets.
-//
-// To correctly populate mysqlConn.result (updated by handleOkPacket()), all
-// callpaths must either:
-//
-// 1. first clear it using clearResult(), or
-// 2. confirm that they don't need to (by calling resultUnchanged()).
-//
-// Both return an instance of type *okHandler.
-type okHandler mysqlConn
-
-// Exposes the underlying type's methods.
-func (mc *okHandler) conn() *mysqlConn {
- return (*mysqlConn)(mc)
-}
-
-// clearResult clears the connection's stored affectedRows and insertIds
-// fields.
-//
-// It returns a handler that can process OK responses.
-func (mc *mysqlConn) clearResult() *okHandler {
- mc.result = mysqlResult{}
- return (*okHandler)(mc)
-}
-
-// Ok Packet
-// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet
-func (mc *okHandler) handleOkPacket(data []byte) error {
- var n, m int
- var affectedRows, insertId uint64
-
- // 0x00 [1 byte]
-
- // Affected rows [Length Coded Binary]
- affectedRows, _, n = readLengthEncodedInteger(data[1:])
-
- // Insert id [Length Coded Binary]
- insertId, _, m = readLengthEncodedInteger(data[1+n:])
-
- // Update for the current statement result (only used by
- // readResultSetHeaderPacket).
- if len(mc.result.affectedRows) > 0 {
- mc.result.affectedRows[len(mc.result.affectedRows)-1] = int64(affectedRows)
- }
- if len(mc.result.insertIds) > 0 {
- mc.result.insertIds[len(mc.result.insertIds)-1] = int64(insertId)
- }
-
- // server_status [2 bytes]
- mc.status = readStatus(data[1+n+m : 1+n+m+2])
- if mc.status&statusMoreResultsExists != 0 {
- return nil
- }
-
- // warning count [2 bytes]
-
- return nil
-}
-
-// Read Packets as Field Packets until EOF-Packet or an Error appears
-// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41
-func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
- columns := make([]mysqlField, count)
-
- for i := 0; ; i++ {
- data, err := mc.readPacket()
- if err != nil {
- return nil, err
- }
-
- // EOF Packet
- if data[0] == iEOF && (len(data) == 5 || len(data) == 1) {
- if i == count {
- return columns, nil
- }
- return nil, fmt.Errorf("column count mismatch n:%d len:%d", count, len(columns))
- }
-
- // Catalog
- pos, err := skipLengthEncodedString(data)
- if err != nil {
- return nil, err
- }
-
- // Database [len coded string]
- n, err := skipLengthEncodedString(data[pos:])
- if err != nil {
- return nil, err
- }
- pos += n
-
- // Table [len coded string]
- if mc.cfg.ColumnsWithAlias {
- tableName, _, n, err := readLengthEncodedString(data[pos:])
- if err != nil {
- return nil, err
- }
- pos += n
- columns[i].tableName = string(tableName)
- } else {
- n, err = skipLengthEncodedString(data[pos:])
- if err != nil {
- return nil, err
- }
- pos += n
- }
-
- // Original table [len coded string]
- n, err = skipLengthEncodedString(data[pos:])
- if err != nil {
- return nil, err
- }
- pos += n
-
- // Name [len coded string]
- name, _, n, err := readLengthEncodedString(data[pos:])
- if err != nil {
- return nil, err
- }
- columns[i].name = string(name)
- pos += n
-
- // Original name [len coded string]
- n, err = skipLengthEncodedString(data[pos:])
- if err != nil {
- return nil, err
- }
- pos += n
-
- // Filler [uint8]
- pos++
-
- // Charset [charset, collation uint8]
- columns[i].charSet = data[pos]
- pos += 2
-
- // Length [uint32]
- columns[i].length = binary.LittleEndian.Uint32(data[pos : pos+4])
- pos += 4
-
- // Field type [uint8]
- columns[i].fieldType = fieldType(data[pos])
- pos++
-
- // Flags [uint16]
- columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2]))
- pos += 2
-
- // Decimals [uint8]
- columns[i].decimals = data[pos]
- //pos++
-
- // Default value [len coded binary]
- //if pos < len(data) {
- // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:])
- //}
- }
-}
-
-// Read Packets as Field Packets until EOF-Packet or an Error appears
-// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow
-func (rows *textRows) readRow(dest []driver.Value) error {
- mc := rows.mc
-
- if rows.rs.done {
- return io.EOF
- }
-
- data, err := mc.readPacket()
- if err != nil {
- return err
- }
-
- // EOF Packet
- if data[0] == iEOF && len(data) == 5 {
- // server_status [2 bytes]
- rows.mc.status = readStatus(data[3:])
- rows.rs.done = true
- if !rows.HasNextResultSet() {
- rows.mc = nil
- }
- return io.EOF
- }
- if data[0] == iERR {
- rows.mc = nil
- return mc.handleErrorPacket(data)
- }
-
- // RowSet Packet
- var (
- n int
- isNull bool
- pos int = 0
- )
-
- for i := range dest {
- // Read bytes and convert to string
- var buf []byte
- buf, isNull, n, err = readLengthEncodedString(data[pos:])
- pos += n
-
- if err != nil {
- return err
- }
-
- if isNull {
- dest[i] = nil
- continue
- }
-
- switch rows.rs.columns[i].fieldType {
- case fieldTypeTimestamp,
- fieldTypeDateTime,
- fieldTypeDate,
- fieldTypeNewDate:
- if mc.parseTime {
- dest[i], err = parseDateTime(buf, mc.cfg.Loc)
- } else {
- dest[i] = buf
- }
-
- case fieldTypeTiny, fieldTypeShort, fieldTypeInt24, fieldTypeYear, fieldTypeLong:
- dest[i], err = strconv.ParseInt(string(buf), 10, 64)
-
- case fieldTypeLongLong:
- if rows.rs.columns[i].flags&flagUnsigned != 0 {
- dest[i], err = strconv.ParseUint(string(buf), 10, 64)
- } else {
- dest[i], err = strconv.ParseInt(string(buf), 10, 64)
- }
-
- case fieldTypeFloat:
- var d float64
- d, err = strconv.ParseFloat(string(buf), 32)
- dest[i] = float32(d)
-
- case fieldTypeDouble:
- dest[i], err = strconv.ParseFloat(string(buf), 64)
-
- default:
- dest[i] = buf
- }
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// Reads Packets until EOF-Packet or an Error appears. Returns count of Packets read
-func (mc *mysqlConn) readUntilEOF() error {
- for {
- data, err := mc.readPacket()
- if err != nil {
- return err
- }
-
- switch data[0] {
- case iERR:
- return mc.handleErrorPacket(data)
- case iEOF:
- if len(data) == 5 {
- mc.status = readStatus(data[3:])
- }
- return nil
- }
- }
-}
-
-/******************************************************************************
-* Prepared Statements *
-******************************************************************************/
-
-// Prepare Result Packets
-// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html
-func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) {
- data, err := stmt.mc.readPacket()
- if err == nil {
- // packet indicator [1 byte]
- if data[0] != iOK {
- return 0, stmt.mc.handleErrorPacket(data)
- }
-
- // statement id [4 bytes]
- stmt.id = binary.LittleEndian.Uint32(data[1:5])
-
- // Column count [16 bit uint]
- columnCount := binary.LittleEndian.Uint16(data[5:7])
-
- // Param count [16 bit uint]
- stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9]))
-
- // Reserved [8 bit]
-
- // Warning count [16 bit uint]
-
- return columnCount, nil
- }
- return 0, err
-}
-
-// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html
-func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error {
- maxLen := stmt.mc.maxAllowedPacket - 1
- pktLen := maxLen
-
- // After the header (bytes 0-3) follows before the data:
- // 1 byte command
- // 4 bytes stmtID
- // 2 bytes paramID
- const dataOffset = 1 + 4 + 2
-
- // Cannot use the write buffer since
- // a) the buffer is too small
- // b) it is in use
- data := make([]byte, 4+1+4+2+len(arg))
-
- copy(data[4+dataOffset:], arg)
-
- for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset {
- if dataOffset+argLen < maxLen {
- pktLen = dataOffset + argLen
- }
-
- stmt.mc.sequence = 0
- // Add command byte [1 byte]
- data[4] = comStmtSendLongData
-
- // Add stmtID [32 bit]
- data[5] = byte(stmt.id)
- data[6] = byte(stmt.id >> 8)
- data[7] = byte(stmt.id >> 16)
- data[8] = byte(stmt.id >> 24)
-
- // Add paramID [16 bit]
- data[9] = byte(paramID)
- data[10] = byte(paramID >> 8)
-
- // Send CMD packet
- err := stmt.mc.writePacket(data[:4+pktLen])
- if err == nil {
- data = data[pktLen-dataOffset:]
- continue
- }
- return err
-
- }
-
- // Reset Packet Sequence
- stmt.mc.sequence = 0
- return nil
-}
-
-// Execute Prepared Statement
-// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html
-func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
- if len(args) != stmt.paramCount {
- return fmt.Errorf(
- "argument count mismatch (got: %d; has: %d)",
- len(args),
- stmt.paramCount,
- )
- }
-
- const minPktLen = 4 + 1 + 4 + 1 + 4
- mc := stmt.mc
-
- // Determine threshold dynamically to avoid packet size shortage.
- longDataSize := mc.maxAllowedPacket / (stmt.paramCount + 1)
- if longDataSize < 64 {
- longDataSize = 64
- }
-
- // Reset packet-sequence
- mc.sequence = 0
-
- var data []byte
- var err error
-
- if len(args) == 0 {
- data, err = mc.buf.takeBuffer(minPktLen)
- } else {
- data, err = mc.buf.takeCompleteBuffer()
- // In this case the len(data) == cap(data) which is used to optimise the flow below.
- }
- if err != nil {
- // cannot take the buffer. Something must be wrong with the connection
- mc.log(err)
- return errBadConnNoWrite
- }
-
- // command [1 byte]
- data[4] = comStmtExecute
-
- // statement_id [4 bytes]
- data[5] = byte(stmt.id)
- data[6] = byte(stmt.id >> 8)
- data[7] = byte(stmt.id >> 16)
- data[8] = byte(stmt.id >> 24)
-
- // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte]
- data[9] = 0x00
-
- // iteration_count (uint32(1)) [4 bytes]
- data[10] = 0x01
- data[11] = 0x00
- data[12] = 0x00
- data[13] = 0x00
-
- if len(args) > 0 {
- pos := minPktLen
-
- var nullMask []byte
- if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= cap(data) {
- // buffer has to be extended but we don't know by how much so
- // we depend on append after all data with known sizes fit.
- // We stop at that because we deal with a lot of columns here
- // which makes the required allocation size hard to guess.
- tmp := make([]byte, pos+maskLen+typesLen)
- copy(tmp[:pos], data[:pos])
- data = tmp
- nullMask = data[pos : pos+maskLen]
- // No need to clean nullMask as make ensures that.
- pos += maskLen
- } else {
- nullMask = data[pos : pos+maskLen]
- for i := range nullMask {
- nullMask[i] = 0
- }
- pos += maskLen
- }
-
- // newParameterBoundFlag 1 [1 byte]
- data[pos] = 0x01
- pos++
-
- // type of each parameter [len(args)*2 bytes]
- paramTypes := data[pos:]
- pos += len(args) * 2
-
- // value of each parameter [n bytes]
- paramValues := data[pos:pos]
- valuesCap := cap(paramValues)
-
- for i, arg := range args {
- // build NULL-bitmap
- if arg == nil {
- nullMask[i/8] |= 1 << (uint(i) & 7)
- paramTypes[i+i] = byte(fieldTypeNULL)
- paramTypes[i+i+1] = 0x00
- continue
- }
-
- if v, ok := arg.(json.RawMessage); ok {
- arg = []byte(v)
- }
- // cache types and values
- switch v := arg.(type) {
- case int64:
- paramTypes[i+i] = byte(fieldTypeLongLong)
- paramTypes[i+i+1] = 0x00
-
- if cap(paramValues)-len(paramValues)-8 >= 0 {
- paramValues = paramValues[:len(paramValues)+8]
- binary.LittleEndian.PutUint64(
- paramValues[len(paramValues)-8:],
- uint64(v),
- )
- } else {
- paramValues = append(paramValues,
- uint64ToBytes(uint64(v))...,
- )
- }
-
- case uint64:
- paramTypes[i+i] = byte(fieldTypeLongLong)
- paramTypes[i+i+1] = 0x80 // type is unsigned
-
- if cap(paramValues)-len(paramValues)-8 >= 0 {
- paramValues = paramValues[:len(paramValues)+8]
- binary.LittleEndian.PutUint64(
- paramValues[len(paramValues)-8:],
- uint64(v),
- )
- } else {
- paramValues = append(paramValues,
- uint64ToBytes(uint64(v))...,
- )
- }
-
- case float64:
- paramTypes[i+i] = byte(fieldTypeDouble)
- paramTypes[i+i+1] = 0x00
-
- if cap(paramValues)-len(paramValues)-8 >= 0 {
- paramValues = paramValues[:len(paramValues)+8]
- binary.LittleEndian.PutUint64(
- paramValues[len(paramValues)-8:],
- math.Float64bits(v),
- )
- } else {
- paramValues = append(paramValues,
- uint64ToBytes(math.Float64bits(v))...,
- )
- }
-
- case bool:
- paramTypes[i+i] = byte(fieldTypeTiny)
- paramTypes[i+i+1] = 0x00
-
- if v {
- paramValues = append(paramValues, 0x01)
- } else {
- paramValues = append(paramValues, 0x00)
- }
-
- case []byte:
- // Common case (non-nil value) first
- if v != nil {
- paramTypes[i+i] = byte(fieldTypeString)
- paramTypes[i+i+1] = 0x00
-
- if len(v) < longDataSize {
- paramValues = appendLengthEncodedInteger(paramValues,
- uint64(len(v)),
- )
- paramValues = append(paramValues, v...)
- } else {
- if err := stmt.writeCommandLongData(i, v); err != nil {
- return err
- }
- }
- continue
- }
-
- // Handle []byte(nil) as a NULL value
- nullMask[i/8] |= 1 << (uint(i) & 7)
- paramTypes[i+i] = byte(fieldTypeNULL)
- paramTypes[i+i+1] = 0x00
-
- case string:
- paramTypes[i+i] = byte(fieldTypeString)
- paramTypes[i+i+1] = 0x00
-
- if len(v) < longDataSize {
- paramValues = appendLengthEncodedInteger(paramValues,
- uint64(len(v)),
- )
- paramValues = append(paramValues, v...)
- } else {
- if err := stmt.writeCommandLongData(i, []byte(v)); err != nil {
- return err
- }
- }
-
- case time.Time:
- paramTypes[i+i] = byte(fieldTypeString)
- paramTypes[i+i+1] = 0x00
-
- var a [64]byte
- var b = a[:0]
-
- if v.IsZero() {
- b = append(b, "0000-00-00"...)
- } else {
- b, err = appendDateTime(b, v.In(mc.cfg.Loc), mc.cfg.timeTruncate)
- if err != nil {
- return err
- }
- }
-
- paramValues = appendLengthEncodedInteger(paramValues,
- uint64(len(b)),
- )
- paramValues = append(paramValues, b...)
-
- default:
- return fmt.Errorf("cannot convert type: %T", arg)
- }
- }
-
- // Check if param values exceeded the available buffer
- // In that case we must build the data packet with the new values buffer
- if valuesCap != cap(paramValues) {
- data = append(data[:pos], paramValues...)
- if err = mc.buf.store(data); err != nil {
- mc.log(err)
- return errBadConnNoWrite
- }
- }
-
- pos += len(paramValues)
- data = data[:pos]
- }
-
- return mc.writePacket(data)
-}
-
-// For each remaining resultset in the stream, discards its rows and updates
-// mc.affectedRows and mc.insertIds.
-func (mc *okHandler) discardResults() error {
- for mc.status&statusMoreResultsExists != 0 {
- resLen, err := mc.readResultSetHeaderPacket()
- if err != nil {
- return err
- }
- if resLen > 0 {
- // columns
- if err := mc.conn().readUntilEOF(); err != nil {
- return err
- }
- // rows
- if err := mc.conn().readUntilEOF(); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html
-func (rows *binaryRows) readRow(dest []driver.Value) error {
- data, err := rows.mc.readPacket()
- if err != nil {
- return err
- }
-
- // packet indicator [1 byte]
- if data[0] != iOK {
- // EOF Packet
- if data[0] == iEOF && len(data) == 5 {
- rows.mc.status = readStatus(data[3:])
- rows.rs.done = true
- if !rows.HasNextResultSet() {
- rows.mc = nil
- }
- return io.EOF
- }
- mc := rows.mc
- rows.mc = nil
-
- // Error otherwise
- return mc.handleErrorPacket(data)
- }
-
- // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes]
- pos := 1 + (len(dest)+7+2)>>3
- nullMask := data[1:pos]
-
- for i := range dest {
- // Field is NULL
- // (byte >> bit-pos) % 2 == 1
- if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 {
- dest[i] = nil
- continue
- }
-
- // Convert to byte-coded string
- switch rows.rs.columns[i].fieldType {
- case fieldTypeNULL:
- dest[i] = nil
- continue
-
- // Numeric Types
- case fieldTypeTiny:
- if rows.rs.columns[i].flags&flagUnsigned != 0 {
- dest[i] = int64(data[pos])
- } else {
- dest[i] = int64(int8(data[pos]))
- }
- pos++
- continue
-
- case fieldTypeShort, fieldTypeYear:
- if rows.rs.columns[i].flags&flagUnsigned != 0 {
- dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2]))
- } else {
- dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2])))
- }
- pos += 2
- continue
-
- case fieldTypeInt24, fieldTypeLong:
- if rows.rs.columns[i].flags&flagUnsigned != 0 {
- dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4]))
- } else {
- dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4])))
- }
- pos += 4
- continue
-
- case fieldTypeLongLong:
- if rows.rs.columns[i].flags&flagUnsigned != 0 {
- val := binary.LittleEndian.Uint64(data[pos : pos+8])
- if val > math.MaxInt64 {
- dest[i] = uint64ToString(val)
- } else {
- dest[i] = int64(val)
- }
- } else {
- dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8]))
- }
- pos += 8
- continue
-
- case fieldTypeFloat:
- dest[i] = math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))
- pos += 4
- continue
-
- case fieldTypeDouble:
- dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8]))
- pos += 8
- continue
-
- // Length coded Binary Strings
- case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
- fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
- fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
- fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON:
- var isNull bool
- var n int
- dest[i], isNull, n, err = readLengthEncodedString(data[pos:])
- pos += n
- if err == nil {
- if !isNull {
- continue
- } else {
- dest[i] = nil
- continue
- }
- }
- return err
-
- case
- fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD
- fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal]
- fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal]
-
- num, isNull, n := readLengthEncodedInteger(data[pos:])
- pos += n
-
- switch {
- case isNull:
- dest[i] = nil
- continue
- case rows.rs.columns[i].fieldType == fieldTypeTime:
- // database/sql does not support an equivalent to TIME, return a string
- var dstlen uint8
- switch decimals := rows.rs.columns[i].decimals; decimals {
- case 0x00, 0x1f:
- dstlen = 8
- case 1, 2, 3, 4, 5, 6:
- dstlen = 8 + 1 + decimals
- default:
- return fmt.Errorf(
- "protocol error, illegal decimals value %d",
- rows.rs.columns[i].decimals,
- )
- }
- dest[i], err = formatBinaryTime(data[pos:pos+int(num)], dstlen)
- case rows.mc.parseTime:
- dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc)
- default:
- var dstlen uint8
- if rows.rs.columns[i].fieldType == fieldTypeDate {
- dstlen = 10
- } else {
- switch decimals := rows.rs.columns[i].decimals; decimals {
- case 0x00, 0x1f:
- dstlen = 19
- case 1, 2, 3, 4, 5, 6:
- dstlen = 19 + 1 + decimals
- default:
- return fmt.Errorf(
- "protocol error, illegal decimals value %d",
- rows.rs.columns[i].decimals,
- )
- }
- }
- dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen)
- }
-
- if err == nil {
- pos += int(num)
- continue
- } else {
- return err
- }
-
- // Please report if this happens!
- default:
- return fmt.Errorf("unknown field type %d", rows.rs.columns[i].fieldType)
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/result.go b/vendor/github.com/go-sql-driver/mysql/result.go
deleted file mode 100644
index d5163146..00000000
--- a/vendor/github.com/go-sql-driver/mysql/result.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import "database/sql/driver"
-
-// Result exposes data not available through *connection.Result.
-//
-// This is accessible by executing statements using sql.Conn.Raw() and
-// downcasting the returned result:
-//
-// res, err := rawConn.Exec(...)
-// res.(mysql.Result).AllRowsAffected()
-type Result interface {
- driver.Result
- // AllRowsAffected returns a slice containing the affected rows for each
- // executed statement.
- AllRowsAffected() []int64
- // AllLastInsertIds returns a slice containing the last inserted ID for each
- // executed statement.
- AllLastInsertIds() []int64
-}
-
-type mysqlResult struct {
- // One entry in both slices is created for every executed statement result.
- affectedRows []int64
- insertIds []int64
-}
-
-func (res *mysqlResult) LastInsertId() (int64, error) {
- return res.insertIds[len(res.insertIds)-1], nil
-}
-
-func (res *mysqlResult) RowsAffected() (int64, error) {
- return res.affectedRows[len(res.affectedRows)-1], nil
-}
-
-func (res *mysqlResult) AllLastInsertIds() []int64 {
- return append([]int64{}, res.insertIds...) // defensive copy
-}
-
-func (res *mysqlResult) AllRowsAffected() []int64 {
- return append([]int64{}, res.affectedRows...) // defensive copy
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go
deleted file mode 100644
index 81fa6062..00000000
--- a/vendor/github.com/go-sql-driver/mysql/rows.go
+++ /dev/null
@@ -1,232 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "database/sql/driver"
- "io"
- "math"
- "reflect"
-)
-
-type resultSet struct {
- columns []mysqlField
- columnNames []string
- done bool
-}
-
-type mysqlRows struct {
- mc *mysqlConn
- rs resultSet
- finish func()
-}
-
-type binaryRows struct {
- mysqlRows
-}
-
-type textRows struct {
- mysqlRows
-}
-
-func (rows *mysqlRows) Columns() []string {
- if rows.rs.columnNames != nil {
- return rows.rs.columnNames
- }
-
- columns := make([]string, len(rows.rs.columns))
- if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias {
- for i := range columns {
- if tableName := rows.rs.columns[i].tableName; len(tableName) > 0 {
- columns[i] = tableName + "." + rows.rs.columns[i].name
- } else {
- columns[i] = rows.rs.columns[i].name
- }
- }
- } else {
- for i := range columns {
- columns[i] = rows.rs.columns[i].name
- }
- }
-
- rows.rs.columnNames = columns
- return columns
-}
-
-func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string {
- return rows.rs.columns[i].typeDatabaseName()
-}
-
-// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) {
-// return int64(rows.rs.columns[i].length), true
-// }
-
-func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) {
- return rows.rs.columns[i].flags&flagNotNULL == 0, true
-}
-
-func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) {
- column := rows.rs.columns[i]
- decimals := int64(column.decimals)
-
- switch column.fieldType {
- case fieldTypeDecimal, fieldTypeNewDecimal:
- if decimals > 0 {
- return int64(column.length) - 2, decimals, true
- }
- return int64(column.length) - 1, decimals, true
- case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime:
- return decimals, decimals, true
- case fieldTypeFloat, fieldTypeDouble:
- if decimals == 0x1f {
- return math.MaxInt64, math.MaxInt64, true
- }
- return math.MaxInt64, decimals, true
- }
-
- return 0, 0, false
-}
-
-func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type {
- return rows.rs.columns[i].scanType()
-}
-
-func (rows *mysqlRows) Close() (err error) {
- if f := rows.finish; f != nil {
- f()
- rows.finish = nil
- }
-
- mc := rows.mc
- if mc == nil {
- return nil
- }
- if err := mc.error(); err != nil {
- return err
- }
-
- // flip the buffer for this connection if we need to drain it.
- // note that for a successful query (i.e. one where rows.next()
- // has been called until it returns false), `rows.mc` will be nil
- // by the time the user calls `(*Rows).Close`, so we won't reach this
- // see: https://github.com/golang/go/commit/651ddbdb5056ded455f47f9c494c67b389622a47
- mc.buf.flip()
-
- // Remove unread packets from stream
- if !rows.rs.done {
- err = mc.readUntilEOF()
- }
- if err == nil {
- handleOk := mc.clearResult()
- if err = handleOk.discardResults(); err != nil {
- return err
- }
- }
-
- rows.mc = nil
- return err
-}
-
-func (rows *mysqlRows) HasNextResultSet() (b bool) {
- if rows.mc == nil {
- return false
- }
- return rows.mc.status&statusMoreResultsExists != 0
-}
-
-func (rows *mysqlRows) nextResultSet() (int, error) {
- if rows.mc == nil {
- return 0, io.EOF
- }
- if err := rows.mc.error(); err != nil {
- return 0, err
- }
-
- // Remove unread packets from stream
- if !rows.rs.done {
- if err := rows.mc.readUntilEOF(); err != nil {
- return 0, err
- }
- rows.rs.done = true
- }
-
- if !rows.HasNextResultSet() {
- rows.mc = nil
- return 0, io.EOF
- }
- rows.rs = resultSet{}
- // rows.mc.affectedRows and rows.mc.insertIds accumulate on each call to
- // nextResultSet.
- resLen, err := rows.mc.resultUnchanged().readResultSetHeaderPacket()
- if err != nil {
- // Clean up about multi-results flag
- rows.rs.done = true
- rows.mc.status = rows.mc.status & (^statusMoreResultsExists)
- }
- return resLen, err
-}
-
-func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) {
- for {
- resLen, err := rows.nextResultSet()
- if err != nil {
- return 0, err
- }
-
- if resLen > 0 {
- return resLen, nil
- }
-
- rows.rs.done = true
- }
-}
-
-func (rows *binaryRows) NextResultSet() error {
- resLen, err := rows.nextNotEmptyResultSet()
- if err != nil {
- return err
- }
-
- rows.rs.columns, err = rows.mc.readColumns(resLen)
- return err
-}
-
-func (rows *binaryRows) Next(dest []driver.Value) error {
- if mc := rows.mc; mc != nil {
- if err := mc.error(); err != nil {
- return err
- }
-
- // Fetch next row from stream
- return rows.readRow(dest)
- }
- return io.EOF
-}
-
-func (rows *textRows) NextResultSet() (err error) {
- resLen, err := rows.nextNotEmptyResultSet()
- if err != nil {
- return err
- }
-
- rows.rs.columns, err = rows.mc.readColumns(resLen)
- return err
-}
-
-func (rows *textRows) Next(dest []driver.Value) error {
- if mc := rows.mc; mc != nil {
- if err := mc.error(); err != nil {
- return err
- }
-
- // Fetch next row from stream
- return rows.readRow(dest)
- }
- return io.EOF
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go
deleted file mode 100644
index 0436f224..00000000
--- a/vendor/github.com/go-sql-driver/mysql/statement.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "database/sql/driver"
- "encoding/json"
- "fmt"
- "io"
- "reflect"
-)
-
-type mysqlStmt struct {
- mc *mysqlConn
- id uint32
- paramCount int
-}
-
-func (stmt *mysqlStmt) Close() error {
- if stmt.mc == nil || stmt.mc.closed.Load() {
- // driver.Stmt.Close can be called more than once, thus this function
- // has to be idempotent.
- // See also Issue #450 and golang/go#16019.
- //errLog.Print(ErrInvalidConn)
- return driver.ErrBadConn
- }
-
- err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id)
- stmt.mc = nil
- return err
-}
-
-func (stmt *mysqlStmt) NumInput() int {
- return stmt.paramCount
-}
-
-func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
- return converter{}
-}
-
-func (stmt *mysqlStmt) CheckNamedValue(nv *driver.NamedValue) (err error) {
- nv.Value, err = converter{}.ConvertValue(nv.Value)
- return
-}
-
-func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
- if stmt.mc.closed.Load() {
- stmt.mc.log(ErrInvalidConn)
- return nil, driver.ErrBadConn
- }
- // Send command
- err := stmt.writeExecutePacket(args)
- if err != nil {
- return nil, stmt.mc.markBadConn(err)
- }
-
- mc := stmt.mc
- handleOk := stmt.mc.clearResult()
-
- // Read Result
- resLen, err := handleOk.readResultSetHeaderPacket()
- if err != nil {
- return nil, err
- }
-
- if resLen > 0 {
- // Columns
- if err = mc.readUntilEOF(); err != nil {
- return nil, err
- }
-
- // Rows
- if err := mc.readUntilEOF(); err != nil {
- return nil, err
- }
- }
-
- if err := handleOk.discardResults(); err != nil {
- return nil, err
- }
-
- copied := mc.result
- return &copied, nil
-}
-
-func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
- return stmt.query(args)
-}
-
-func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
- if stmt.mc.closed.Load() {
- stmt.mc.log(ErrInvalidConn)
- return nil, driver.ErrBadConn
- }
- // Send command
- err := stmt.writeExecutePacket(args)
- if err != nil {
- return nil, stmt.mc.markBadConn(err)
- }
-
- mc := stmt.mc
-
- // Read Result
- handleOk := stmt.mc.clearResult()
- resLen, err := handleOk.readResultSetHeaderPacket()
- if err != nil {
- return nil, err
- }
-
- rows := new(binaryRows)
-
- if resLen > 0 {
- rows.mc = mc
- rows.rs.columns, err = mc.readColumns(resLen)
- } else {
- rows.rs.done = true
-
- switch err := rows.NextResultSet(); err {
- case nil, io.EOF:
- return rows, nil
- default:
- return nil, err
- }
- }
-
- return rows, err
-}
-
-var jsonType = reflect.TypeOf(json.RawMessage{})
-
-type converter struct{}
-
-// ConvertValue mirrors the reference/default converter in database/sql/driver
-// with _one_ exception. We support uint64 with their high bit and the default
-// implementation does not. This function should be kept in sync with
-// database/sql/driver defaultConverter.ConvertValue() except for that
-// deliberate difference.
-func (c converter) ConvertValue(v any) (driver.Value, error) {
- if driver.IsValue(v) {
- return v, nil
- }
-
- if vr, ok := v.(driver.Valuer); ok {
- sv, err := callValuerValue(vr)
- if err != nil {
- return nil, err
- }
- if driver.IsValue(sv) {
- return sv, nil
- }
- // A value returned from the Valuer interface can be "a type handled by
- // a database driver's NamedValueChecker interface" so we should accept
- // uint64 here as well.
- if u, ok := sv.(uint64); ok {
- return u, nil
- }
- return nil, fmt.Errorf("non-Value type %T returned from Value", sv)
- }
- rv := reflect.ValueOf(v)
- switch rv.Kind() {
- case reflect.Ptr:
- // indirect pointers
- if rv.IsNil() {
- return nil, nil
- } else {
- return c.ConvertValue(rv.Elem().Interface())
- }
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return rv.Int(), nil
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return rv.Uint(), nil
- case reflect.Float32, reflect.Float64:
- return rv.Float(), nil
- case reflect.Bool:
- return rv.Bool(), nil
- case reflect.Slice:
- switch t := rv.Type(); {
- case t == jsonType:
- return v, nil
- case t.Elem().Kind() == reflect.Uint8:
- return rv.Bytes(), nil
- default:
- return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, t.Elem().Kind())
- }
- case reflect.String:
- return rv.String(), nil
- }
- return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind())
-}
-
-var valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem()
-
-// callValuerValue returns vr.Value(), with one exception:
-// If vr.Value is an auto-generated method on a pointer type and the
-// pointer is nil, it would panic at runtime in the panicwrap
-// method. Treat it like nil instead.
-//
-// This is so people can implement driver.Value on value types and
-// still use nil pointers to those types to mean nil/NULL, just like
-// string/*string.
-//
-// This is an exact copy of the same-named unexported function from the
-// database/sql package.
-func callValuerValue(vr driver.Valuer) (v driver.Value, err error) {
- if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr &&
- rv.IsNil() &&
- rv.Type().Elem().Implements(valuerReflectType) {
- return nil, nil
- }
- return vr.Value()
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go
deleted file mode 100644
index 4a4b6100..00000000
--- a/vendor/github.com/go-sql-driver/mysql/transaction.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-type mysqlTx struct {
- mc *mysqlConn
-}
-
-func (tx *mysqlTx) Commit() (err error) {
- if tx.mc == nil || tx.mc.closed.Load() {
- return ErrInvalidConn
- }
- err = tx.mc.exec("COMMIT")
- tx.mc = nil
- return
-}
-
-func (tx *mysqlTx) Rollback() (err error) {
- if tx.mc == nil || tx.mc.closed.Load() {
- return ErrInvalidConn
- }
- err = tx.mc.exec("ROLLBACK")
- tx.mc = nil
- return
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go
deleted file mode 100644
index cda24fe7..00000000
--- a/vendor/github.com/go-sql-driver/mysql/utils.go
+++ /dev/null
@@ -1,843 +0,0 @@
-// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
-//
-// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at http://mozilla.org/MPL/2.0/.
-
-package mysql
-
-import (
- "crypto/tls"
- "database/sql"
- "database/sql/driver"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-)
-
-// Registry for custom tls.Configs
-var (
- tlsConfigLock sync.RWMutex
- tlsConfigRegistry map[string]*tls.Config
-)
-
-// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
-// Use the key as a value in the DSN where tls=value.
-//
-// Note: The provided tls.Config is exclusively owned by the driver after
-// registering it.
-//
-// rootCertPool := x509.NewCertPool()
-// pem, err := os.ReadFile("/path/ca-cert.pem")
-// if err != nil {
-// log.Fatal(err)
-// }
-// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok {
-// log.Fatal("Failed to append PEM.")
-// }
-// clientCert := make([]tls.Certificate, 0, 1)
-// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem")
-// if err != nil {
-// log.Fatal(err)
-// }
-// clientCert = append(clientCert, certs)
-// mysql.RegisterTLSConfig("custom", &tls.Config{
-// RootCAs: rootCertPool,
-// Certificates: clientCert,
-// })
-// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom")
-func RegisterTLSConfig(key string, config *tls.Config) error {
- if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" || strings.ToLower(key) == "preferred" {
- return fmt.Errorf("key '%s' is reserved", key)
- }
-
- tlsConfigLock.Lock()
- if tlsConfigRegistry == nil {
- tlsConfigRegistry = make(map[string]*tls.Config)
- }
-
- tlsConfigRegistry[key] = config
- tlsConfigLock.Unlock()
- return nil
-}
-
-// DeregisterTLSConfig removes the tls.Config associated with key.
-func DeregisterTLSConfig(key string) {
- tlsConfigLock.Lock()
- if tlsConfigRegistry != nil {
- delete(tlsConfigRegistry, key)
- }
- tlsConfigLock.Unlock()
-}
-
-func getTLSConfigClone(key string) (config *tls.Config) {
- tlsConfigLock.RLock()
- if v, ok := tlsConfigRegistry[key]; ok {
- config = v.Clone()
- }
- tlsConfigLock.RUnlock()
- return
-}
-
-// Returns the bool value of the input.
-// The 2nd return value indicates if the input was a valid bool value
-func readBool(input string) (value bool, valid bool) {
- switch input {
- case "1", "true", "TRUE", "True":
- return true, true
- case "0", "false", "FALSE", "False":
- return false, true
- }
-
- // Not a valid bool value
- return
-}
-
-/******************************************************************************
-* Time related utils *
-******************************************************************************/
-
-func parseDateTime(b []byte, loc *time.Location) (time.Time, error) {
- const base = "0000-00-00 00:00:00.000000"
- switch len(b) {
- case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM"
- if string(b) == base[:len(b)] {
- return time.Time{}, nil
- }
-
- year, err := parseByteYear(b)
- if err != nil {
- return time.Time{}, err
- }
- if b[4] != '-' {
- return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[4])
- }
-
- m, err := parseByte2Digits(b[5], b[6])
- if err != nil {
- return time.Time{}, err
- }
- month := time.Month(m)
-
- if b[7] != '-' {
- return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[7])
- }
-
- day, err := parseByte2Digits(b[8], b[9])
- if err != nil {
- return time.Time{}, err
- }
- if len(b) == 10 {
- return time.Date(year, month, day, 0, 0, 0, 0, loc), nil
- }
-
- if b[10] != ' ' {
- return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[10])
- }
-
- hour, err := parseByte2Digits(b[11], b[12])
- if err != nil {
- return time.Time{}, err
- }
- if b[13] != ':' {
- return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[13])
- }
-
- min, err := parseByte2Digits(b[14], b[15])
- if err != nil {
- return time.Time{}, err
- }
- if b[16] != ':' {
- return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[16])
- }
-
- sec, err := parseByte2Digits(b[17], b[18])
- if err != nil {
- return time.Time{}, err
- }
- if len(b) == 19 {
- return time.Date(year, month, day, hour, min, sec, 0, loc), nil
- }
-
- if b[19] != '.' {
- return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[19])
- }
- nsec, err := parseByteNanoSec(b[20:])
- if err != nil {
- return time.Time{}, err
- }
- return time.Date(year, month, day, hour, min, sec, nsec, loc), nil
- default:
- return time.Time{}, fmt.Errorf("invalid time bytes: %s", b)
- }
-}
-
-func parseByteYear(b []byte) (int, error) {
- year, n := 0, 1000
- for i := 0; i < 4; i++ {
- v, err := bToi(b[i])
- if err != nil {
- return 0, err
- }
- year += v * n
- n /= 10
- }
- return year, nil
-}
-
-func parseByte2Digits(b1, b2 byte) (int, error) {
- d1, err := bToi(b1)
- if err != nil {
- return 0, err
- }
- d2, err := bToi(b2)
- if err != nil {
- return 0, err
- }
- return d1*10 + d2, nil
-}
-
-func parseByteNanoSec(b []byte) (int, error) {
- ns, digit := 0, 100000 // max is 6-digits
- for i := 0; i < len(b); i++ {
- v, err := bToi(b[i])
- if err != nil {
- return 0, err
- }
- ns += v * digit
- digit /= 10
- }
- // nanoseconds has 10-digits. (needs to scale digits)
- // 10 - 6 = 4, so we have to multiple 1000.
- return ns * 1000, nil
-}
-
-func bToi(b byte) (int, error) {
- if b < '0' || b > '9' {
- return 0, errors.New("not [0-9]")
- }
- return int(b - '0'), nil
-}
-
-func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) {
- switch num {
- case 0:
- return time.Time{}, nil
- case 4:
- return time.Date(
- int(binary.LittleEndian.Uint16(data[:2])), // year
- time.Month(data[2]), // month
- int(data[3]), // day
- 0, 0, 0, 0,
- loc,
- ), nil
- case 7:
- return time.Date(
- int(binary.LittleEndian.Uint16(data[:2])), // year
- time.Month(data[2]), // month
- int(data[3]), // day
- int(data[4]), // hour
- int(data[5]), // minutes
- int(data[6]), // seconds
- 0,
- loc,
- ), nil
- case 11:
- return time.Date(
- int(binary.LittleEndian.Uint16(data[:2])), // year
- time.Month(data[2]), // month
- int(data[3]), // day
- int(data[4]), // hour
- int(data[5]), // minutes
- int(data[6]), // seconds
- int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds
- loc,
- ), nil
- }
- return nil, fmt.Errorf("invalid DATETIME packet length %d", num)
-}
-
-func appendDateTime(buf []byte, t time.Time, timeTruncate time.Duration) ([]byte, error) {
- if timeTruncate > 0 {
- t = t.Truncate(timeTruncate)
- }
-
- year, month, day := t.Date()
- hour, min, sec := t.Clock()
- nsec := t.Nanosecond()
-
- if year < 1 || year > 9999 {
- return buf, errors.New("year is not in the range [1, 9999]: " + strconv.Itoa(year)) // use errors.New instead of fmt.Errorf to avoid year escape to heap
- }
- year100 := year / 100
- year1 := year % 100
-
- var localBuf [len("2006-01-02T15:04:05.999999999")]byte // does not escape
- localBuf[0], localBuf[1], localBuf[2], localBuf[3] = digits10[year100], digits01[year100], digits10[year1], digits01[year1]
- localBuf[4] = '-'
- localBuf[5], localBuf[6] = digits10[month], digits01[month]
- localBuf[7] = '-'
- localBuf[8], localBuf[9] = digits10[day], digits01[day]
-
- if hour == 0 && min == 0 && sec == 0 && nsec == 0 {
- return append(buf, localBuf[:10]...), nil
- }
-
- localBuf[10] = ' '
- localBuf[11], localBuf[12] = digits10[hour], digits01[hour]
- localBuf[13] = ':'
- localBuf[14], localBuf[15] = digits10[min], digits01[min]
- localBuf[16] = ':'
- localBuf[17], localBuf[18] = digits10[sec], digits01[sec]
-
- if nsec == 0 {
- return append(buf, localBuf[:19]...), nil
- }
- nsec100000000 := nsec / 100000000
- nsec1000000 := (nsec / 1000000) % 100
- nsec10000 := (nsec / 10000) % 100
- nsec100 := (nsec / 100) % 100
- nsec1 := nsec % 100
- localBuf[19] = '.'
-
- // milli second
- localBuf[20], localBuf[21], localBuf[22] =
- digits01[nsec100000000], digits10[nsec1000000], digits01[nsec1000000]
- // micro second
- localBuf[23], localBuf[24], localBuf[25] =
- digits10[nsec10000], digits01[nsec10000], digits10[nsec100]
- // nano second
- localBuf[26], localBuf[27], localBuf[28] =
- digits01[nsec100], digits10[nsec1], digits01[nsec1]
-
- // trim trailing zeros
- n := len(localBuf)
- for n > 0 && localBuf[n-1] == '0' {
- n--
- }
-
- return append(buf, localBuf[:n]...), nil
-}
-
-// zeroDateTime is used in formatBinaryDateTime to avoid an allocation
-// if the DATE or DATETIME has the zero value.
-// It must never be changed.
-// The current behavior depends on database/sql copying the result.
-var zeroDateTime = []byte("0000-00-00 00:00:00.000000")
-
-const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
-const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"
-
-func appendMicrosecs(dst, src []byte, decimals int) []byte {
- if decimals <= 0 {
- return dst
- }
- if len(src) == 0 {
- return append(dst, ".000000"[:decimals+1]...)
- }
-
- microsecs := binary.LittleEndian.Uint32(src[:4])
- p1 := byte(microsecs / 10000)
- microsecs -= 10000 * uint32(p1)
- p2 := byte(microsecs / 100)
- microsecs -= 100 * uint32(p2)
- p3 := byte(microsecs)
-
- switch decimals {
- default:
- return append(dst, '.',
- digits10[p1], digits01[p1],
- digits10[p2], digits01[p2],
- digits10[p3], digits01[p3],
- )
- case 1:
- return append(dst, '.',
- digits10[p1],
- )
- case 2:
- return append(dst, '.',
- digits10[p1], digits01[p1],
- )
- case 3:
- return append(dst, '.',
- digits10[p1], digits01[p1],
- digits10[p2],
- )
- case 4:
- return append(dst, '.',
- digits10[p1], digits01[p1],
- digits10[p2], digits01[p2],
- )
- case 5:
- return append(dst, '.',
- digits10[p1], digits01[p1],
- digits10[p2], digits01[p2],
- digits10[p3],
- )
- }
-}
-
-func formatBinaryDateTime(src []byte, length uint8) (driver.Value, error) {
- // length expects the deterministic length of the zero value,
- // negative time and 100+ hours are automatically added if needed
- if len(src) == 0 {
- return zeroDateTime[:length], nil
- }
- var dst []byte // return value
- var p1, p2, p3 byte // current digit pair
-
- switch length {
- case 10, 19, 21, 22, 23, 24, 25, 26:
- default:
- t := "DATE"
- if length > 10 {
- t += "TIME"
- }
- return nil, fmt.Errorf("illegal %s length %d", t, length)
- }
- switch len(src) {
- case 4, 7, 11:
- default:
- t := "DATE"
- if length > 10 {
- t += "TIME"
- }
- return nil, fmt.Errorf("illegal %s packet length %d", t, len(src))
- }
- dst = make([]byte, 0, length)
- // start with the date
- year := binary.LittleEndian.Uint16(src[:2])
- pt := year / 100
- p1 = byte(year - 100*uint16(pt))
- p2, p3 = src[2], src[3]
- dst = append(dst,
- digits10[pt], digits01[pt],
- digits10[p1], digits01[p1], '-',
- digits10[p2], digits01[p2], '-',
- digits10[p3], digits01[p3],
- )
- if length == 10 {
- return dst, nil
- }
- if len(src) == 4 {
- return append(dst, zeroDateTime[10:length]...), nil
- }
- dst = append(dst, ' ')
- p1 = src[4] // hour
- src = src[5:]
-
- // p1 is 2-digit hour, src is after hour
- p2, p3 = src[0], src[1]
- dst = append(dst,
- digits10[p1], digits01[p1], ':',
- digits10[p2], digits01[p2], ':',
- digits10[p3], digits01[p3],
- )
- return appendMicrosecs(dst, src[2:], int(length)-20), nil
-}
-
-func formatBinaryTime(src []byte, length uint8) (driver.Value, error) {
- // length expects the deterministic length of the zero value,
- // negative time and 100+ hours are automatically added if needed
- if len(src) == 0 {
- return zeroDateTime[11 : 11+length], nil
- }
- var dst []byte // return value
-
- switch length {
- case
- 8, // time (can be up to 10 when negative and 100+ hours)
- 10, 11, 12, 13, 14, 15: // time with fractional seconds
- default:
- return nil, fmt.Errorf("illegal TIME length %d", length)
- }
- switch len(src) {
- case 8, 12:
- default:
- return nil, fmt.Errorf("invalid TIME packet length %d", len(src))
- }
- // +2 to enable negative time and 100+ hours
- dst = make([]byte, 0, length+2)
- if src[0] == 1 {
- dst = append(dst, '-')
- }
- days := binary.LittleEndian.Uint32(src[1:5])
- hours := int64(days)*24 + int64(src[5])
-
- if hours >= 100 {
- dst = strconv.AppendInt(dst, hours, 10)
- } else {
- dst = append(dst, digits10[hours], digits01[hours])
- }
-
- min, sec := src[6], src[7]
- dst = append(dst, ':',
- digits10[min], digits01[min], ':',
- digits10[sec], digits01[sec],
- )
- return appendMicrosecs(dst, src[8:], int(length)-9), nil
-}
-
-/******************************************************************************
-* Convert from and to bytes *
-******************************************************************************/
-
-func uint64ToBytes(n uint64) []byte {
- return []byte{
- byte(n),
- byte(n >> 8),
- byte(n >> 16),
- byte(n >> 24),
- byte(n >> 32),
- byte(n >> 40),
- byte(n >> 48),
- byte(n >> 56),
- }
-}
-
-func uint64ToString(n uint64) []byte {
- var a [20]byte
- i := 20
-
- // U+0030 = 0
- // ...
- // U+0039 = 9
-
- var q uint64
- for n >= 10 {
- i--
- q = n / 10
- a[i] = uint8(n-q*10) + 0x30
- n = q
- }
-
- i--
- a[i] = uint8(n) + 0x30
-
- return a[i:]
-}
-
-// treats string value as unsigned integer representation
-func stringToInt(b []byte) int {
- val := 0
- for i := range b {
- val *= 10
- val += int(b[i] - 0x30)
- }
- return val
-}
-
-// returns the string read as a bytes slice, whether the value is NULL,
-// the number of bytes read and an error, in case the string is longer than
-// the input slice
-func readLengthEncodedString(b []byte) ([]byte, bool, int, error) {
- // Get length
- num, isNull, n := readLengthEncodedInteger(b)
- if num < 1 {
- return b[n:n], isNull, n, nil
- }
-
- n += int(num)
-
- // Check data length
- if len(b) >= n {
- return b[n-int(num) : n : n], false, n, nil
- }
- return nil, false, n, io.EOF
-}
-
-// returns the number of bytes skipped and an error, in case the string is
-// longer than the input slice
-func skipLengthEncodedString(b []byte) (int, error) {
- // Get length
- num, _, n := readLengthEncodedInteger(b)
- if num < 1 {
- return n, nil
- }
-
- n += int(num)
-
- // Check data length
- if len(b) >= n {
- return n, nil
- }
- return n, io.EOF
-}
-
-// returns the number read, whether the value is NULL and the number of bytes read
-func readLengthEncodedInteger(b []byte) (uint64, bool, int) {
- // See issue #349
- if len(b) == 0 {
- return 0, true, 1
- }
-
- switch b[0] {
- // 251: NULL
- case 0xfb:
- return 0, true, 1
-
- // 252: value of following 2
- case 0xfc:
- return uint64(b[1]) | uint64(b[2])<<8, false, 3
-
- // 253: value of following 3
- case 0xfd:
- return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4
-
- // 254: value of following 8
- case 0xfe:
- return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
- uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
- uint64(b[7])<<48 | uint64(b[8])<<56,
- false, 9
- }
-
- // 0-250: value of first byte
- return uint64(b[0]), false, 1
-}
-
-// encodes a uint64 value and appends it to the given bytes slice
-func appendLengthEncodedInteger(b []byte, n uint64) []byte {
- switch {
- case n <= 250:
- return append(b, byte(n))
-
- case n <= 0xffff:
- return append(b, 0xfc, byte(n), byte(n>>8))
-
- case n <= 0xffffff:
- return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16))
- }
- return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24),
- byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56))
-}
-
-func appendLengthEncodedString(b []byte, s string) []byte {
- b = appendLengthEncodedInteger(b, uint64(len(s)))
- return append(b, s...)
-}
-
-// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize.
-// If cap(buf) is not enough, reallocate new buffer.
-func reserveBuffer(buf []byte, appendSize int) []byte {
- newSize := len(buf) + appendSize
- if cap(buf) < newSize {
- // Grow buffer exponentially
- newBuf := make([]byte, len(buf)*2+appendSize)
- copy(newBuf, buf)
- buf = newBuf
- }
- return buf[:newSize]
-}
-
-// escapeBytesBackslash escapes []byte with backslashes (\)
-// This escapes the contents of a string (provided as []byte) by adding backslashes before special
-// characters, and turning others into specific escape sequences, such as
-// turning newlines into \n and null bytes into \0.
-// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932
-func escapeBytesBackslash(buf, v []byte) []byte {
- pos := len(buf)
- buf = reserveBuffer(buf, len(v)*2)
-
- for _, c := range v {
- switch c {
- case '\x00':
- buf[pos+1] = '0'
- buf[pos] = '\\'
- pos += 2
- case '\n':
- buf[pos+1] = 'n'
- buf[pos] = '\\'
- pos += 2
- case '\r':
- buf[pos+1] = 'r'
- buf[pos] = '\\'
- pos += 2
- case '\x1a':
- buf[pos+1] = 'Z'
- buf[pos] = '\\'
- pos += 2
- case '\'':
- buf[pos+1] = '\''
- buf[pos] = '\\'
- pos += 2
- case '"':
- buf[pos+1] = '"'
- buf[pos] = '\\'
- pos += 2
- case '\\':
- buf[pos+1] = '\\'
- buf[pos] = '\\'
- pos += 2
- default:
- buf[pos] = c
- pos++
- }
- }
-
- return buf[:pos]
-}
-
-// escapeStringBackslash is similar to escapeBytesBackslash but for string.
-func escapeStringBackslash(buf []byte, v string) []byte {
- pos := len(buf)
- buf = reserveBuffer(buf, len(v)*2)
-
- for i := 0; i < len(v); i++ {
- c := v[i]
- switch c {
- case '\x00':
- buf[pos+1] = '0'
- buf[pos] = '\\'
- pos += 2
- case '\n':
- buf[pos+1] = 'n'
- buf[pos] = '\\'
- pos += 2
- case '\r':
- buf[pos+1] = 'r'
- buf[pos] = '\\'
- pos += 2
- case '\x1a':
- buf[pos+1] = 'Z'
- buf[pos] = '\\'
- pos += 2
- case '\'':
- buf[pos+1] = '\''
- buf[pos] = '\\'
- pos += 2
- case '"':
- buf[pos+1] = '"'
- buf[pos] = '\\'
- pos += 2
- case '\\':
- buf[pos+1] = '\\'
- buf[pos] = '\\'
- pos += 2
- default:
- buf[pos] = c
- pos++
- }
- }
-
- return buf[:pos]
-}
-
-// escapeBytesQuotes escapes apostrophes in []byte by doubling them up.
-// This escapes the contents of a string by doubling up any apostrophes that
-// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in
-// effect on the server.
-// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038
-func escapeBytesQuotes(buf, v []byte) []byte {
- pos := len(buf)
- buf = reserveBuffer(buf, len(v)*2)
-
- for _, c := range v {
- if c == '\'' {
- buf[pos+1] = '\''
- buf[pos] = '\''
- pos += 2
- } else {
- buf[pos] = c
- pos++
- }
- }
-
- return buf[:pos]
-}
-
-// escapeStringQuotes is similar to escapeBytesQuotes but for string.
-func escapeStringQuotes(buf []byte, v string) []byte {
- pos := len(buf)
- buf = reserveBuffer(buf, len(v)*2)
-
- for i := 0; i < len(v); i++ {
- c := v[i]
- if c == '\'' {
- buf[pos+1] = '\''
- buf[pos] = '\''
- pos += 2
- } else {
- buf[pos] = c
- pos++
- }
- }
-
- return buf[:pos]
-}
-
-/******************************************************************************
-* Sync utils *
-******************************************************************************/
-
-// noCopy may be embedded into structs which must not be copied
-// after the first use.
-//
-// See https://github.com/golang/go/issues/8005#issuecomment-190753527
-// for details.
-type noCopy struct{}
-
-// Lock is a no-op used by -copylocks checker from `go vet`.
-func (*noCopy) Lock() {}
-
-// Unlock is a no-op used by -copylocks checker from `go vet`.
-// noCopy should implement sync.Locker from Go 1.11
-// https://github.com/golang/go/commit/c2eba53e7f80df21d51285879d51ab81bcfbf6bc
-// https://github.com/golang/go/issues/26165
-func (*noCopy) Unlock() {}
-
-// atomicError is a wrapper for atomically accessed error values
-type atomicError struct {
- _ noCopy
- value atomic.Value
-}
-
-// Set sets the error value regardless of the previous value.
-// The value must not be nil
-func (ae *atomicError) Set(value error) {
- ae.value.Store(value)
-}
-
-// Value returns the current error value
-func (ae *atomicError) Value() error {
- if v := ae.value.Load(); v != nil {
- // this will panic if the value doesn't implement the error interface
- return v.(error)
- }
- return nil
-}
-
-func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
- dargs := make([]driver.Value, len(named))
- for n, param := range named {
- if len(param.Name) > 0 {
- // TODO: support the use of Named Parameters #561
- return nil, errors.New("mysql: driver does not support the use of Named Parameters")
- }
- dargs[n] = param.Value
- }
- return dargs, nil
-}
-
-func mapIsolationLevel(level driver.IsolationLevel) (string, error) {
- switch sql.IsolationLevel(level) {
- case sql.LevelRepeatableRead:
- return "REPEATABLE READ", nil
- case sql.LevelReadCommitted:
- return "READ COMMITTED", nil
- case sql.LevelReadUncommitted:
- return "READ UNCOMMITTED", nil
- case sql.LevelSerializable:
- return "SERIALIZABLE", nil
- default:
- return "", fmt.Errorf("mysql: unsupported isolation level: %v", level)
- }
-}
diff --git a/vendor/github.com/gofrs/uuid/.gitignore b/vendor/github.com/gofrs/uuid/.gitignore
deleted file mode 100644
index 666dbbb5..00000000
--- a/vendor/github.com/gofrs/uuid/.gitignore
+++ /dev/null
@@ -1,15 +0,0 @@
-# Binaries for programs and plugins
-*.exe
-*.exe~
-*.dll
-*.so
-*.dylib
-
-# Test binary, build with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-
-# binary bundle generated by go-fuzz
-uuid-fuzz.zip
diff --git a/vendor/github.com/gofrs/uuid/LICENSE b/vendor/github.com/gofrs/uuid/LICENSE
deleted file mode 100644
index 926d5498..00000000
--- a/vendor/github.com/gofrs/uuid/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (C) 2013-2018 by Maxim Bublis
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/gofrs/uuid/README.md b/vendor/github.com/gofrs/uuid/README.md
deleted file mode 100644
index 4f73bec8..00000000
--- a/vendor/github.com/gofrs/uuid/README.md
+++ /dev/null
@@ -1,117 +0,0 @@
-# UUID
-
-[](https://github.com/gofrs/uuid/blob/master/LICENSE)
-[](https://travis-ci.org/gofrs/uuid)
-[](http://godoc.org/github.com/gofrs/uuid)
-[](https://codecov.io/gh/gofrs/uuid/)
-[](https://goreportcard.com/report/github.com/gofrs/uuid)
-
-Package uuid provides a pure Go implementation of Universally Unique Identifiers
-(UUID) variant as defined in RFC-4122. This package supports both the creation
-and parsing of UUIDs in different formats.
-
-This package supports the following UUID versions:
-* Version 1, based on timestamp and MAC address (RFC-4122)
-* Version 3, based on MD5 hashing of a named value (RFC-4122)
-* Version 4, based on random numbers (RFC-4122)
-* Version 5, based on SHA-1 hashing of a named value (RFC-4122)
-
-This package also supports experimental Universally Unique Identifier implementations based on a
-[draft RFC](https://www.ietf.org/archive/id/draft-peabody-dispatch-new-uuid-format-04.html) that updates RFC-4122
-* Version 6, a k-sortable id based on timestamp, and field-compatible with v1 (draft-peabody-dispatch-new-uuid-format, RFC-4122)
-* Version 7, a k-sortable id based on timestamp (draft-peabody-dispatch-new-uuid-format, RFC-4122)
-
-The v6 and v7 IDs are **not** considered a part of the stable API, and may be subject to behavior or API changes as part of minor releases
-to this package. They will be updated as the draft RFC changes, and will become stable if and when the draft RFC is accepted.
-
-## Project History
-
-This project was originally forked from the
-[github.com/satori/go.uuid](https://github.com/satori/go.uuid) repository after
-it appeared to be no longer maintained, while exhibiting [critical
-flaws](https://github.com/satori/go.uuid/issues/73). We have decided to take
-over this project to ensure it receives regular maintenance for the benefit of
-the larger Go community.
-
-We'd like to thank Maxim Bublis for his hard work on the original iteration of
-the package.
-
-## License
-
-This source code of this package is released under the MIT License. Please see
-the [LICENSE](https://github.com/gofrs/uuid/blob/master/LICENSE) for the full
-content of the license.
-
-## Recommended Package Version
-
-We recommend using v2.0.0+ of this package, as versions prior to 2.0.0 were
-created before our fork of the original package and have some known
-deficiencies.
-
-## Installation
-
-It is recommended to use a package manager like `dep` that understands tagged
-releases of a package, as well as semantic versioning.
-
-If you are unable to make use of a dependency manager with your project, you can
-use the `go get` command to download it directly:
-
-```Shell
-$ go get github.com/gofrs/uuid
-```
-
-## Requirements
-
-Due to subtests not being supported in older versions of Go, this package is
-only regularly tested against Go 1.7+. This package may work perfectly fine with
-Go 1.2+, but support for these older versions is not actively maintained.
-
-## Go 1.11 Modules
-
-As of v3.2.0, this repository no longer adopts Go modules, and v3.2.0 no longer has a `go.mod` file. As a result, v3.2.0 also drops support for the `github.com/gofrs/uuid/v3` import path. Only module-based consumers are impacted. With the v3.2.0 release, _all_ gofrs/uuid consumers should use the `github.com/gofrs/uuid` import path.
-
-An existing module-based consumer will continue to be able to build using the `github.com/gofrs/uuid/v3` import path using any valid consumer `go.mod` that worked prior to the publishing of v3.2.0, but any module-based consumer should start using the `github.com/gofrs/uuid` import path when possible and _must_ use the `github.com/gofrs/uuid` import path prior to upgrading to v3.2.0.
-
-Please refer to [Issue #61](https://github.com/gofrs/uuid/issues/61) and [Issue #66](https://github.com/gofrs/uuid/issues/66) for more details.
-
-## Usage
-
-Here is a quick overview of how to use this package. For more detailed
-documentation, please see the [GoDoc Page](http://godoc.org/github.com/gofrs/uuid).
-
-```go
-package main
-
-import (
- "log"
-
- "github.com/gofrs/uuid"
-)
-
-// Create a Version 4 UUID, panicking on error.
-// Use this form to initialize package-level variables.
-var u1 = uuid.Must(uuid.NewV4())
-
-func main() {
- // Create a Version 4 UUID.
- u2, err := uuid.NewV4()
- if err != nil {
- log.Fatalf("failed to generate UUID: %v", err)
- }
- log.Printf("generated Version 4 UUID %v", u2)
-
- // Parse a UUID from a string.
- s := "6ba7b810-9dad-11d1-80b4-00c04fd430c8"
- u3, err := uuid.FromString(s)
- if err != nil {
- log.Fatalf("failed to parse UUID %q: %v", s, err)
- }
- log.Printf("successfully parsed UUID %v", u3)
-}
-```
-
-## References
-
-* [RFC-4122](https://tools.ietf.org/html/rfc4122)
-* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01)
-* [New UUID Formats RFC Draft (Peabody) Rev 04](https://www.ietf.org/archive/id/draft-peabody-dispatch-new-uuid-format-04.html#)
diff --git a/vendor/github.com/gofrs/uuid/codec.go b/vendor/github.com/gofrs/uuid/codec.go
deleted file mode 100644
index 66502641..00000000
--- a/vendor/github.com/gofrs/uuid/codec.go
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright (C) 2013-2018 by Maxim Bublis
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-package uuid
-
-import (
- "errors"
- "fmt"
-)
-
-// FromBytes returns a UUID generated from the raw byte slice input.
-// It will return an error if the slice isn't 16 bytes long.
-func FromBytes(input []byte) (UUID, error) {
- u := UUID{}
- err := u.UnmarshalBinary(input)
- return u, err
-}
-
-// FromBytesOrNil returns a UUID generated from the raw byte slice input.
-// Same behavior as FromBytes(), but returns uuid.Nil instead of an error.
-func FromBytesOrNil(input []byte) UUID {
- uuid, err := FromBytes(input)
- if err != nil {
- return Nil
- }
- return uuid
-}
-
-var errInvalidFormat = errors.New("uuid: invalid UUID format")
-
-func fromHexChar(c byte) byte {
- switch {
- case '0' <= c && c <= '9':
- return c - '0'
- case 'a' <= c && c <= 'f':
- return c - 'a' + 10
- case 'A' <= c && c <= 'F':
- return c - 'A' + 10
- }
- return 255
-}
-
-// Parse parses the UUID stored in the string text. Parsing and supported
-// formats are the same as UnmarshalText.
-func (u *UUID) Parse(s string) error {
- switch len(s) {
- case 32: // hash
- case 36: // canonical
- case 34, 38:
- if s[0] != '{' || s[len(s)-1] != '}' {
- return fmt.Errorf("uuid: incorrect UUID format in string %q", s)
- }
- s = s[1 : len(s)-1]
- case 41, 45:
- if s[:9] != "urn:uuid:" {
- return fmt.Errorf("uuid: incorrect UUID format in string %q", s[:9])
- }
- s = s[9:]
- default:
- return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(s), s)
- }
- // canonical
- if len(s) == 36 {
- if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
- return fmt.Errorf("uuid: incorrect UUID format in string %q", s)
- }
- for i, x := range [16]byte{
- 0, 2, 4, 6,
- 9, 11,
- 14, 16,
- 19, 21,
- 24, 26, 28, 30, 32, 34,
- } {
- v1 := fromHexChar(s[x])
- v2 := fromHexChar(s[x+1])
- if v1|v2 == 255 {
- return errInvalidFormat
- }
- u[i] = (v1 << 4) | v2
- }
- return nil
- }
- // hash like
- for i := 0; i < 32; i += 2 {
- v1 := fromHexChar(s[i])
- v2 := fromHexChar(s[i+1])
- if v1|v2 == 255 {
- return errInvalidFormat
- }
- u[i/2] = (v1 << 4) | v2
- }
- return nil
-}
-
-// FromString returns a UUID parsed from the input string.
-// Input is expected in a form accepted by UnmarshalText.
-func FromString(text string) (UUID, error) {
- var u UUID
- err := u.Parse(text)
- return u, err
-}
-
-// FromStringOrNil returns a UUID parsed from the input string.
-// Same behavior as FromString(), but returns uuid.Nil instead of an error.
-func FromStringOrNil(input string) UUID {
- uuid, err := FromString(input)
- if err != nil {
- return Nil
- }
- return uuid
-}
-
-// MarshalText implements the encoding.TextMarshaler interface.
-// The encoding is the same as returned by the String() method.
-func (u UUID) MarshalText() ([]byte, error) {
- var buf [36]byte
- encodeCanonical(buf[:], u)
- return buf[:], nil
-}
-
-// UnmarshalText implements the encoding.TextUnmarshaler interface.
-// Following formats are supported:
-//
-// "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
-// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
-// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
-// "6ba7b8109dad11d180b400c04fd430c8"
-// "{6ba7b8109dad11d180b400c04fd430c8}",
-// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8"
-//
-// ABNF for supported UUID text representation follows:
-//
-// URN := 'urn'
-// UUID-NID := 'uuid'
-//
-// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' |
-// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' |
-// 'A' | 'B' | 'C' | 'D' | 'E' | 'F'
-//
-// hexoct := hexdig hexdig
-// 2hexoct := hexoct hexoct
-// 4hexoct := 2hexoct 2hexoct
-// 6hexoct := 4hexoct 2hexoct
-// 12hexoct := 6hexoct 6hexoct
-//
-// hashlike := 12hexoct
-// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct
-//
-// plain := canonical | hashlike
-// uuid := canonical | hashlike | braced | urn
-//
-// braced := '{' plain '}' | '{' hashlike '}'
-// urn := URN ':' UUID-NID ':' plain
-func (u *UUID) UnmarshalText(b []byte) error {
- switch len(b) {
- case 32: // hash
- case 36: // canonical
- case 34, 38:
- if b[0] != '{' || b[len(b)-1] != '}' {
- return fmt.Errorf("uuid: incorrect UUID format in string %q", b)
- }
- b = b[1 : len(b)-1]
- case 41, 45:
- if string(b[:9]) != "urn:uuid:" {
- return fmt.Errorf("uuid: incorrect UUID format in string %q", b[:9])
- }
- b = b[9:]
- default:
- return fmt.Errorf("uuid: incorrect UUID length %d in string %q", len(b), b)
- }
- if len(b) == 36 {
- if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
- return fmt.Errorf("uuid: incorrect UUID format in string %q", b)
- }
- for i, x := range [16]byte{
- 0, 2, 4, 6,
- 9, 11,
- 14, 16,
- 19, 21,
- 24, 26, 28, 30, 32, 34,
- } {
- v1 := fromHexChar(b[x])
- v2 := fromHexChar(b[x+1])
- if v1|v2 == 255 {
- return errInvalidFormat
- }
- u[i] = (v1 << 4) | v2
- }
- return nil
- }
- for i := 0; i < 32; i += 2 {
- v1 := fromHexChar(b[i])
- v2 := fromHexChar(b[i+1])
- if v1|v2 == 255 {
- return errInvalidFormat
- }
- u[i/2] = (v1 << 4) | v2
- }
- return nil
-}
-
-// MarshalBinary implements the encoding.BinaryMarshaler interface.
-func (u UUID) MarshalBinary() ([]byte, error) {
- return u.Bytes(), nil
-}
-
-// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
-// It will return an error if the slice isn't 16 bytes long.
-func (u *UUID) UnmarshalBinary(data []byte) error {
- if len(data) != Size {
- return fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data))
- }
- copy(u[:], data)
-
- return nil
-}
diff --git a/vendor/github.com/gofrs/uuid/fuzz.go b/vendor/github.com/gofrs/uuid/fuzz.go
deleted file mode 100644
index ccf8d4ca..00000000
--- a/vendor/github.com/gofrs/uuid/fuzz.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) 2018 Andrei Tudor Călin
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-//go:build gofuzz
-// +build gofuzz
-
-package uuid
-
-// Fuzz implements a simple fuzz test for FromString / UnmarshalText.
-//
-// To run:
-//
-// $ go get github.com/dvyukov/go-fuzz/...
-// $ cd $GOPATH/src/github.com/gofrs/uuid
-// $ go-fuzz-build github.com/gofrs/uuid
-// $ go-fuzz -bin=uuid-fuzz.zip -workdir=./testdata
-//
-// If you make significant changes to FromString / UnmarshalText and add
-// new cases to fromStringTests (in codec_test.go), please run
-//
-// $ go test -seed_fuzz_corpus
-//
-// to seed the corpus with the new interesting inputs, then run the fuzzer.
-func Fuzz(data []byte) int {
- _, err := FromString(string(data))
- if err != nil {
- return 0
- }
- return 1
-}
diff --git a/vendor/github.com/gofrs/uuid/generator.go b/vendor/github.com/gofrs/uuid/generator.go
deleted file mode 100644
index 44be9e15..00000000
--- a/vendor/github.com/gofrs/uuid/generator.go
+++ /dev/null
@@ -1,456 +0,0 @@
-// Copyright (C) 2013-2018 by Maxim Bublis
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-package uuid
-
-import (
- "crypto/md5"
- "crypto/rand"
- "crypto/sha1"
- "encoding/binary"
- "fmt"
- "hash"
- "io"
- "net"
- "sync"
- "time"
-)
-
-// Difference in 100-nanosecond intervals between
-// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
-const epochStart = 122192928000000000
-
-// EpochFunc is the function type used to provide the current time.
-type EpochFunc func() time.Time
-
-// HWAddrFunc is the function type used to provide hardware (MAC) addresses.
-type HWAddrFunc func() (net.HardwareAddr, error)
-
-// DefaultGenerator is the default UUID Generator used by this package.
-var DefaultGenerator Generator = NewGen()
-
-// NewV1 returns a UUID based on the current timestamp and MAC address.
-func NewV1() (UUID, error) {
- return DefaultGenerator.NewV1()
-}
-
-// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name.
-func NewV3(ns UUID, name string) UUID {
- return DefaultGenerator.NewV3(ns, name)
-}
-
-// NewV4 returns a randomly generated UUID.
-func NewV4() (UUID, error) {
- return DefaultGenerator.NewV4()
-}
-
-// NewV5 returns a UUID based on SHA-1 hash of the namespace UUID and name.
-func NewV5(ns UUID, name string) UUID {
- return DefaultGenerator.NewV5(ns, name)
-}
-
-// NewV6 returns a k-sortable UUID based on a timestamp and 48 bits of
-// pseudorandom data. The timestamp in a V6 UUID is the same as V1, with the bit
-// order being adjusted to allow the UUID to be k-sortable.
-//
-// This is implemented based on revision 03 of the Peabody UUID draft, and may
-// be subject to change pending further revisions. Until the final specification
-// revision is finished, changes required to implement updates to the spec will
-// not be considered a breaking change. They will happen as a minor version
-// releases until the spec is final.
-func NewV6() (UUID, error) {
- return DefaultGenerator.NewV6()
-}
-
-// NewV7 returns a k-sortable UUID based on the current millisecond precision
-// UNIX epoch and 74 bits of pseudorandom data. It supports single-node batch generation (multiple UUIDs in the same timestamp) with a Monotonic Random counter.
-//
-// This is implemented based on revision 04 of the Peabody UUID draft, and may
-// be subject to change pending further revisions. Until the final specification
-// revision is finished, changes required to implement updates to the spec will
-// not be considered a breaking change. They will happen as a minor version
-// releases until the spec is final.
-func NewV7() (UUID, error) {
- return DefaultGenerator.NewV7()
-}
-
-// Generator provides an interface for generating UUIDs.
-type Generator interface {
- NewV1() (UUID, error)
- NewV3(ns UUID, name string) UUID
- NewV4() (UUID, error)
- NewV5(ns UUID, name string) UUID
- NewV6() (UUID, error)
- NewV7() (UUID, error)
-}
-
-// Gen is a reference UUID generator based on the specifications laid out in
-// RFC-4122 and DCE 1.1: Authentication and Security Services. This type
-// satisfies the Generator interface as defined in this package.
-//
-// For consumers who are generating V1 UUIDs, but don't want to expose the MAC
-// address of the node generating the UUIDs, the NewGenWithHWAF() function has been
-// provided as a convenience. See the function's documentation for more info.
-//
-// The authors of this package do not feel that the majority of users will need
-// to obfuscate their MAC address, and so we recommend using NewGen() to create
-// a new generator.
-type Gen struct {
- clockSequenceOnce sync.Once
- hardwareAddrOnce sync.Once
- storageMutex sync.Mutex
-
- rand io.Reader
-
- epochFunc EpochFunc
- hwAddrFunc HWAddrFunc
- lastTime uint64
- clockSequence uint16
- hardwareAddr [6]byte
-}
-
-// GenOption is a function type that can be used to configure a Gen generator.
-type GenOption func(*Gen)
-
-// interface check -- build will fail if *Gen doesn't satisfy Generator
-var _ Generator = (*Gen)(nil)
-
-// NewGen returns a new instance of Gen with some default values set. Most
-// people should use this.
-func NewGen() *Gen {
- return NewGenWithHWAF(defaultHWAddrFunc)
-}
-
-// NewGenWithHWAF builds a new UUID generator with the HWAddrFunc provided. Most
-// consumers should use NewGen() instead.
-//
-// This is used so that consumers can generate their own MAC addresses, for use
-// in the generated UUIDs, if there is some concern about exposing the physical
-// address of the machine generating the UUID.
-//
-// The Gen generator will only invoke the HWAddrFunc once, and cache that MAC
-// address for all the future UUIDs generated by it. If you'd like to switch the
-// MAC address being used, you'll need to create a new generator using this
-// function.
-func NewGenWithHWAF(hwaf HWAddrFunc) *Gen {
- return NewGenWithOptions(WithHWAddrFunc(hwaf))
-}
-
-// NewGenWithOptions returns a new instance of Gen with the options provided.
-// Most people should use NewGen() or NewGenWithHWAF() instead.
-//
-// To customize the generator, you can pass in one or more GenOption functions.
-// For example:
-//
-// gen := NewGenWithOptions(
-// WithHWAddrFunc(myHWAddrFunc),
-// WithEpochFunc(myEpochFunc),
-// WithRandomReader(myRandomReader),
-// )
-//
-// NewGenWithOptions(WithHWAddrFunc(myHWAddrFunc)) is equivalent to calling
-// NewGenWithHWAF(myHWAddrFunc)
-// NewGenWithOptions() is equivalent to calling NewGen()
-func NewGenWithOptions(opts ...GenOption) *Gen {
- gen := &Gen{
- epochFunc: time.Now,
- hwAddrFunc: defaultHWAddrFunc,
- rand: rand.Reader,
- }
-
- for _, opt := range opts {
- opt(gen)
- }
-
- return gen
-}
-
-// WithHWAddrFunc is a GenOption that allows you to provide your own HWAddrFunc
-// function.
-// When this option is nil, the defaultHWAddrFunc is used.
-func WithHWAddrFunc(hwaf HWAddrFunc) GenOption {
- return func(gen *Gen) {
- if hwaf == nil {
- hwaf = defaultHWAddrFunc
- }
-
- gen.hwAddrFunc = hwaf
- }
-}
-
-// WithEpochFunc is a GenOption that allows you to provide your own EpochFunc
-// function.
-// When this option is nil, time.Now is used.
-func WithEpochFunc(epochf EpochFunc) GenOption {
- return func(gen *Gen) {
- if epochf == nil {
- epochf = time.Now
- }
-
- gen.epochFunc = epochf
- }
-}
-
-// WithRandomReader is a GenOption that allows you to provide your own random
-// reader.
-// When this option is nil, the default rand.Reader is used.
-func WithRandomReader(reader io.Reader) GenOption {
- return func(gen *Gen) {
- if reader == nil {
- reader = rand.Reader
- }
-
- gen.rand = reader
- }
-}
-
-// NewV1 returns a UUID based on the current timestamp and MAC address.
-func (g *Gen) NewV1() (UUID, error) {
- u := UUID{}
-
- timeNow, clockSeq, err := g.getClockSequence(false)
- if err != nil {
- return Nil, err
- }
- binary.BigEndian.PutUint32(u[0:], uint32(timeNow))
- binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
- binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
- binary.BigEndian.PutUint16(u[8:], clockSeq)
-
- hardwareAddr, err := g.getHardwareAddr()
- if err != nil {
- return Nil, err
- }
- copy(u[10:], hardwareAddr)
-
- u.SetVersion(V1)
- u.SetVariant(VariantRFC4122)
-
- return u, nil
-}
-
-// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name.
-func (g *Gen) NewV3(ns UUID, name string) UUID {
- u := newFromHash(md5.New(), ns, name)
- u.SetVersion(V3)
- u.SetVariant(VariantRFC4122)
-
- return u
-}
-
-// NewV4 returns a randomly generated UUID.
-func (g *Gen) NewV4() (UUID, error) {
- u := UUID{}
- if _, err := io.ReadFull(g.rand, u[:]); err != nil {
- return Nil, err
- }
- u.SetVersion(V4)
- u.SetVariant(VariantRFC4122)
-
- return u, nil
-}
-
-// NewV5 returns a UUID based on SHA-1 hash of the namespace UUID and name.
-func (g *Gen) NewV5(ns UUID, name string) UUID {
- u := newFromHash(sha1.New(), ns, name)
- u.SetVersion(V5)
- u.SetVariant(VariantRFC4122)
-
- return u
-}
-
-// NewV6 returns a k-sortable UUID based on a timestamp and 48 bits of
-// pseudorandom data. The timestamp in a V6 UUID is the same as V1, with the bit
-// order being adjusted to allow the UUID to be k-sortable.
-//
-// This is implemented based on revision 03 of the Peabody UUID draft, and may
-// be subject to change pending further revisions. Until the final specification
-// revision is finished, changes required to implement updates to the spec will
-// not be considered a breaking change. They will happen as a minor version
-// releases until the spec is final.
-func (g *Gen) NewV6() (UUID, error) {
- var u UUID
-
- if _, err := io.ReadFull(g.rand, u[10:]); err != nil {
- return Nil, err
- }
-
- timeNow, clockSeq, err := g.getClockSequence(false)
- if err != nil {
- return Nil, err
- }
-
- binary.BigEndian.PutUint32(u[0:], uint32(timeNow>>28)) // set time_high
- binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>12)) // set time_mid
- binary.BigEndian.PutUint16(u[6:], uint16(timeNow&0xfff)) // set time_low (minus four version bits)
- binary.BigEndian.PutUint16(u[8:], clockSeq&0x3fff) // set clk_seq_hi_res (minus two variant bits)
-
- u.SetVersion(V6)
- u.SetVariant(VariantRFC4122)
-
- return u, nil
-}
-
-// getClockSequence returns the epoch and clock sequence for V1,V6 and V7 UUIDs.
-//
-// When useUnixTSMs is false, it uses the Coordinated Universal Time (UTC) as a count of 100-
-//
-// nanosecond intervals since 00:00:00.00, 15 October 1582 (the date of Gregorian reform to the Christian calendar).
-func (g *Gen) getClockSequence(useUnixTSMs bool) (uint64, uint16, error) {
- var err error
- g.clockSequenceOnce.Do(func() {
- buf := make([]byte, 2)
- if _, err = io.ReadFull(g.rand, buf); err != nil {
- return
- }
- g.clockSequence = binary.BigEndian.Uint16(buf)
- })
- if err != nil {
- return 0, 0, err
- }
-
- g.storageMutex.Lock()
- defer g.storageMutex.Unlock()
-
- var timeNow uint64
- if useUnixTSMs {
- timeNow = uint64(g.epochFunc().UnixMilli())
- } else {
- timeNow = g.getEpoch()
- }
- // Clock didn't change since last UUID generation.
- // Should increase clock sequence.
- if timeNow <= g.lastTime {
- g.clockSequence++
- }
- g.lastTime = timeNow
-
- return timeNow, g.clockSequence, nil
-}
-
-// NewV7 returns a k-sortable UUID based on the current millisecond precision
-// UNIX epoch and 74 bits of pseudorandom data.
-//
-// This is implemented based on revision 04 of the Peabody UUID draft, and may
-// be subject to change pending further revisions. Until the final specification
-// revision is finished, changes required to implement updates to the spec will
-// not be considered a breaking change. They will happen as a minor version
-// releases until the spec is final.
-func (g *Gen) NewV7() (UUID, error) {
- var u UUID
- /* https://www.ietf.org/archive/id/draft-peabody-dispatch-new-uuid-format-04.html#name-uuid-version-7
- 0 1 2 3
- 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | unix_ts_ms |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | unix_ts_ms | ver | rand_a |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- |var| rand_b |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | rand_b |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */
-
- ms, clockSeq, err := g.getClockSequence(true)
- if err != nil {
- return Nil, err
- }
- //UUIDv7 features a 48 bit timestamp. First 32bit (4bytes) represents seconds since 1970, followed by 2 bytes for the ms granularity.
- u[0] = byte(ms >> 40) //1-6 bytes: big-endian unsigned number of Unix epoch timestamp
- u[1] = byte(ms >> 32)
- u[2] = byte(ms >> 24)
- u[3] = byte(ms >> 16)
- u[4] = byte(ms >> 8)
- u[5] = byte(ms)
-
- //support batching by using a monotonic pseudo-random sequence
- //The 6th byte contains the version and partially rand_a data.
- //We will lose the most significant bites from the clockSeq (with SetVersion), but it is ok, we need the least significant that contains the counter to ensure the monotonic property
- binary.BigEndian.PutUint16(u[6:8], clockSeq) // set rand_a with clock seq which is random and monotonic
-
- //override first 4bits of u[6].
- u.SetVersion(V7)
-
- //set rand_b 64bits of pseudo-random bits (first 2 will be overridden)
- if _, err = io.ReadFull(g.rand, u[8:16]); err != nil {
- return Nil, err
- }
- //override first 2 bits of byte[8] for the variant
- u.SetVariant(VariantRFC4122)
-
- return u, nil
-}
-
-// Returns the hardware address.
-func (g *Gen) getHardwareAddr() ([]byte, error) {
- var err error
- g.hardwareAddrOnce.Do(func() {
- var hwAddr net.HardwareAddr
- if hwAddr, err = g.hwAddrFunc(); err == nil {
- copy(g.hardwareAddr[:], hwAddr)
- return
- }
-
- // Initialize hardwareAddr randomly in case
- // of real network interfaces absence.
- if _, err = io.ReadFull(g.rand, g.hardwareAddr[:]); err != nil {
- return
- }
- // Set multicast bit as recommended by RFC-4122
- g.hardwareAddr[0] |= 0x01
- })
- if err != nil {
- return []byte{}, err
- }
- return g.hardwareAddr[:], nil
-}
-
-// Returns the difference between UUID epoch (October 15, 1582)
-// and current time in 100-nanosecond intervals.
-func (g *Gen) getEpoch() uint64 {
- return epochStart + uint64(g.epochFunc().UnixNano()/100)
-}
-
-// Returns the UUID based on the hashing of the namespace UUID and name.
-func newFromHash(h hash.Hash, ns UUID, name string) UUID {
- u := UUID{}
- h.Write(ns[:])
- h.Write([]byte(name))
- copy(u[:], h.Sum(nil))
-
- return u
-}
-
-var netInterfaces = net.Interfaces
-
-// Returns the hardware address.
-func defaultHWAddrFunc() (net.HardwareAddr, error) {
- ifaces, err := netInterfaces()
- if err != nil {
- return []byte{}, err
- }
- for _, iface := range ifaces {
- if len(iface.HardwareAddr) >= 6 {
- return iface.HardwareAddr, nil
- }
- }
- return []byte{}, fmt.Errorf("uuid: no HW address found")
-}
diff --git a/vendor/github.com/gofrs/uuid/sql.go b/vendor/github.com/gofrs/uuid/sql.go
deleted file mode 100644
index 01d5d884..00000000
--- a/vendor/github.com/gofrs/uuid/sql.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright (C) 2013-2018 by Maxim Bublis
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-package uuid
-
-import (
- "database/sql"
- "database/sql/driver"
- "fmt"
-)
-
-var _ driver.Valuer = UUID{}
-var _ sql.Scanner = (*UUID)(nil)
-
-// Value implements the driver.Valuer interface.
-func (u UUID) Value() (driver.Value, error) {
- return u.String(), nil
-}
-
-// Scan implements the sql.Scanner interface.
-// A 16-byte slice will be handled by UnmarshalBinary, while
-// a longer byte slice or a string will be handled by UnmarshalText.
-func (u *UUID) Scan(src interface{}) error {
- switch src := src.(type) {
- case UUID: // support gorm convert from UUID to NullUUID
- *u = src
- return nil
-
- case []byte:
- if len(src) == Size {
- return u.UnmarshalBinary(src)
- }
- return u.UnmarshalText(src)
-
- case string:
- uu, err := FromString(src)
- *u = uu
- return err
- }
-
- return fmt.Errorf("uuid: cannot convert %T to UUID", src)
-}
-
-// NullUUID can be used with the standard sql package to represent a
-// UUID value that can be NULL in the database.
-type NullUUID struct {
- UUID UUID
- Valid bool
-}
-
-// Value implements the driver.Valuer interface.
-func (u NullUUID) Value() (driver.Value, error) {
- if !u.Valid {
- return nil, nil
- }
- // Delegate to UUID Value function
- return u.UUID.Value()
-}
-
-// Scan implements the sql.Scanner interface.
-func (u *NullUUID) Scan(src interface{}) error {
- if src == nil {
- u.UUID, u.Valid = Nil, false
- return nil
- }
-
- // Delegate to UUID Scan function
- u.Valid = true
- return u.UUID.Scan(src)
-}
-
-var nullJSON = []byte("null")
-
-// MarshalJSON marshals the NullUUID as null or the nested UUID
-func (u NullUUID) MarshalJSON() ([]byte, error) {
- if !u.Valid {
- return nullJSON, nil
- }
- var buf [38]byte
- buf[0] = '"'
- encodeCanonical(buf[1:37], u.UUID)
- buf[37] = '"'
- return buf[:], nil
-}
-
-// UnmarshalJSON unmarshals a NullUUID
-func (u *NullUUID) UnmarshalJSON(b []byte) error {
- if string(b) == "null" {
- u.UUID, u.Valid = Nil, false
- return nil
- }
- if n := len(b); n >= 2 && b[0] == '"' {
- b = b[1 : n-1]
- }
- err := u.UUID.UnmarshalText(b)
- u.Valid = (err == nil)
- return err
-}
diff --git a/vendor/github.com/gofrs/uuid/uuid.go b/vendor/github.com/gofrs/uuid/uuid.go
deleted file mode 100644
index 5320fb53..00000000
--- a/vendor/github.com/gofrs/uuid/uuid.go
+++ /dev/null
@@ -1,285 +0,0 @@
-// Copyright (C) 2013-2018 by Maxim Bublis
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the
-// "Software"), to deal in the Software without restriction, including
-// without limitation the rights to use, copy, modify, merge, publish,
-// distribute, sublicense, and/or sell copies of the Software, and to
-// permit persons to whom the Software is furnished to do so, subject to
-// the following conditions:
-//
-// The above copyright notice and this permission notice shall be
-// included in all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-// Package uuid provides implementations of the Universally Unique Identifier
-// (UUID), as specified in RFC-4122 and the Peabody RFC Draft (revision 03).
-//
-// RFC-4122[1] provides the specification for versions 1, 3, 4, and 5. The
-// Peabody UUID RFC Draft[2] provides the specification for the new k-sortable
-// UUIDs, versions 6 and 7.
-//
-// DCE 1.1[3] provides the specification for version 2, but version 2 support
-// was removed from this package in v4 due to some concerns with the
-// specification itself. Reading the spec, it seems that it would result in
-// generating UUIDs that aren't very unique. In having read the spec it seemed
-// that our implementation did not meet the spec. It also seems to be at-odds
-// with RFC 4122, meaning we would need quite a bit of special code to support
-// it. Lastly, there were no Version 2 implementations that we could find to
-// ensure we were understanding the specification correctly.
-//
-// [1] https://tools.ietf.org/html/rfc4122
-// [2] https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03
-// [3] http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01
-package uuid
-
-import (
- "encoding/binary"
- "encoding/hex"
- "fmt"
- "time"
-)
-
-// Size of a UUID in bytes.
-const Size = 16
-
-// UUID is an array type to represent the value of a UUID, as defined in RFC-4122.
-type UUID [Size]byte
-
-// UUID versions.
-const (
- _ byte = iota
- V1 // Version 1 (date-time and MAC address)
- _ // Version 2 (date-time and MAC address, DCE security version) [removed]
- V3 // Version 3 (namespace name-based)
- V4 // Version 4 (random)
- V5 // Version 5 (namespace name-based)
- V6 // Version 6 (k-sortable timestamp and random data, field-compatible with v1) [peabody draft]
- V7 // Version 7 (k-sortable timestamp and random data) [peabody draft]
- _ // Version 8 (k-sortable timestamp, meant for custom implementations) [peabody draft] [not implemented]
-)
-
-// UUID layout variants.
-const (
- VariantNCS byte = iota
- VariantRFC4122
- VariantMicrosoft
- VariantFuture
-)
-
-// UUID DCE domains.
-const (
- DomainPerson = iota
- DomainGroup
- DomainOrg
-)
-
-// Timestamp is the count of 100-nanosecond intervals since 00:00:00.00,
-// 15 October 1582 within a V1 UUID. This type has no meaning for other
-// UUID versions since they don't have an embedded timestamp.
-type Timestamp uint64
-
-const _100nsPerSecond = 10000000
-
-// Time returns the UTC time.Time representation of a Timestamp
-func (t Timestamp) Time() (time.Time, error) {
- secs := uint64(t) / _100nsPerSecond
- nsecs := 100 * (uint64(t) % _100nsPerSecond)
-
- return time.Unix(int64(secs)-(epochStart/_100nsPerSecond), int64(nsecs)), nil
-}
-
-// TimestampFromV1 returns the Timestamp embedded within a V1 UUID.
-// Returns an error if the UUID is any version other than 1.
-func TimestampFromV1(u UUID) (Timestamp, error) {
- if u.Version() != 1 {
- err := fmt.Errorf("uuid: %s is version %d, not version 1", u, u.Version())
- return 0, err
- }
-
- low := binary.BigEndian.Uint32(u[0:4])
- mid := binary.BigEndian.Uint16(u[4:6])
- hi := binary.BigEndian.Uint16(u[6:8]) & 0xfff
-
- return Timestamp(uint64(low) + (uint64(mid) << 32) + (uint64(hi) << 48)), nil
-}
-
-// TimestampFromV6 returns the Timestamp embedded within a V6 UUID. This
-// function returns an error if the UUID is any version other than 6.
-//
-// This is implemented based on revision 03 of the Peabody UUID draft, and may
-// be subject to change pending further revisions. Until the final specification
-// revision is finished, changes required to implement updates to the spec will
-// not be considered a breaking change. They will happen as a minor version
-// releases until the spec is final.
-func TimestampFromV6(u UUID) (Timestamp, error) {
- if u.Version() != 6 {
- return 0, fmt.Errorf("uuid: %s is version %d, not version 6", u, u.Version())
- }
-
- hi := binary.BigEndian.Uint32(u[0:4])
- mid := binary.BigEndian.Uint16(u[4:6])
- low := binary.BigEndian.Uint16(u[6:8]) & 0xfff
-
- return Timestamp(uint64(low) + (uint64(mid) << 12) + (uint64(hi) << 28)), nil
-}
-
-// Nil is the nil UUID, as specified in RFC-4122, that has all 128 bits set to
-// zero.
-var Nil = UUID{}
-
-// Predefined namespace UUIDs.
-var (
- NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
- NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
- NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
- NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
-)
-
-// IsNil returns if the UUID is equal to the nil UUID
-func (u UUID) IsNil() bool {
- return u == Nil
-}
-
-// Version returns the algorithm version used to generate the UUID.
-func (u UUID) Version() byte {
- return u[6] >> 4
-}
-
-// Variant returns the UUID layout variant.
-func (u UUID) Variant() byte {
- switch {
- case (u[8] >> 7) == 0x00:
- return VariantNCS
- case (u[8] >> 6) == 0x02:
- return VariantRFC4122
- case (u[8] >> 5) == 0x06:
- return VariantMicrosoft
- case (u[8] >> 5) == 0x07:
- fallthrough
- default:
- return VariantFuture
- }
-}
-
-// Bytes returns a byte slice representation of the UUID.
-func (u UUID) Bytes() []byte {
- return u[:]
-}
-
-// encodeCanonical encodes the canonical RFC-4122 form of UUID u into the
-// first 36 bytes dst.
-func encodeCanonical(dst []byte, u UUID) {
- const hextable = "0123456789abcdef"
- dst[8] = '-'
- dst[13] = '-'
- dst[18] = '-'
- dst[23] = '-'
- for i, x := range [16]byte{
- 0, 2, 4, 6,
- 9, 11,
- 14, 16,
- 19, 21,
- 24, 26, 28, 30, 32, 34,
- } {
- c := u[i]
- dst[x] = hextable[c>>4]
- dst[x+1] = hextable[c&0x0f]
- }
-}
-
-// String returns a canonical RFC-4122 string representation of the UUID:
-// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
-func (u UUID) String() string {
- var buf [36]byte
- encodeCanonical(buf[:], u)
- return string(buf[:])
-}
-
-// Format implements fmt.Formatter for UUID values.
-//
-// The behavior is as follows:
-// The 'x' and 'X' verbs output only the hex digits of the UUID, using a-f for 'x' and A-F for 'X'.
-// The 'v', '+v', 's' and 'q' verbs return the canonical RFC-4122 string representation.
-// The 'S' verb returns the RFC-4122 format, but with capital hex digits.
-// The '#v' verb returns the "Go syntax" representation, which is a 16 byte array initializer.
-// All other verbs not handled directly by the fmt package (like '%p') are unsupported and will return
-// "%!verb(uuid.UUID=value)" as recommended by the fmt package.
-func (u UUID) Format(f fmt.State, c rune) {
- if c == 'v' && f.Flag('#') {
- fmt.Fprintf(f, "%#v", [Size]byte(u))
- return
- }
- switch c {
- case 'x', 'X':
- b := make([]byte, 32)
- hex.Encode(b, u[:])
- if c == 'X' {
- toUpperHex(b)
- }
- _, _ = f.Write(b)
- case 'v', 's', 'S':
- b, _ := u.MarshalText()
- if c == 'S' {
- toUpperHex(b)
- }
- _, _ = f.Write(b)
- case 'q':
- b := make([]byte, 38)
- b[0] = '"'
- encodeCanonical(b[1:], u)
- b[37] = '"'
- _, _ = f.Write(b)
- default:
- // invalid/unsupported format verb
- fmt.Fprintf(f, "%%!%c(uuid.UUID=%s)", c, u.String())
- }
-}
-
-func toUpperHex(b []byte) {
- for i, c := range b {
- if 'a' <= c && c <= 'f' {
- b[i] = c - ('a' - 'A')
- }
- }
-}
-
-// SetVersion sets the version bits.
-func (u *UUID) SetVersion(v byte) {
- u[6] = (u[6] & 0x0f) | (v << 4)
-}
-
-// SetVariant sets the variant bits.
-func (u *UUID) SetVariant(v byte) {
- switch v {
- case VariantNCS:
- u[8] = (u[8]&(0xff>>1) | (0x00 << 7))
- case VariantRFC4122:
- u[8] = (u[8]&(0xff>>2) | (0x02 << 6))
- case VariantMicrosoft:
- u[8] = (u[8]&(0xff>>3) | (0x06 << 5))
- case VariantFuture:
- fallthrough
- default:
- u[8] = (u[8]&(0xff>>3) | (0x07 << 5))
- }
-}
-
-// Must is a helper that wraps a call to a function returning (UUID, error)
-// and panics if the error is non-nil. It is intended for use in variable
-// initializations such as
-//
-// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000"))
-func Must(u UUID, err error) UUID {
- if err != nil {
- panic(err)
- }
- return u
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/.gitignore b/vendor/github.com/golang-jwt/jwt/v5/.gitignore
deleted file mode 100644
index 09573e01..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-.DS_Store
-bin
-.idea/
-
diff --git a/vendor/github.com/golang-jwt/jwt/v5/LICENSE b/vendor/github.com/golang-jwt/jwt/v5/LICENSE
deleted file mode 100644
index 35dbc252..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/LICENSE
+++ /dev/null
@@ -1,9 +0,0 @@
-Copyright (c) 2012 Dave Grijalva
-Copyright (c) 2021 golang-jwt maintainers
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
diff --git a/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md
deleted file mode 100644
index ff9c57e1..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md
+++ /dev/null
@@ -1,195 +0,0 @@
-# Migration Guide (v5.0.0)
-
-Version `v5` contains a major rework of core functionalities in the `jwt-go`
-library. This includes support for several validation options as well as a
-re-design of the `Claims` interface. Lastly, we reworked how errors work under
-the hood, which should provide a better overall developer experience.
-
-Starting from [v5.0.0](https://github.com/golang-jwt/jwt/releases/tag/v5.0.0),
-the import path will be:
-
- "github.com/golang-jwt/jwt/v5"
-
-For most users, changing the import path *should* suffice. However, since we
-intentionally changed and cleaned some of the public API, existing programs
-might need to be updated. The following sections describe significant changes
-and corresponding updates for existing programs.
-
-## Parsing and Validation Options
-
-Under the hood, a new `Validator` struct takes care of validating the claims. A
-long awaited feature has been the option to fine-tune the validation of tokens.
-This is now possible with several `ParserOption` functions that can be appended
-to most `Parse` functions, such as `ParseWithClaims`. The most important options
-and changes are:
- * Added `WithLeeway` to support specifying the leeway that is allowed when
- validating time-based claims, such as `exp` or `nbf`.
- * Changed default behavior to not check the `iat` claim. Usage of this claim
- is OPTIONAL according to the JWT RFC. The claim itself is also purely
- informational according to the RFC, so a strict validation failure is not
- recommended. If you want to check for sensible values in these claims,
- please use the `WithIssuedAt` parser option.
- * Added `WithAudience`, `WithSubject` and `WithIssuer` to support checking for
- expected `aud`, `sub` and `iss`.
- * Added `WithStrictDecoding` and `WithPaddingAllowed` options to allow
- previously global settings to enable base64 strict encoding and the parsing
- of base64 strings with padding. The latter is strictly speaking against the
- standard, but unfortunately some of the major identity providers issue some
- of these incorrect tokens. Both options are disabled by default.
-
-## Changes to the `Claims` interface
-
-### Complete Restructuring
-
-Previously, the claims interface was satisfied with an implementation of a
-`Valid() error` function. This had several issues:
- * The different claim types (struct claims, map claims, etc.) then contained
- similar (but not 100 % identical) code of how this validation was done. This
- lead to a lot of (almost) duplicate code and was hard to maintain
- * It was not really semantically close to what a "claim" (or a set of claims)
- really is; which is a list of defined key/value pairs with a certain
- semantic meaning.
-
-Since all the validation functionality is now extracted into the validator, all
-`VerifyXXX` and `Valid` functions have been removed from the `Claims` interface.
-Instead, the interface now represents a list of getters to retrieve values with
-a specific meaning. This allows us to completely decouple the validation logic
-with the underlying storage representation of the claim, which could be a
-struct, a map or even something stored in a database.
-
-```go
-type Claims interface {
- GetExpirationTime() (*NumericDate, error)
- GetIssuedAt() (*NumericDate, error)
- GetNotBefore() (*NumericDate, error)
- GetIssuer() (string, error)
- GetSubject() (string, error)
- GetAudience() (ClaimStrings, error)
-}
-```
-
-Users that previously directly called the `Valid` function on their claims,
-e.g., to perform validation independently of parsing/verifying a token, can now
-use the `jwt.NewValidator` function to create a `Validator` independently of the
-`Parser`.
-
-```go
-var v = jwt.NewValidator(jwt.WithLeeway(5*time.Second))
-v.Validate(myClaims)
-```
-
-### Supported Claim Types and Removal of `StandardClaims`
-
-The two standard claim types supported by this library, `MapClaims` and
-`RegisteredClaims` both implement the necessary functions of this interface. The
-old `StandardClaims` struct, which has already been deprecated in `v4` is now
-removed.
-
-Users using custom claims, in most cases, will not experience any changes in the
-behavior as long as they embedded `RegisteredClaims`. If they created a new
-claim type from scratch, they now need to implemented the proper getter
-functions.
-
-### Migrating Application Specific Logic of the old `Valid`
-
-Previously, users could override the `Valid` method in a custom claim, for
-example to extend the validation with application-specific claims. However, this
-was always very dangerous, since once could easily disable the standard
-validation and signature checking.
-
-In order to avoid that, while still supporting the use-case, a new
-`ClaimsValidator` interface has been introduced. This interface consists of the
-`Validate() error` function. If the validator sees, that a `Claims` struct
-implements this interface, the errors returned to the `Validate` function will
-be *appended* to the regular standard validation. It is not possible to disable
-the standard validation anymore (even only by accident).
-
-Usage examples can be found in [example_test.go](./example_test.go), to build
-claims structs like the following.
-
-```go
-// MyCustomClaims includes all registered claims, plus Foo.
-type MyCustomClaims struct {
- Foo string `json:"foo"`
- jwt.RegisteredClaims
-}
-
-// Validate can be used to execute additional application-specific claims
-// validation.
-func (m MyCustomClaims) Validate() error {
- if m.Foo != "bar" {
- return errors.New("must be foobar")
- }
-
- return nil
-}
-```
-
-## Changes to the `Token` and `Parser` struct
-
-The previously global functions `DecodeSegment` and `EncodeSegment` were moved
-to the `Parser` and `Token` struct respectively. This will allow us in the
-future to configure the behavior of these two based on options supplied on the
-parser or the token (creation). This also removes two previously global
-variables and moves them to parser options `WithStrictDecoding` and
-`WithPaddingAllowed`.
-
-In order to do that, we had to adjust the way signing methods work. Previously
-they were given a base64 encoded signature in `Verify` and were expected to
-return a base64 encoded version of the signature in `Sign`, both as a `string`.
-However, this made it necessary to have `DecodeSegment` and `EncodeSegment`
-global and was a less than perfect design because we were repeating
-encoding/decoding steps for all signing methods. Now, `Sign` and `Verify`
-operate on a decoded signature as a `[]byte`, which feels more natural for a
-cryptographic operation anyway. Lastly, `Parse` and `SignedString` take care of
-the final encoding/decoding part.
-
-In addition to that, we also changed the `Signature` field on `Token` from a
-`string` to `[]byte` and this is also now populated with the decoded form. This
-is also more consistent, because the other parts of the JWT, mainly `Header` and
-`Claims` were already stored in decoded form in `Token`. Only the signature was
-stored in base64 encoded form, which was redundant with the information in the
-`Raw` field, which contains the complete token as base64.
-
-```go
-type Token struct {
- Raw string // Raw contains the raw token
- Method SigningMethod // Method is the signing method used or to be used
- Header map[string]interface{} // Header is the first segment of the token in decoded form
- Claims Claims // Claims is the second segment of the token in decoded form
- Signature []byte // Signature is the third segment of the token in decoded form
- Valid bool // Valid specifies if the token is valid
-}
-```
-
-Most (if not all) of these changes should not impact the normal usage of this
-library. Only users directly accessing the `Signature` field as well as
-developers of custom signing methods should be affected.
-
-# Migration Guide (v4.0.0)
-
-Starting from [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0),
-the import path will be:
-
- "github.com/golang-jwt/jwt/v4"
-
-The `/v4` version will be backwards compatible with existing `v3.x.y` tags in
-this repo, as well as `github.com/dgrijalva/jwt-go`. For most users this should
-be a drop-in replacement, if you're having troubles migrating, please open an
-issue.
-
-You can replace all occurrences of `github.com/dgrijalva/jwt-go` or
-`github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually
-or by using tools such as `sed` or `gofmt`.
-
-And then you'd typically run:
-
-```
-go get github.com/golang-jwt/jwt/v4
-go mod tidy
-```
-
-# Older releases (before v3.2.0)
-
-The original migration guide for older releases can be found at
-https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md.
diff --git a/vendor/github.com/golang-jwt/jwt/v5/README.md b/vendor/github.com/golang-jwt/jwt/v5/README.md
deleted file mode 100644
index 964598a3..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/README.md
+++ /dev/null
@@ -1,167 +0,0 @@
-# jwt-go
-
-[](https://github.com/golang-jwt/jwt/actions/workflows/build.yml)
-[](https://pkg.go.dev/github.com/golang-jwt/jwt/v5)
-[](https://coveralls.io/github/golang-jwt/jwt?branch=main)
-
-A [go](http://www.golang.org) (or 'golang' for search engine friendliness)
-implementation of [JSON Web
-Tokens](https://datatracker.ietf.org/doc/html/rfc7519).
-
-Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0)
-this project adds Go module support, but maintains backwards compatibility with
-older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`. See the
-[`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information. Version
-v5.0.0 introduces major improvements to the validation of tokens, but is not
-entirely backwards compatible.
-
-> After the original author of the library suggested migrating the maintenance
-> of `jwt-go`, a dedicated team of open source maintainers decided to clone the
-> existing library into this repository. See
-> [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a
-> detailed discussion on this topic.
-
-
-**SECURITY NOTICE:** Some older versions of Go have a security issue in the
-crypto/elliptic. Recommendation is to upgrade to at least 1.15 See issue
-[dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more
-detail.
-
-**SECURITY NOTICE:** It's important that you [validate the `alg` presented is
-what you
-expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/).
-This library attempts to make it easy to do the right thing by requiring key
-types match the expected alg, but you should take the extra step to verify it in
-your usage. See the examples provided.
-
-### Supported Go versions
-
-Our support of Go versions is aligned with Go's [version release
-policy](https://golang.org/doc/devel/release#policy). So we will support a major
-version of Go until there are two newer major releases. We no longer support
-building jwt-go with unsupported Go versions, as these contain security
-vulnerabilities which will not be fixed.
-
-## What the heck is a JWT?
-
-JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web
-Tokens.
-
-In short, it's a signed JSON object that does something useful (for example,
-authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is
-made of three parts, separated by `.`'s. The first two parts are JSON objects,
-that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648)
-encoded. The last part is the signature, encoded the same way.
-
-The first part is called the header. It contains the necessary information for
-verifying the last part, the signature. For example, which encryption method
-was used for signing and what key was used.
-
-The part in the middle is the interesting bit. It's called the Claims and
-contains the actual stuff you care about. Refer to [RFC
-7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about
-reserved keys and the proper way to add your own.
-
-## What's in the box?
-
-This library supports the parsing and verification as well as the generation and
-signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA,
-RSA-PSS, and ECDSA, though hooks are present for adding your own.
-
-## Installation Guidelines
-
-1. To install the jwt package, you first need to have
- [Go](https://go.dev/doc/install) installed, then you can use the command
- below to add `jwt-go` as a dependency in your Go program.
-
-```sh
-go get -u github.com/golang-jwt/jwt/v5
-```
-
-2. Import it in your code:
-
-```go
-import "github.com/golang-jwt/jwt/v5"
-```
-
-## Usage
-
-A detailed usage guide, including how to sign and verify tokens can be found on
-our [documentation website](https://golang-jwt.github.io/jwt/usage/create/).
-
-## Examples
-
-See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v5)
-for examples of usage:
-
-* [Simple example of parsing and validating a
- token](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#example-Parse-Hmac)
-* [Simple example of building and signing a
- token](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#example-New-Hmac)
-* [Directory of
- Examples](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#pkg-examples)
-
-## Compliance
-
-This library was last reviewed to comply with [RFC
-7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few
-notable differences:
-
-* In order to protect against accidental use of [Unsecured
- JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using
- `alg=none` will only be accepted if the constant
- `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
-
-## Project Status & Versioning
-
-This library is considered production ready. Feedback and feature requests are
-appreciated. The API should be considered stable. There should be very few
-backwards-incompatible changes outside of major version updates (and only with
-good reason).
-
-This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull
-requests will land on `main`. Periodically, versions will be tagged from
-`main`. You can find all the releases on [the project releases
-page](https://github.com/golang-jwt/jwt/releases).
-
-**BREAKING CHANGES:*** A full list of breaking changes is available in
-`VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating
-your code.
-
-## Extensions
-
-This library publishes all the necessary components for adding your own signing
-methods or key functions. Simply implement the `SigningMethod` interface and
-register a factory method using `RegisterSigningMethod` or provide a
-`jwt.Keyfunc`.
-
-A common use case would be integrating with different 3rd party signature
-providers, like key management services from various cloud providers or Hardware
-Security Modules (HSMs) or to implement additional standards.
-
-| Extension | Purpose | Repo |
-| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ |
-| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go |
-| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms |
-| JWKS | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc |
-
-*Disclaimer*: Unless otherwise specified, these integrations are maintained by
-third parties and should not be considered as a primary offer by any of the
-mentioned cloud providers
-
-## More
-
-Go package documentation can be found [on
-pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt/v5). Additional
-documentation can be found on [our project
-page](https://golang-jwt.github.io/jwt/).
-
-The command line utility included in this project (cmd/jwt) provides a
-straightforward example of token creation and parsing as well as a useful tool
-for debugging your own integration. You'll also find several implementation
-examples in the documentation.
-
-[golang-jwt](https://github.com/orgs/golang-jwt) incorporates a modified version
-of the JWT logo, which is distributed under the terms of the [MIT
-License](https://github.com/jsonwebtoken/jsonwebtoken.github.io/blob/master/LICENSE.txt).
diff --git a/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md b/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md
deleted file mode 100644
index b08402c3..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# Security Policy
-
-## Supported Versions
-
-As of February 2022 (and until this document is updated), the latest version `v4` is supported.
-
-## Reporting a Vulnerability
-
-If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try be explicit, describe steps to reproduce the security issue with code example(s).
-
-You will receive a response within a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem.
-
-## Public Discussions
-
-Please avoid publicly discussing a potential security vulnerability.
-
-Let's take this offline and find a solution first, this limits the potential impact as much as possible.
-
-We appreciate your help!
diff --git a/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md
deleted file mode 100644
index b5039e49..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md
+++ /dev/null
@@ -1,137 +0,0 @@
-# `jwt-go` Version History
-
-The following version history is kept for historic purposes. To retrieve the current changes of each version, please refer to the change-log of the specific release versions on https://github.com/golang-jwt/jwt/releases.
-
-## 4.0.0
-
-* Introduces support for Go modules. The `v4` version will be backwards compatible with `v3.x.y`.
-
-## 3.2.2
-
-* Starting from this release, we are adopting the policy to support the most 2 recent versions of Go currently available. By the time of this release, this is Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)).
-* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and contained invalid contents, i.e. non-numeric/date. Thanks for @thaJeztah for making us aware of that and @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)).
-* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)).
-* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)).
-
-## 3.2.1
-
-* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code
- * Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`
-* Fixed type confusing issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). This fixes CVE-2020-26160
-
-#### 3.2.0
-
-* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
-* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
-* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
-* Deprecated `ParseFromRequestWithClaims` to simplify API in the future.
-
-#### 3.1.0
-
-* Improvements to `jwt` command line tool
-* Added `SkipClaimsValidation` option to `Parser`
-* Documentation updates
-
-#### 3.0.0
-
-* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
- * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
- * `ParseFromRequest` has been moved to `request` subpackage and usage has changed
- * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
-* Other Additions and Changes
- * Added `Claims` interface type to allow users to decode the claims into a custom type
- * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
- * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
- * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
- * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
- * Added several new, more specific, validation errors to error type bitmask
- * Moved examples from README to executable example files
- * Signing method registry is now thread safe
- * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser)
-
-#### 2.7.0
-
-This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
-
-* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
-* Error text for expired tokens includes how long it's been expired
-* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
-* Documentation updates
-
-#### 2.6.0
-
-* Exposed inner error within ValidationError
-* Fixed validation errors when using UseJSONNumber flag
-* Added several unit tests
-
-#### 2.5.0
-
-* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
-* Updated/fixed some documentation
-* Added more helpful error message when trying to parse tokens that begin with `BEARER `
-
-#### 2.4.0
-
-* Added new type, Parser, to allow for configuration of various parsing parameters
- * You can now specify a list of valid signing methods. Anything outside this set will be rejected.
- * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
-* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
-* Fixed some bugs with ECDSA parsing
-
-#### 2.3.0
-
-* Added support for ECDSA signing methods
-* Added support for RSA PSS signing methods (requires go v1.4)
-
-#### 2.2.0
-
-* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.
-
-#### 2.1.0
-
-Backwards compatible API change that was missed in 2.0.0.
-
-* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
-
-#### 2.0.0
-
-There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
-
-The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
-
-It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
-
-* **Compatibility Breaking Changes**
- * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
- * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
- * `KeyFunc` now returns `interface{}` instead of `[]byte`
- * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
- * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
-* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
- * Added public package global `SigningMethodHS256`
- * Added public package global `SigningMethodHS384`
- * Added public package global `SigningMethodHS512`
-* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
- * Added public package global `SigningMethodRS256`
- * Added public package global `SigningMethodRS384`
- * Added public package global `SigningMethodRS512`
-* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
-* Refactored the RSA implementation to be easier to read
-* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
-
-## 1.0.2
-
-* Fixed bug in parsing public keys from certificates
-* Added more tests around the parsing of keys for RS256
-* Code refactoring in RS256 implementation. No functional changes
-
-## 1.0.1
-
-* Fixed panic if RS256 signing method was passed an invalid key
-
-## 1.0.0
-
-* First versioned release
-* API stabilized
-* Supports creating, signing, parsing, and validating JWT tokens
-* Supports RS256 and HS256 signing methods
diff --git a/vendor/github.com/golang-jwt/jwt/v5/claims.go b/vendor/github.com/golang-jwt/jwt/v5/claims.go
deleted file mode 100644
index d50ff3da..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/claims.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package jwt
-
-// Claims represent any form of a JWT Claims Set according to
-// https://datatracker.ietf.org/doc/html/rfc7519#section-4. In order to have a
-// common basis for validation, it is required that an implementation is able to
-// supply at least the claim names provided in
-// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1 namely `exp`,
-// `iat`, `nbf`, `iss`, `sub` and `aud`.
-type Claims interface {
- GetExpirationTime() (*NumericDate, error)
- GetIssuedAt() (*NumericDate, error)
- GetNotBefore() (*NumericDate, error)
- GetIssuer() (string, error)
- GetSubject() (string, error)
- GetAudience() (ClaimStrings, error)
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/doc.go b/vendor/github.com/golang-jwt/jwt/v5/doc.go
deleted file mode 100644
index a86dc1a3..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/doc.go
+++ /dev/null
@@ -1,4 +0,0 @@
-// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
-//
-// See README.md for more info.
-package jwt
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
deleted file mode 100644
index c929e4a0..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package jwt
-
-import (
- "crypto"
- "crypto/ecdsa"
- "crypto/rand"
- "errors"
- "math/big"
-)
-
-var (
- // Sadly this is missing from crypto/ecdsa compared to crypto/rsa
- ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
-)
-
-// SigningMethodECDSA implements the ECDSA family of signing methods.
-// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
-type SigningMethodECDSA struct {
- Name string
- Hash crypto.Hash
- KeySize int
- CurveBits int
-}
-
-// Specific instances for EC256 and company
-var (
- SigningMethodES256 *SigningMethodECDSA
- SigningMethodES384 *SigningMethodECDSA
- SigningMethodES512 *SigningMethodECDSA
-)
-
-func init() {
- // ES256
- SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
- RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
- return SigningMethodES256
- })
-
- // ES384
- SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
- RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
- return SigningMethodES384
- })
-
- // ES512
- SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
- RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
- return SigningMethodES512
- })
-}
-
-func (m *SigningMethodECDSA) Alg() string {
- return m.Name
-}
-
-// Verify implements token verification for the SigningMethod.
-// For this verify method, key must be an ecdsa.PublicKey struct
-func (m *SigningMethodECDSA) Verify(signingString string, sig []byte, key interface{}) error {
- // Get the key
- var ecdsaKey *ecdsa.PublicKey
- switch k := key.(type) {
- case *ecdsa.PublicKey:
- ecdsaKey = k
- default:
- return newError("ECDSA verify expects *ecdsa.PublicKey", ErrInvalidKeyType)
- }
-
- if len(sig) != 2*m.KeySize {
- return ErrECDSAVerification
- }
-
- r := big.NewInt(0).SetBytes(sig[:m.KeySize])
- s := big.NewInt(0).SetBytes(sig[m.KeySize:])
-
- // Create hasher
- if !m.Hash.Available() {
- return ErrHashUnavailable
- }
- hasher := m.Hash.New()
- hasher.Write([]byte(signingString))
-
- // Verify the signature
- if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus {
- return nil
- }
-
- return ErrECDSAVerification
-}
-
-// Sign implements token signing for the SigningMethod.
-// For this signing method, key must be an ecdsa.PrivateKey struct
-func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) ([]byte, error) {
- // Get the key
- var ecdsaKey *ecdsa.PrivateKey
- switch k := key.(type) {
- case *ecdsa.PrivateKey:
- ecdsaKey = k
- default:
- return nil, newError("ECDSA sign expects *ecdsa.PrivateKey", ErrInvalidKeyType)
- }
-
- // Create the hasher
- if !m.Hash.Available() {
- return nil, ErrHashUnavailable
- }
-
- hasher := m.Hash.New()
- hasher.Write([]byte(signingString))
-
- // Sign the string and return r, s
- if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
- curveBits := ecdsaKey.Curve.Params().BitSize
-
- if m.CurveBits != curveBits {
- return nil, ErrInvalidKey
- }
-
- keyBytes := curveBits / 8
- if curveBits%8 > 0 {
- keyBytes += 1
- }
-
- // We serialize the outputs (r and s) into big-endian byte arrays
- // padded with zeros on the left to make sure the sizes work out.
- // Output must be 2*keyBytes long.
- out := make([]byte, 2*keyBytes)
- r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output.
- s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output.
-
- return out, nil
- } else {
- return nil, err
- }
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go
deleted file mode 100644
index 5700636d..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package jwt
-
-import (
- "crypto/ecdsa"
- "crypto/x509"
- "encoding/pem"
- "errors"
-)
-
-var (
- ErrNotECPublicKey = errors.New("key is not a valid ECDSA public key")
- ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key")
-)
-
-// ParseECPrivateKeyFromPEM parses a PEM encoded Elliptic Curve Private Key Structure
-func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
- var err error
-
- // Parse PEM block
- var block *pem.Block
- if block, _ = pem.Decode(key); block == nil {
- return nil, ErrKeyMustBePEMEncoded
- }
-
- // Parse the key
- var parsedKey interface{}
- if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
- if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
- return nil, err
- }
- }
-
- var pkey *ecdsa.PrivateKey
- var ok bool
- if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
- return nil, ErrNotECPrivateKey
- }
-
- return pkey, nil
-}
-
-// ParseECPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key
-func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
- var err error
-
- // Parse PEM block
- var block *pem.Block
- if block, _ = pem.Decode(key); block == nil {
- return nil, ErrKeyMustBePEMEncoded
- }
-
- // Parse the key
- var parsedKey interface{}
- if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
- if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
- parsedKey = cert.PublicKey
- } else {
- return nil, err
- }
- }
-
- var pkey *ecdsa.PublicKey
- var ok bool
- if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
- return nil, ErrNotECPublicKey
- }
-
- return pkey, nil
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ed25519.go b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go
deleted file mode 100644
index c2138119..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/ed25519.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package jwt
-
-import (
- "crypto"
- "crypto/ed25519"
- "crypto/rand"
- "errors"
-)
-
-var (
- ErrEd25519Verification = errors.New("ed25519: verification error")
-)
-
-// SigningMethodEd25519 implements the EdDSA family.
-// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification
-type SigningMethodEd25519 struct{}
-
-// Specific instance for EdDSA
-var (
- SigningMethodEdDSA *SigningMethodEd25519
-)
-
-func init() {
- SigningMethodEdDSA = &SigningMethodEd25519{}
- RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod {
- return SigningMethodEdDSA
- })
-}
-
-func (m *SigningMethodEd25519) Alg() string {
- return "EdDSA"
-}
-
-// Verify implements token verification for the SigningMethod.
-// For this verify method, key must be an ed25519.PublicKey
-func (m *SigningMethodEd25519) Verify(signingString string, sig []byte, key interface{}) error {
- var ed25519Key ed25519.PublicKey
- var ok bool
-
- if ed25519Key, ok = key.(ed25519.PublicKey); !ok {
- return newError("Ed25519 verify expects ed25519.PublicKey", ErrInvalidKeyType)
- }
-
- if len(ed25519Key) != ed25519.PublicKeySize {
- return ErrInvalidKey
- }
-
- // Verify the signature
- if !ed25519.Verify(ed25519Key, []byte(signingString), sig) {
- return ErrEd25519Verification
- }
-
- return nil
-}
-
-// Sign implements token signing for the SigningMethod.
-// For this signing method, key must be an ed25519.PrivateKey
-func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) ([]byte, error) {
- var ed25519Key crypto.Signer
- var ok bool
-
- if ed25519Key, ok = key.(crypto.Signer); !ok {
- return nil, newError("Ed25519 sign expects crypto.Signer", ErrInvalidKeyType)
- }
-
- if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok {
- return nil, ErrInvalidKey
- }
-
- // Sign the string and return the result. ed25519 performs a two-pass hash
- // as part of its algorithm. Therefore, we need to pass a non-prehashed
- // message into the Sign function, as indicated by crypto.Hash(0)
- sig, err := ed25519Key.Sign(rand.Reader, []byte(signingString), crypto.Hash(0))
- if err != nil {
- return nil, err
- }
-
- return sig, nil
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go
deleted file mode 100644
index cdb5e68e..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package jwt
-
-import (
- "crypto"
- "crypto/ed25519"
- "crypto/x509"
- "encoding/pem"
- "errors"
-)
-
-var (
- ErrNotEdPrivateKey = errors.New("key is not a valid Ed25519 private key")
- ErrNotEdPublicKey = errors.New("key is not a valid Ed25519 public key")
-)
-
-// ParseEdPrivateKeyFromPEM parses a PEM-encoded Edwards curve private key
-func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) {
- var err error
-
- // Parse PEM block
- var block *pem.Block
- if block, _ = pem.Decode(key); block == nil {
- return nil, ErrKeyMustBePEMEncoded
- }
-
- // Parse the key
- var parsedKey interface{}
- if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
- return nil, err
- }
-
- var pkey ed25519.PrivateKey
- var ok bool
- if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok {
- return nil, ErrNotEdPrivateKey
- }
-
- return pkey, nil
-}
-
-// ParseEdPublicKeyFromPEM parses a PEM-encoded Edwards curve public key
-func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) {
- var err error
-
- // Parse PEM block
- var block *pem.Block
- if block, _ = pem.Decode(key); block == nil {
- return nil, ErrKeyMustBePEMEncoded
- }
-
- // Parse the key
- var parsedKey interface{}
- if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
- return nil, err
- }
-
- var pkey ed25519.PublicKey
- var ok bool
- if pkey, ok = parsedKey.(ed25519.PublicKey); !ok {
- return nil, ErrNotEdPublicKey
- }
-
- return pkey, nil
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors.go b/vendor/github.com/golang-jwt/jwt/v5/errors.go
deleted file mode 100644
index 23bb616d..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/errors.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package jwt
-
-import (
- "errors"
- "strings"
-)
-
-var (
- ErrInvalidKey = errors.New("key is invalid")
- ErrInvalidKeyType = errors.New("key is of invalid type")
- ErrHashUnavailable = errors.New("the requested hash function is unavailable")
- ErrTokenMalformed = errors.New("token is malformed")
- ErrTokenUnverifiable = errors.New("token is unverifiable")
- ErrTokenSignatureInvalid = errors.New("token signature is invalid")
- ErrTokenRequiredClaimMissing = errors.New("token is missing required claim")
- ErrTokenInvalidAudience = errors.New("token has invalid audience")
- ErrTokenExpired = errors.New("token is expired")
- ErrTokenUsedBeforeIssued = errors.New("token used before issued")
- ErrTokenInvalidIssuer = errors.New("token has invalid issuer")
- ErrTokenInvalidSubject = errors.New("token has invalid subject")
- ErrTokenNotValidYet = errors.New("token is not valid yet")
- ErrTokenInvalidId = errors.New("token has invalid id")
- ErrTokenInvalidClaims = errors.New("token has invalid claims")
- ErrInvalidType = errors.New("invalid type for claim")
-)
-
-// joinedError is an error type that works similar to what [errors.Join]
-// produces, with the exception that it has a nice error string; mainly its
-// error messages are concatenated using a comma, rather than a newline.
-type joinedError struct {
- errs []error
-}
-
-func (je joinedError) Error() string {
- msg := []string{}
- for _, err := range je.errs {
- msg = append(msg, err.Error())
- }
-
- return strings.Join(msg, ", ")
-}
-
-// joinErrors joins together multiple errors. Useful for scenarios where
-// multiple errors next to each other occur, e.g., in claims validation.
-func joinErrors(errs ...error) error {
- return &joinedError{
- errs: errs,
- }
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go b/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go
deleted file mode 100644
index a893d355..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go
+++ /dev/null
@@ -1,47 +0,0 @@
-//go:build go1.20
-// +build go1.20
-
-package jwt
-
-import (
- "fmt"
-)
-
-// Unwrap implements the multiple error unwrapping for this error type, which is
-// possible in Go 1.20.
-func (je joinedError) Unwrap() []error {
- return je.errs
-}
-
-// newError creates a new error message with a detailed error message. The
-// message will be prefixed with the contents of the supplied error type.
-// Additionally, more errors, that provide more context can be supplied which
-// will be appended to the message. This makes use of Go 1.20's possibility to
-// include more than one %w formatting directive in [fmt.Errorf].
-//
-// For example,
-//
-// newError("no keyfunc was provided", ErrTokenUnverifiable)
-//
-// will produce the error string
-//
-// "token is unverifiable: no keyfunc was provided"
-func newError(message string, err error, more ...error) error {
- var format string
- var args []any
- if message != "" {
- format = "%w: %s"
- args = []any{err, message}
- } else {
- format = "%w"
- args = []any{err}
- }
-
- for _, e := range more {
- format += ": %w"
- args = append(args, e)
- }
-
- err = fmt.Errorf(format, args...)
- return err
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go b/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go
deleted file mode 100644
index 2ad542f0..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go
+++ /dev/null
@@ -1,78 +0,0 @@
-//go:build !go1.20
-// +build !go1.20
-
-package jwt
-
-import (
- "errors"
- "fmt"
-)
-
-// Is implements checking for multiple errors using [errors.Is], since multiple
-// error unwrapping is not possible in versions less than Go 1.20.
-func (je joinedError) Is(err error) bool {
- for _, e := range je.errs {
- if errors.Is(e, err) {
- return true
- }
- }
-
- return false
-}
-
-// wrappedErrors is a workaround for wrapping multiple errors in environments
-// where Go 1.20 is not available. It basically uses the already implemented
-// functionality of joinedError to handle multiple errors with supplies a
-// custom error message that is identical to the one we produce in Go 1.20 using
-// multiple %w directives.
-type wrappedErrors struct {
- msg string
- joinedError
-}
-
-// Error returns the stored error string
-func (we wrappedErrors) Error() string {
- return we.msg
-}
-
-// newError creates a new error message with a detailed error message. The
-// message will be prefixed with the contents of the supplied error type.
-// Additionally, more errors, that provide more context can be supplied which
-// will be appended to the message. Since we cannot use of Go 1.20's possibility
-// to include more than one %w formatting directive in [fmt.Errorf], we have to
-// emulate that.
-//
-// For example,
-//
-// newError("no keyfunc was provided", ErrTokenUnverifiable)
-//
-// will produce the error string
-//
-// "token is unverifiable: no keyfunc was provided"
-func newError(message string, err error, more ...error) error {
- // We cannot wrap multiple errors here with %w, so we have to be a little
- // bit creative. Basically, we are using %s instead of %w to produce the
- // same error message and then throw the result into a custom error struct.
- var format string
- var args []any
- if message != "" {
- format = "%s: %s"
- args = []any{err, message}
- } else {
- format = "%s"
- args = []any{err}
- }
- errs := []error{err}
-
- for _, e := range more {
- format += ": %s"
- args = append(args, e)
- errs = append(errs, e)
- }
-
- err = &wrappedErrors{
- msg: fmt.Sprintf(format, args...),
- joinedError: joinedError{errs: errs},
- }
- return err
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/hmac.go b/vendor/github.com/golang-jwt/jwt/v5/hmac.go
deleted file mode 100644
index aca600ce..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/hmac.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package jwt
-
-import (
- "crypto"
- "crypto/hmac"
- "errors"
-)
-
-// SigningMethodHMAC implements the HMAC-SHA family of signing methods.
-// Expects key type of []byte for both signing and validation
-type SigningMethodHMAC struct {
- Name string
- Hash crypto.Hash
-}
-
-// Specific instances for HS256 and company
-var (
- SigningMethodHS256 *SigningMethodHMAC
- SigningMethodHS384 *SigningMethodHMAC
- SigningMethodHS512 *SigningMethodHMAC
- ErrSignatureInvalid = errors.New("signature is invalid")
-)
-
-func init() {
- // HS256
- SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
- RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
- return SigningMethodHS256
- })
-
- // HS384
- SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
- RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
- return SigningMethodHS384
- })
-
- // HS512
- SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
- RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
- return SigningMethodHS512
- })
-}
-
-func (m *SigningMethodHMAC) Alg() string {
- return m.Name
-}
-
-// Verify implements token verification for the SigningMethod. Returns nil if
-// the signature is valid. Key must be []byte.
-//
-// Note it is not advised to provide a []byte which was converted from a 'human
-// readable' string using a subset of ASCII characters. To maximize entropy, you
-// should ideally be providing a []byte key which was produced from a
-// cryptographically random source, e.g. crypto/rand. Additional information
-// about this, and why we intentionally are not supporting string as a key can
-// be found on our usage guide
-// https://golang-jwt.github.io/jwt/usage/signing_methods/#signing-methods-and-key-types.
-func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key interface{}) error {
- // Verify the key is the right type
- keyBytes, ok := key.([]byte)
- if !ok {
- return newError("HMAC verify expects []byte", ErrInvalidKeyType)
- }
-
- // Can we use the specified hashing method?
- if !m.Hash.Available() {
- return ErrHashUnavailable
- }
-
- // This signing method is symmetric, so we validate the signature
- // by reproducing the signature from the signing string and key, then
- // comparing that against the provided signature.
- hasher := hmac.New(m.Hash.New, keyBytes)
- hasher.Write([]byte(signingString))
- if !hmac.Equal(sig, hasher.Sum(nil)) {
- return ErrSignatureInvalid
- }
-
- // No validation errors. Signature is good.
- return nil
-}
-
-// Sign implements token signing for the SigningMethod. Key must be []byte.
-//
-// Note it is not advised to provide a []byte which was converted from a 'human
-// readable' string using a subset of ASCII characters. To maximize entropy, you
-// should ideally be providing a []byte key which was produced from a
-// cryptographically random source, e.g. crypto/rand. Additional information
-// about this, and why we intentionally are not supporting string as a key can
-// be found on our usage guide https://golang-jwt.github.io/jwt/usage/signing_methods/.
-func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) ([]byte, error) {
- if keyBytes, ok := key.([]byte); ok {
- if !m.Hash.Available() {
- return nil, ErrHashUnavailable
- }
-
- hasher := hmac.New(m.Hash.New, keyBytes)
- hasher.Write([]byte(signingString))
-
- return hasher.Sum(nil), nil
- }
-
- return nil, newError("HMAC sign expects []byte", ErrInvalidKeyType)
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/map_claims.go b/vendor/github.com/golang-jwt/jwt/v5/map_claims.go
deleted file mode 100644
index b2b51a1f..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/map_claims.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package jwt
-
-import (
- "encoding/json"
- "fmt"
-)
-
-// MapClaims is a claims type that uses the map[string]interface{} for JSON
-// decoding. This is the default claims type if you don't supply one
-type MapClaims map[string]interface{}
-
-// GetExpirationTime implements the Claims interface.
-func (m MapClaims) GetExpirationTime() (*NumericDate, error) {
- return m.parseNumericDate("exp")
-}
-
-// GetNotBefore implements the Claims interface.
-func (m MapClaims) GetNotBefore() (*NumericDate, error) {
- return m.parseNumericDate("nbf")
-}
-
-// GetIssuedAt implements the Claims interface.
-func (m MapClaims) GetIssuedAt() (*NumericDate, error) {
- return m.parseNumericDate("iat")
-}
-
-// GetAudience implements the Claims interface.
-func (m MapClaims) GetAudience() (ClaimStrings, error) {
- return m.parseClaimsString("aud")
-}
-
-// GetIssuer implements the Claims interface.
-func (m MapClaims) GetIssuer() (string, error) {
- return m.parseString("iss")
-}
-
-// GetSubject implements the Claims interface.
-func (m MapClaims) GetSubject() (string, error) {
- return m.parseString("sub")
-}
-
-// parseNumericDate tries to parse a key in the map claims type as a number
-// date. This will succeed, if the underlying type is either a [float64] or a
-// [json.Number]. Otherwise, nil will be returned.
-func (m MapClaims) parseNumericDate(key string) (*NumericDate, error) {
- v, ok := m[key]
- if !ok {
- return nil, nil
- }
-
- switch exp := v.(type) {
- case float64:
- if exp == 0 {
- return nil, nil
- }
-
- return newNumericDateFromSeconds(exp), nil
- case json.Number:
- v, _ := exp.Float64()
-
- return newNumericDateFromSeconds(v), nil
- }
-
- return nil, newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType)
-}
-
-// parseClaimsString tries to parse a key in the map claims type as a
-// [ClaimsStrings] type, which can either be a string or an array of string.
-func (m MapClaims) parseClaimsString(key string) (ClaimStrings, error) {
- var cs []string
- switch v := m[key].(type) {
- case string:
- cs = append(cs, v)
- case []string:
- cs = v
- case []interface{}:
- for _, a := range v {
- vs, ok := a.(string)
- if !ok {
- return nil, newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType)
- }
- cs = append(cs, vs)
- }
- }
-
- return cs, nil
-}
-
-// parseString tries to parse a key in the map claims type as a [string] type.
-// If the key does not exist, an empty string is returned. If the key has the
-// wrong type, an error is returned.
-func (m MapClaims) parseString(key string) (string, error) {
- var (
- ok bool
- raw interface{}
- iss string
- )
- raw, ok = m[key]
- if !ok {
- return "", nil
- }
-
- iss, ok = raw.(string)
- if !ok {
- return "", newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType)
- }
-
- return iss, nil
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/none.go b/vendor/github.com/golang-jwt/jwt/v5/none.go
deleted file mode 100644
index 685c2ea3..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/none.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package jwt
-
-// SigningMethodNone implements the none signing method. This is required by the spec
-// but you probably should never use it.
-var SigningMethodNone *signingMethodNone
-
-const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"
-
-var NoneSignatureTypeDisallowedError error
-
-type signingMethodNone struct{}
-type unsafeNoneMagicConstant string
-
-func init() {
- SigningMethodNone = &signingMethodNone{}
- NoneSignatureTypeDisallowedError = newError("'none' signature type is not allowed", ErrTokenUnverifiable)
-
- RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
- return SigningMethodNone
- })
-}
-
-func (m *signingMethodNone) Alg() string {
- return "none"
-}
-
-// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
-func (m *signingMethodNone) Verify(signingString string, sig []byte, key interface{}) (err error) {
- // Key must be UnsafeAllowNoneSignatureType to prevent accidentally
- // accepting 'none' signing method
- if _, ok := key.(unsafeNoneMagicConstant); !ok {
- return NoneSignatureTypeDisallowedError
- }
- // If signing method is none, signature must be an empty string
- if len(sig) != 0 {
- return newError("'none' signing method with non-empty signature", ErrTokenUnverifiable)
- }
-
- // Accept 'none' signing method.
- return nil
-}
-
-// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
-func (m *signingMethodNone) Sign(signingString string, key interface{}) ([]byte, error) {
- if _, ok := key.(unsafeNoneMagicConstant); ok {
- return []byte{}, nil
- }
-
- return nil, NoneSignatureTypeDisallowedError
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser.go b/vendor/github.com/golang-jwt/jwt/v5/parser.go
deleted file mode 100644
index ecf99af7..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/parser.go
+++ /dev/null
@@ -1,238 +0,0 @@
-package jwt
-
-import (
- "bytes"
- "encoding/base64"
- "encoding/json"
- "fmt"
- "strings"
-)
-
-type Parser struct {
- // If populated, only these methods will be considered valid.
- validMethods []string
-
- // Use JSON Number format in JSON decoder.
- useJSONNumber bool
-
- // Skip claims validation during token parsing.
- skipClaimsValidation bool
-
- validator *Validator
-
- decodeStrict bool
-
- decodePaddingAllowed bool
-}
-
-// NewParser creates a new Parser with the specified options
-func NewParser(options ...ParserOption) *Parser {
- p := &Parser{
- validator: &Validator{},
- }
-
- // Loop through our parsing options and apply them
- for _, option := range options {
- option(p)
- }
-
- return p
-}
-
-// Parse parses, validates, verifies the signature and returns the parsed token.
-// keyFunc will receive the parsed token and should return the key for validating.
-func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
- return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
-}
-
-// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims
-// interface. This provides default values which can be overridden and allows a caller to use their own type, rather
-// than the default MapClaims implementation of Claims.
-//
-// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims),
-// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the
-// proper memory for it before passing in the overall claims, otherwise you might run into a panic.
-func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
- token, parts, err := p.ParseUnverified(tokenString, claims)
- if err != nil {
- return token, err
- }
-
- // Verify signing method is in the required set
- if p.validMethods != nil {
- var signingMethodValid = false
- var alg = token.Method.Alg()
- for _, m := range p.validMethods {
- if m == alg {
- signingMethodValid = true
- break
- }
- }
- if !signingMethodValid {
- // signing method is not in the listed set
- return token, newError(fmt.Sprintf("signing method %v is invalid", alg), ErrTokenSignatureInvalid)
- }
- }
-
- // Decode signature
- token.Signature, err = p.DecodeSegment(parts[2])
- if err != nil {
- return token, newError("could not base64 decode signature", ErrTokenMalformed, err)
- }
- text := strings.Join(parts[0:2], ".")
-
- // Lookup key(s)
- if keyFunc == nil {
- // keyFunc was not provided. short circuiting validation
- return token, newError("no keyfunc was provided", ErrTokenUnverifiable)
- }
-
- got, err := keyFunc(token)
- if err != nil {
- return token, newError("error while executing keyfunc", ErrTokenUnverifiable, err)
- }
-
- switch have := got.(type) {
- case VerificationKeySet:
- if len(have.Keys) == 0 {
- return token, newError("keyfunc returned empty verification key set", ErrTokenUnverifiable)
- }
- // Iterate through keys and verify signature, skipping the rest when a match is found.
- // Return the last error if no match is found.
- for _, key := range have.Keys {
- if err = token.Method.Verify(text, token.Signature, key); err == nil {
- break
- }
- }
- default:
- err = token.Method.Verify(text, token.Signature, have)
- }
- if err != nil {
- return token, newError("", ErrTokenSignatureInvalid, err)
- }
-
- // Validate Claims
- if !p.skipClaimsValidation {
- // Make sure we have at least a default validator
- if p.validator == nil {
- p.validator = NewValidator()
- }
-
- if err := p.validator.Validate(claims); err != nil {
- return token, newError("", ErrTokenInvalidClaims, err)
- }
- }
-
- // No errors so far, token is valid.
- token.Valid = true
-
- return token, nil
-}
-
-// ParseUnverified parses the token but doesn't validate the signature.
-//
-// WARNING: Don't use this method unless you know what you're doing.
-//
-// It's only ever useful in cases where you know the signature is valid (since it has already
-// been or will be checked elsewhere in the stack) and you want to extract values from it.
-func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
- parts = strings.Split(tokenString, ".")
- if len(parts) != 3 {
- return nil, parts, newError("token contains an invalid number of segments", ErrTokenMalformed)
- }
-
- token = &Token{Raw: tokenString}
-
- // parse Header
- var headerBytes []byte
- if headerBytes, err = p.DecodeSegment(parts[0]); err != nil {
- return token, parts, newError("could not base64 decode header", ErrTokenMalformed, err)
- }
- if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
- return token, parts, newError("could not JSON decode header", ErrTokenMalformed, err)
- }
-
- // parse Claims
- token.Claims = claims
-
- claimBytes, err := p.DecodeSegment(parts[1])
- if err != nil {
- return token, parts, newError("could not base64 decode claim", ErrTokenMalformed, err)
- }
-
- // If `useJSONNumber` is enabled then we must use *json.Decoder to decode
- // the claims. However, this comes with a performance penalty so only use
- // it if we must and, otherwise, simple use json.Unmarshal.
- if !p.useJSONNumber {
- // JSON Unmarshal. Special case for map type to avoid weird pointer behavior.
- if c, ok := token.Claims.(MapClaims); ok {
- err = json.Unmarshal(claimBytes, &c)
- } else {
- err = json.Unmarshal(claimBytes, &claims)
- }
- } else {
- dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
- dec.UseNumber()
- // JSON Decode. Special case for map type to avoid weird pointer behavior.
- if c, ok := token.Claims.(MapClaims); ok {
- err = dec.Decode(&c)
- } else {
- err = dec.Decode(&claims)
- }
- }
- if err != nil {
- return token, parts, newError("could not JSON decode claim", ErrTokenMalformed, err)
- }
-
- // Lookup signature method
- if method, ok := token.Header["alg"].(string); ok {
- if token.Method = GetSigningMethod(method); token.Method == nil {
- return token, parts, newError("signing method (alg) is unavailable", ErrTokenUnverifiable)
- }
- } else {
- return token, parts, newError("signing method (alg) is unspecified", ErrTokenUnverifiable)
- }
-
- return token, parts, nil
-}
-
-// DecodeSegment decodes a JWT specific base64url encoding. This function will
-// take into account whether the [Parser] is configured with additional options,
-// such as [WithStrictDecoding] or [WithPaddingAllowed].
-func (p *Parser) DecodeSegment(seg string) ([]byte, error) {
- encoding := base64.RawURLEncoding
-
- if p.decodePaddingAllowed {
- if l := len(seg) % 4; l > 0 {
- seg += strings.Repeat("=", 4-l)
- }
- encoding = base64.URLEncoding
- }
-
- if p.decodeStrict {
- encoding = encoding.Strict()
- }
- return encoding.DecodeString(seg)
-}
-
-// Parse parses, validates, verifies the signature and returns the parsed token.
-// keyFunc will receive the parsed token and should return the cryptographic key
-// for verifying the signature. The caller is strongly encouraged to set the
-// WithValidMethods option to validate the 'alg' claim in the token matches the
-// expected algorithm. For more details about the importance of validating the
-// 'alg' claim, see
-// https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/
-func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
- return NewParser(options...).Parse(tokenString, keyFunc)
-}
-
-// ParseWithClaims is a shortcut for NewParser().ParseWithClaims().
-//
-// Note: If you provide a custom claim implementation that embeds one of the
-// standard claims (such as RegisteredClaims), make sure that a) you either
-// embed a non-pointer version of the claims or b) if you are using a pointer,
-// allocate the proper memory for it before passing in the overall claims,
-// otherwise you might run into a panic.
-func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
- return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc)
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser_option.go b/vendor/github.com/golang-jwt/jwt/v5/parser_option.go
deleted file mode 100644
index 88a780fb..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/parser_option.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package jwt
-
-import "time"
-
-// ParserOption is used to implement functional-style options that modify the
-// behavior of the parser. To add new options, just create a function (ideally
-// beginning with With or Without) that returns an anonymous function that takes
-// a *Parser type as input and manipulates its configuration accordingly.
-type ParserOption func(*Parser)
-
-// WithValidMethods is an option to supply algorithm methods that the parser
-// will check. Only those methods will be considered valid. It is heavily
-// encouraged to use this option in order to prevent attacks such as
-// https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/.
-func WithValidMethods(methods []string) ParserOption {
- return func(p *Parser) {
- p.validMethods = methods
- }
-}
-
-// WithJSONNumber is an option to configure the underlying JSON parser with
-// UseNumber.
-func WithJSONNumber() ParserOption {
- return func(p *Parser) {
- p.useJSONNumber = true
- }
-}
-
-// WithoutClaimsValidation is an option to disable claims validation. This
-// option should only be used if you exactly know what you are doing.
-func WithoutClaimsValidation() ParserOption {
- return func(p *Parser) {
- p.skipClaimsValidation = true
- }
-}
-
-// WithLeeway returns the ParserOption for specifying the leeway window.
-func WithLeeway(leeway time.Duration) ParserOption {
- return func(p *Parser) {
- p.validator.leeway = leeway
- }
-}
-
-// WithTimeFunc returns the ParserOption for specifying the time func. The
-// primary use-case for this is testing. If you are looking for a way to account
-// for clock-skew, WithLeeway should be used instead.
-func WithTimeFunc(f func() time.Time) ParserOption {
- return func(p *Parser) {
- p.validator.timeFunc = f
- }
-}
-
-// WithIssuedAt returns the ParserOption to enable verification
-// of issued-at.
-func WithIssuedAt() ParserOption {
- return func(p *Parser) {
- p.validator.verifyIat = true
- }
-}
-
-// WithExpirationRequired returns the ParserOption to make exp claim required.
-// By default exp claim is optional.
-func WithExpirationRequired() ParserOption {
- return func(p *Parser) {
- p.validator.requireExp = true
- }
-}
-
-// WithAudience configures the validator to require the specified audience in
-// the `aud` claim. Validation will fail if the audience is not listed in the
-// token or the `aud` claim is missing.
-//
-// NOTE: While the `aud` claim is OPTIONAL in a JWT, the handling of it is
-// application-specific. Since this validation API is helping developers in
-// writing secure application, we decided to REQUIRE the existence of the claim,
-// if an audience is expected.
-func WithAudience(aud string) ParserOption {
- return func(p *Parser) {
- p.validator.expectedAud = aud
- }
-}
-
-// WithIssuer configures the validator to require the specified issuer in the
-// `iss` claim. Validation will fail if a different issuer is specified in the
-// token or the `iss` claim is missing.
-//
-// NOTE: While the `iss` claim is OPTIONAL in a JWT, the handling of it is
-// application-specific. Since this validation API is helping developers in
-// writing secure application, we decided to REQUIRE the existence of the claim,
-// if an issuer is expected.
-func WithIssuer(iss string) ParserOption {
- return func(p *Parser) {
- p.validator.expectedIss = iss
- }
-}
-
-// WithSubject configures the validator to require the specified subject in the
-// `sub` claim. Validation will fail if a different subject is specified in the
-// token or the `sub` claim is missing.
-//
-// NOTE: While the `sub` claim is OPTIONAL in a JWT, the handling of it is
-// application-specific. Since this validation API is helping developers in
-// writing secure application, we decided to REQUIRE the existence of the claim,
-// if a subject is expected.
-func WithSubject(sub string) ParserOption {
- return func(p *Parser) {
- p.validator.expectedSub = sub
- }
-}
-
-// WithPaddingAllowed will enable the codec used for decoding JWTs to allow
-// padding. Note that the JWS RFC7515 states that the tokens will utilize a
-// Base64url encoding with no padding. Unfortunately, some implementations of
-// JWT are producing non-standard tokens, and thus require support for decoding.
-func WithPaddingAllowed() ParserOption {
- return func(p *Parser) {
- p.decodePaddingAllowed = true
- }
-}
-
-// WithStrictDecoding will switch the codec used for decoding JWTs into strict
-// mode. In this mode, the decoder requires that trailing padding bits are zero,
-// as described in RFC 4648 section 3.5.
-func WithStrictDecoding() ParserOption {
- return func(p *Parser) {
- p.decodeStrict = true
- }
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go b/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go
deleted file mode 100644
index 77951a53..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package jwt
-
-// RegisteredClaims are a structured version of the JWT Claims Set,
-// restricted to Registered Claim Names, as referenced at
-// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1
-//
-// This type can be used on its own, but then additional private and
-// public claims embedded in the JWT will not be parsed. The typical use-case
-// therefore is to embedded this in a user-defined claim type.
-//
-// See examples for how to use this with your own claim types.
-type RegisteredClaims struct {
- // the `iss` (Issuer) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.1
- Issuer string `json:"iss,omitempty"`
-
- // the `sub` (Subject) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.2
- Subject string `json:"sub,omitempty"`
-
- // the `aud` (Audience) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.3
- Audience ClaimStrings `json:"aud,omitempty"`
-
- // the `exp` (Expiration Time) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4
- ExpiresAt *NumericDate `json:"exp,omitempty"`
-
- // the `nbf` (Not Before) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.5
- NotBefore *NumericDate `json:"nbf,omitempty"`
-
- // the `iat` (Issued At) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.6
- IssuedAt *NumericDate `json:"iat,omitempty"`
-
- // the `jti` (JWT ID) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.7
- ID string `json:"jti,omitempty"`
-}
-
-// GetExpirationTime implements the Claims interface.
-func (c RegisteredClaims) GetExpirationTime() (*NumericDate, error) {
- return c.ExpiresAt, nil
-}
-
-// GetNotBefore implements the Claims interface.
-func (c RegisteredClaims) GetNotBefore() (*NumericDate, error) {
- return c.NotBefore, nil
-}
-
-// GetIssuedAt implements the Claims interface.
-func (c RegisteredClaims) GetIssuedAt() (*NumericDate, error) {
- return c.IssuedAt, nil
-}
-
-// GetAudience implements the Claims interface.
-func (c RegisteredClaims) GetAudience() (ClaimStrings, error) {
- return c.Audience, nil
-}
-
-// GetIssuer implements the Claims interface.
-func (c RegisteredClaims) GetIssuer() (string, error) {
- return c.Issuer, nil
-}
-
-// GetSubject implements the Claims interface.
-func (c RegisteredClaims) GetSubject() (string, error) {
- return c.Subject, nil
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa.go b/vendor/github.com/golang-jwt/jwt/v5/rsa.go
deleted file mode 100644
index 83cbee6a..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/rsa.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package jwt
-
-import (
- "crypto"
- "crypto/rand"
- "crypto/rsa"
-)
-
-// SigningMethodRSA implements the RSA family of signing methods.
-// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
-type SigningMethodRSA struct {
- Name string
- Hash crypto.Hash
-}
-
-// Specific instances for RS256 and company
-var (
- SigningMethodRS256 *SigningMethodRSA
- SigningMethodRS384 *SigningMethodRSA
- SigningMethodRS512 *SigningMethodRSA
-)
-
-func init() {
- // RS256
- SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
- RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
- return SigningMethodRS256
- })
-
- // RS384
- SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
- RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
- return SigningMethodRS384
- })
-
- // RS512
- SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
- RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
- return SigningMethodRS512
- })
-}
-
-func (m *SigningMethodRSA) Alg() string {
- return m.Name
-}
-
-// Verify implements token verification for the SigningMethod
-// For this signing method, must be an *rsa.PublicKey structure.
-func (m *SigningMethodRSA) Verify(signingString string, sig []byte, key interface{}) error {
- var rsaKey *rsa.PublicKey
- var ok bool
-
- if rsaKey, ok = key.(*rsa.PublicKey); !ok {
- return newError("RSA verify expects *rsa.PublicKey", ErrInvalidKeyType)
- }
-
- // Create hasher
- if !m.Hash.Available() {
- return ErrHashUnavailable
- }
- hasher := m.Hash.New()
- hasher.Write([]byte(signingString))
-
- // Verify the signature
- return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
-}
-
-// Sign implements token signing for the SigningMethod
-// For this signing method, must be an *rsa.PrivateKey structure.
-func (m *SigningMethodRSA) Sign(signingString string, key interface{}) ([]byte, error) {
- var rsaKey *rsa.PrivateKey
- var ok bool
-
- // Validate type of key
- if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
- return nil, newError("RSA sign expects *rsa.PrivateKey", ErrInvalidKeyType)
- }
-
- // Create the hasher
- if !m.Hash.Available() {
- return nil, ErrHashUnavailable
- }
-
- hasher := m.Hash.New()
- hasher.Write([]byte(signingString))
-
- // Sign the string and return the encoded bytes
- if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
- return sigBytes, nil
- } else {
- return nil, err
- }
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go
deleted file mode 100644
index 28c386ec..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go
+++ /dev/null
@@ -1,135 +0,0 @@
-//go:build go1.4
-// +build go1.4
-
-package jwt
-
-import (
- "crypto"
- "crypto/rand"
- "crypto/rsa"
-)
-
-// SigningMethodRSAPSS implements the RSAPSS family of signing methods signing methods
-type SigningMethodRSAPSS struct {
- *SigningMethodRSA
- Options *rsa.PSSOptions
- // VerifyOptions is optional. If set overrides Options for rsa.VerifyPPS.
- // Used to accept tokens signed with rsa.PSSSaltLengthAuto, what doesn't follow
- // https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously.
- // See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details.
- VerifyOptions *rsa.PSSOptions
-}
-
-// Specific instances for RS/PS and company.
-var (
- SigningMethodPS256 *SigningMethodRSAPSS
- SigningMethodPS384 *SigningMethodRSAPSS
- SigningMethodPS512 *SigningMethodRSAPSS
-)
-
-func init() {
- // PS256
- SigningMethodPS256 = &SigningMethodRSAPSS{
- SigningMethodRSA: &SigningMethodRSA{
- Name: "PS256",
- Hash: crypto.SHA256,
- },
- Options: &rsa.PSSOptions{
- SaltLength: rsa.PSSSaltLengthEqualsHash,
- },
- VerifyOptions: &rsa.PSSOptions{
- SaltLength: rsa.PSSSaltLengthAuto,
- },
- }
- RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
- return SigningMethodPS256
- })
-
- // PS384
- SigningMethodPS384 = &SigningMethodRSAPSS{
- SigningMethodRSA: &SigningMethodRSA{
- Name: "PS384",
- Hash: crypto.SHA384,
- },
- Options: &rsa.PSSOptions{
- SaltLength: rsa.PSSSaltLengthEqualsHash,
- },
- VerifyOptions: &rsa.PSSOptions{
- SaltLength: rsa.PSSSaltLengthAuto,
- },
- }
- RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
- return SigningMethodPS384
- })
-
- // PS512
- SigningMethodPS512 = &SigningMethodRSAPSS{
- SigningMethodRSA: &SigningMethodRSA{
- Name: "PS512",
- Hash: crypto.SHA512,
- },
- Options: &rsa.PSSOptions{
- SaltLength: rsa.PSSSaltLengthEqualsHash,
- },
- VerifyOptions: &rsa.PSSOptions{
- SaltLength: rsa.PSSSaltLengthAuto,
- },
- }
- RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
- return SigningMethodPS512
- })
-}
-
-// Verify implements token verification for the SigningMethod.
-// For this verify method, key must be an rsa.PublicKey struct
-func (m *SigningMethodRSAPSS) Verify(signingString string, sig []byte, key interface{}) error {
- var rsaKey *rsa.PublicKey
- switch k := key.(type) {
- case *rsa.PublicKey:
- rsaKey = k
- default:
- return newError("RSA-PSS verify expects *rsa.PublicKey", ErrInvalidKeyType)
- }
-
- // Create hasher
- if !m.Hash.Available() {
- return ErrHashUnavailable
- }
- hasher := m.Hash.New()
- hasher.Write([]byte(signingString))
-
- opts := m.Options
- if m.VerifyOptions != nil {
- opts = m.VerifyOptions
- }
-
- return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts)
-}
-
-// Sign implements token signing for the SigningMethod.
-// For this signing method, key must be an rsa.PrivateKey struct
-func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) ([]byte, error) {
- var rsaKey *rsa.PrivateKey
-
- switch k := key.(type) {
- case *rsa.PrivateKey:
- rsaKey = k
- default:
- return nil, newError("RSA-PSS sign expects *rsa.PrivateKey", ErrInvalidKeyType)
- }
-
- // Create the hasher
- if !m.Hash.Available() {
- return nil, ErrHashUnavailable
- }
-
- hasher := m.Hash.New()
- hasher.Write([]byte(signingString))
-
- // Sign the string and return the encoded bytes
- if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
- return sigBytes, nil
- } else {
- return nil, err
- }
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go
deleted file mode 100644
index b3aeebbe..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go
+++ /dev/null
@@ -1,107 +0,0 @@
-package jwt
-
-import (
- "crypto/rsa"
- "crypto/x509"
- "encoding/pem"
- "errors"
-)
-
-var (
- ErrKeyMustBePEMEncoded = errors.New("invalid key: Key must be a PEM encoded PKCS1 or PKCS8 key")
- ErrNotRSAPrivateKey = errors.New("key is not a valid RSA private key")
- ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key")
-)
-
-// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 private key
-func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
- var err error
-
- // Parse PEM block
- var block *pem.Block
- if block, _ = pem.Decode(key); block == nil {
- return nil, ErrKeyMustBePEMEncoded
- }
-
- var parsedKey interface{}
- if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
- if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
- return nil, err
- }
- }
-
- var pkey *rsa.PrivateKey
- var ok bool
- if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
- return nil, ErrNotRSAPrivateKey
- }
-
- return pkey, nil
-}
-
-// ParseRSAPrivateKeyFromPEMWithPassword parses a PEM encoded PKCS1 or PKCS8 private key protected with password
-//
-// Deprecated: This function is deprecated and should not be used anymore. It uses the deprecated x509.DecryptPEMBlock
-// function, which was deprecated since RFC 1423 is regarded insecure by design. Unfortunately, there is no alternative
-// in the Go standard library for now. See https://github.com/golang/go/issues/8860.
-func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
- var err error
-
- // Parse PEM block
- var block *pem.Block
- if block, _ = pem.Decode(key); block == nil {
- return nil, ErrKeyMustBePEMEncoded
- }
-
- var parsedKey interface{}
-
- var blockDecrypted []byte
- if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
- return nil, err
- }
-
- if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
- if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
- return nil, err
- }
- }
-
- var pkey *rsa.PrivateKey
- var ok bool
- if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
- return nil, ErrNotRSAPrivateKey
- }
-
- return pkey, nil
-}
-
-// ParseRSAPublicKeyFromPEM parses a certificate or a PEM encoded PKCS1 or PKIX public key
-func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
- var err error
-
- // Parse PEM block
- var block *pem.Block
- if block, _ = pem.Decode(key); block == nil {
- return nil, ErrKeyMustBePEMEncoded
- }
-
- // Parse the key
- var parsedKey interface{}
- if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
- if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
- parsedKey = cert.PublicKey
- } else {
- if parsedKey, err = x509.ParsePKCS1PublicKey(block.Bytes); err != nil {
- return nil, err
- }
- }
- }
-
- var pkey *rsa.PublicKey
- var ok bool
- if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
- return nil, ErrNotRSAPublicKey
- }
-
- return pkey, nil
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/signing_method.go b/vendor/github.com/golang-jwt/jwt/v5/signing_method.go
deleted file mode 100644
index 0d73631c..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/signing_method.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package jwt
-
-import (
- "sync"
-)
-
-var signingMethods = map[string]func() SigningMethod{}
-var signingMethodLock = new(sync.RWMutex)
-
-// SigningMethod can be used add new methods for signing or verifying tokens. It
-// takes a decoded signature as an input in the Verify function and produces a
-// signature in Sign. The signature is then usually base64 encoded as part of a
-// JWT.
-type SigningMethod interface {
- Verify(signingString string, sig []byte, key interface{}) error // Returns nil if signature is valid
- Sign(signingString string, key interface{}) ([]byte, error) // Returns signature or error
- Alg() string // returns the alg identifier for this method (example: 'HS256')
-}
-
-// RegisterSigningMethod registers the "alg" name and a factory function for signing method.
-// This is typically done during init() in the method's implementation
-func RegisterSigningMethod(alg string, f func() SigningMethod) {
- signingMethodLock.Lock()
- defer signingMethodLock.Unlock()
-
- signingMethods[alg] = f
-}
-
-// GetSigningMethod retrieves a signing method from an "alg" string
-func GetSigningMethod(alg string) (method SigningMethod) {
- signingMethodLock.RLock()
- defer signingMethodLock.RUnlock()
-
- if methodF, ok := signingMethods[alg]; ok {
- method = methodF()
- }
- return
-}
-
-// GetAlgorithms returns a list of registered "alg" names
-func GetAlgorithms() (algs []string) {
- signingMethodLock.RLock()
- defer signingMethodLock.RUnlock()
-
- for alg := range signingMethods {
- algs = append(algs, alg)
- }
- return
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf b/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf
deleted file mode 100644
index 53745d51..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf
+++ /dev/null
@@ -1 +0,0 @@
-checks = ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1023"]
diff --git a/vendor/github.com/golang-jwt/jwt/v5/token.go b/vendor/github.com/golang-jwt/jwt/v5/token.go
deleted file mode 100644
index 352873a2..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/token.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package jwt
-
-import (
- "crypto"
- "encoding/base64"
- "encoding/json"
-)
-
-// Keyfunc will be used by the Parse methods as a callback function to supply
-// the key for verification. The function receives the parsed, but unverified
-// Token. This allows you to use properties in the Header of the token (such as
-// `kid`) to identify which key to use.
-//
-// The returned interface{} may be a single key or a VerificationKeySet containing
-// multiple keys.
-type Keyfunc func(*Token) (interface{}, error)
-
-// VerificationKey represents a public or secret key for verifying a token's signature.
-type VerificationKey interface {
- crypto.PublicKey | []uint8
-}
-
-// VerificationKeySet is a set of public or secret keys. It is used by the parser to verify a token.
-type VerificationKeySet struct {
- Keys []VerificationKey
-}
-
-// Token represents a JWT Token. Different fields will be used depending on
-// whether you're creating or parsing/verifying a token.
-type Token struct {
- Raw string // Raw contains the raw token. Populated when you [Parse] a token
- Method SigningMethod // Method is the signing method used or to be used
- Header map[string]interface{} // Header is the first segment of the token in decoded form
- Claims Claims // Claims is the second segment of the token in decoded form
- Signature []byte // Signature is the third segment of the token in decoded form. Populated when you Parse a token
- Valid bool // Valid specifies if the token is valid. Populated when you Parse/Verify a token
-}
-
-// New creates a new [Token] with the specified signing method and an empty map
-// of claims. Additional options can be specified, but are currently unused.
-func New(method SigningMethod, opts ...TokenOption) *Token {
- return NewWithClaims(method, MapClaims{}, opts...)
-}
-
-// NewWithClaims creates a new [Token] with the specified signing method and
-// claims. Additional options can be specified, but are currently unused.
-func NewWithClaims(method SigningMethod, claims Claims, opts ...TokenOption) *Token {
- return &Token{
- Header: map[string]interface{}{
- "typ": "JWT",
- "alg": method.Alg(),
- },
- Claims: claims,
- Method: method,
- }
-}
-
-// SignedString creates and returns a complete, signed JWT. The token is signed
-// using the SigningMethod specified in the token. Please refer to
-// https://golang-jwt.github.io/jwt/usage/signing_methods/#signing-methods-and-key-types
-// for an overview of the different signing methods and their respective key
-// types.
-func (t *Token) SignedString(key interface{}) (string, error) {
- sstr, err := t.SigningString()
- if err != nil {
- return "", err
- }
-
- sig, err := t.Method.Sign(sstr, key)
- if err != nil {
- return "", err
- }
-
- return sstr + "." + t.EncodeSegment(sig), nil
-}
-
-// SigningString generates the signing string. This is the most expensive part
-// of the whole deal. Unless you need this for something special, just go
-// straight for the SignedString.
-func (t *Token) SigningString() (string, error) {
- h, err := json.Marshal(t.Header)
- if err != nil {
- return "", err
- }
-
- c, err := json.Marshal(t.Claims)
- if err != nil {
- return "", err
- }
-
- return t.EncodeSegment(h) + "." + t.EncodeSegment(c), nil
-}
-
-// EncodeSegment encodes a JWT specific base64url encoding with padding
-// stripped. In the future, this function might take into account a
-// [TokenOption]. Therefore, this function exists as a method of [Token], rather
-// than a global function.
-func (*Token) EncodeSegment(seg []byte) string {
- return base64.RawURLEncoding.EncodeToString(seg)
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/token_option.go b/vendor/github.com/golang-jwt/jwt/v5/token_option.go
deleted file mode 100644
index b4ae3bad..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/token_option.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package jwt
-
-// TokenOption is a reserved type, which provides some forward compatibility,
-// if we ever want to introduce token creation-related options.
-type TokenOption func(*Token)
diff --git a/vendor/github.com/golang-jwt/jwt/v5/types.go b/vendor/github.com/golang-jwt/jwt/v5/types.go
deleted file mode 100644
index b2655a9e..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/types.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package jwt
-
-import (
- "encoding/json"
- "fmt"
- "math"
- "strconv"
- "time"
-)
-
-// TimePrecision sets the precision of times and dates within this library. This
-// has an influence on the precision of times when comparing expiry or other
-// related time fields. Furthermore, it is also the precision of times when
-// serializing.
-//
-// For backwards compatibility the default precision is set to seconds, so that
-// no fractional timestamps are generated.
-var TimePrecision = time.Second
-
-// MarshalSingleStringAsArray modifies the behavior of the ClaimStrings type,
-// especially its MarshalJSON function.
-//
-// If it is set to true (the default), it will always serialize the type as an
-// array of strings, even if it just contains one element, defaulting to the
-// behavior of the underlying []string. If it is set to false, it will serialize
-// to a single string, if it contains one element. Otherwise, it will serialize
-// to an array of strings.
-var MarshalSingleStringAsArray = true
-
-// NumericDate represents a JSON numeric date value, as referenced at
-// https://datatracker.ietf.org/doc/html/rfc7519#section-2.
-type NumericDate struct {
- time.Time
-}
-
-// NewNumericDate constructs a new *NumericDate from a standard library time.Time struct.
-// It will truncate the timestamp according to the precision specified in TimePrecision.
-func NewNumericDate(t time.Time) *NumericDate {
- return &NumericDate{t.Truncate(TimePrecision)}
-}
-
-// newNumericDateFromSeconds creates a new *NumericDate out of a float64 representing a
-// UNIX epoch with the float fraction representing non-integer seconds.
-func newNumericDateFromSeconds(f float64) *NumericDate {
- round, frac := math.Modf(f)
- return NewNumericDate(time.Unix(int64(round), int64(frac*1e9)))
-}
-
-// MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch
-// represented in NumericDate to a byte array, using the precision specified in TimePrecision.
-func (date NumericDate) MarshalJSON() (b []byte, err error) {
- var prec int
- if TimePrecision < time.Second {
- prec = int(math.Log10(float64(time.Second) / float64(TimePrecision)))
- }
- truncatedDate := date.Truncate(TimePrecision)
-
- // For very large timestamps, UnixNano would overflow an int64, but this
- // function requires nanosecond level precision, so we have to use the
- // following technique to get round the issue:
- //
- // 1. Take the normal unix timestamp to form the whole number part of the
- // output,
- // 2. Take the result of the Nanosecond function, which returns the offset
- // within the second of the particular unix time instance, to form the
- // decimal part of the output
- // 3. Concatenate them to produce the final result
- seconds := strconv.FormatInt(truncatedDate.Unix(), 10)
- nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64)
-
- output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...)
-
- return output, nil
-}
-
-// UnmarshalJSON is an implementation of the json.RawMessage interface and
-// deserializes a [NumericDate] from a JSON representation, i.e. a
-// [json.Number]. This number represents an UNIX epoch with either integer or
-// non-integer seconds.
-func (date *NumericDate) UnmarshalJSON(b []byte) (err error) {
- var (
- number json.Number
- f float64
- )
-
- if err = json.Unmarshal(b, &number); err != nil {
- return fmt.Errorf("could not parse NumericData: %w", err)
- }
-
- if f, err = number.Float64(); err != nil {
- return fmt.Errorf("could not convert json number value to float: %w", err)
- }
-
- n := newNumericDateFromSeconds(f)
- *date = *n
-
- return nil
-}
-
-// ClaimStrings is basically just a slice of strings, but it can be either
-// serialized from a string array or just a string. This type is necessary,
-// since the "aud" claim can either be a single string or an array.
-type ClaimStrings []string
-
-func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) {
- var value interface{}
-
- if err = json.Unmarshal(data, &value); err != nil {
- return err
- }
-
- var aud []string
-
- switch v := value.(type) {
- case string:
- aud = append(aud, v)
- case []string:
- aud = ClaimStrings(v)
- case []interface{}:
- for _, vv := range v {
- vs, ok := vv.(string)
- if !ok {
- return ErrInvalidType
- }
- aud = append(aud, vs)
- }
- case nil:
- return nil
- default:
- return ErrInvalidType
- }
-
- *s = aud
-
- return
-}
-
-func (s ClaimStrings) MarshalJSON() (b []byte, err error) {
- // This handles a special case in the JWT RFC. If the string array, e.g.
- // used by the "aud" field, only contains one element, it MAY be serialized
- // as a single string. This may or may not be desired based on the ecosystem
- // of other JWT library used, so we make it configurable by the variable
- // MarshalSingleStringAsArray.
- if len(s) == 1 && !MarshalSingleStringAsArray {
- return json.Marshal(s[0])
- }
-
- return json.Marshal([]string(s))
-}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/validator.go b/vendor/github.com/golang-jwt/jwt/v5/validator.go
deleted file mode 100644
index 008ecd87..00000000
--- a/vendor/github.com/golang-jwt/jwt/v5/validator.go
+++ /dev/null
@@ -1,316 +0,0 @@
-package jwt
-
-import (
- "crypto/subtle"
- "fmt"
- "time"
-)
-
-// ClaimsValidator is an interface that can be implemented by custom claims who
-// wish to execute any additional claims validation based on
-// application-specific logic. The Validate function is then executed in
-// addition to the regular claims validation and any error returned is appended
-// to the final validation result.
-//
-// type MyCustomClaims struct {
-// Foo string `json:"foo"`
-// jwt.RegisteredClaims
-// }
-//
-// func (m MyCustomClaims) Validate() error {
-// if m.Foo != "bar" {
-// return errors.New("must be foobar")
-// }
-// return nil
-// }
-type ClaimsValidator interface {
- Claims
- Validate() error
-}
-
-// Validator is the core of the new Validation API. It is automatically used by
-// a [Parser] during parsing and can be modified with various parser options.
-//
-// The [NewValidator] function should be used to create an instance of this
-// struct.
-type Validator struct {
- // leeway is an optional leeway that can be provided to account for clock skew.
- leeway time.Duration
-
- // timeFunc is used to supply the current time that is needed for
- // validation. If unspecified, this defaults to time.Now.
- timeFunc func() time.Time
-
- // requireExp specifies whether the exp claim is required
- requireExp bool
-
- // verifyIat specifies whether the iat (Issued At) claim will be verified.
- // According to https://www.rfc-editor.org/rfc/rfc7519#section-4.1.6 this
- // only specifies the age of the token, but no validation check is
- // necessary. However, if wanted, it can be checked if the iat is
- // unrealistic, i.e., in the future.
- verifyIat bool
-
- // expectedAud contains the audience this token expects. Supplying an empty
- // string will disable aud checking.
- expectedAud string
-
- // expectedIss contains the issuer this token expects. Supplying an empty
- // string will disable iss checking.
- expectedIss string
-
- // expectedSub contains the subject this token expects. Supplying an empty
- // string will disable sub checking.
- expectedSub string
-}
-
-// NewValidator can be used to create a stand-alone validator with the supplied
-// options. This validator can then be used to validate already parsed claims.
-//
-// Note: Under normal circumstances, explicitly creating a validator is not
-// needed and can potentially be dangerous; instead functions of the [Parser]
-// class should be used.
-//
-// The [Validator] is only checking the *validity* of the claims, such as its
-// expiration time, but it does NOT perform *signature verification* of the
-// token.
-func NewValidator(opts ...ParserOption) *Validator {
- p := NewParser(opts...)
- return p.validator
-}
-
-// Validate validates the given claims. It will also perform any custom
-// validation if claims implements the [ClaimsValidator] interface.
-//
-// Note: It will NOT perform any *signature verification* on the token that
-// contains the claims and expects that the [Claim] was already successfully
-// verified.
-func (v *Validator) Validate(claims Claims) error {
- var (
- now time.Time
- errs []error = make([]error, 0, 6)
- err error
- )
-
- // Check, if we have a time func
- if v.timeFunc != nil {
- now = v.timeFunc()
- } else {
- now = time.Now()
- }
-
- // We always need to check the expiration time, but usage of the claim
- // itself is OPTIONAL by default. requireExp overrides this behavior
- // and makes the exp claim mandatory.
- if err = v.verifyExpiresAt(claims, now, v.requireExp); err != nil {
- errs = append(errs, err)
- }
-
- // We always need to check not-before, but usage of the claim itself is
- // OPTIONAL.
- if err = v.verifyNotBefore(claims, now, false); err != nil {
- errs = append(errs, err)
- }
-
- // Check issued-at if the option is enabled
- if v.verifyIat {
- if err = v.verifyIssuedAt(claims, now, false); err != nil {
- errs = append(errs, err)
- }
- }
-
- // If we have an expected audience, we also require the audience claim
- if v.expectedAud != "" {
- if err = v.verifyAudience(claims, v.expectedAud, true); err != nil {
- errs = append(errs, err)
- }
- }
-
- // If we have an expected issuer, we also require the issuer claim
- if v.expectedIss != "" {
- if err = v.verifyIssuer(claims, v.expectedIss, true); err != nil {
- errs = append(errs, err)
- }
- }
-
- // If we have an expected subject, we also require the subject claim
- if v.expectedSub != "" {
- if err = v.verifySubject(claims, v.expectedSub, true); err != nil {
- errs = append(errs, err)
- }
- }
-
- // Finally, we want to give the claim itself some possibility to do some
- // additional custom validation based on a custom Validate function.
- cvt, ok := claims.(ClaimsValidator)
- if ok {
- if err := cvt.Validate(); err != nil {
- errs = append(errs, err)
- }
- }
-
- if len(errs) == 0 {
- return nil
- }
-
- return joinErrors(errs...)
-}
-
-// verifyExpiresAt compares the exp claim in claims against cmp. This function
-// will succeed if cmp < exp. Additional leeway is taken into account.
-//
-// If exp is not set, it will succeed if the claim is not required,
-// otherwise ErrTokenRequiredClaimMissing will be returned.
-//
-// Additionally, if any error occurs while retrieving the claim, e.g., when its
-// the wrong type, an ErrTokenUnverifiable error will be returned.
-func (v *Validator) verifyExpiresAt(claims Claims, cmp time.Time, required bool) error {
- exp, err := claims.GetExpirationTime()
- if err != nil {
- return err
- }
-
- if exp == nil {
- return errorIfRequired(required, "exp")
- }
-
- return errorIfFalse(cmp.Before((exp.Time).Add(+v.leeway)), ErrTokenExpired)
-}
-
-// verifyIssuedAt compares the iat claim in claims against cmp. This function
-// will succeed if cmp >= iat. Additional leeway is taken into account.
-//
-// If iat is not set, it will succeed if the claim is not required,
-// otherwise ErrTokenRequiredClaimMissing will be returned.
-//
-// Additionally, if any error occurs while retrieving the claim, e.g., when its
-// the wrong type, an ErrTokenUnverifiable error will be returned.
-func (v *Validator) verifyIssuedAt(claims Claims, cmp time.Time, required bool) error {
- iat, err := claims.GetIssuedAt()
- if err != nil {
- return err
- }
-
- if iat == nil {
- return errorIfRequired(required, "iat")
- }
-
- return errorIfFalse(!cmp.Before(iat.Add(-v.leeway)), ErrTokenUsedBeforeIssued)
-}
-
-// verifyNotBefore compares the nbf claim in claims against cmp. This function
-// will return true if cmp >= nbf. Additional leeway is taken into account.
-//
-// If nbf is not set, it will succeed if the claim is not required,
-// otherwise ErrTokenRequiredClaimMissing will be returned.
-//
-// Additionally, if any error occurs while retrieving the claim, e.g., when its
-// the wrong type, an ErrTokenUnverifiable error will be returned.
-func (v *Validator) verifyNotBefore(claims Claims, cmp time.Time, required bool) error {
- nbf, err := claims.GetNotBefore()
- if err != nil {
- return err
- }
-
- if nbf == nil {
- return errorIfRequired(required, "nbf")
- }
-
- return errorIfFalse(!cmp.Before(nbf.Add(-v.leeway)), ErrTokenNotValidYet)
-}
-
-// verifyAudience compares the aud claim against cmp.
-//
-// If aud is not set or an empty list, it will succeed if the claim is not required,
-// otherwise ErrTokenRequiredClaimMissing will be returned.
-//
-// Additionally, if any error occurs while retrieving the claim, e.g., when its
-// the wrong type, an ErrTokenUnverifiable error will be returned.
-func (v *Validator) verifyAudience(claims Claims, cmp string, required bool) error {
- aud, err := claims.GetAudience()
- if err != nil {
- return err
- }
-
- if len(aud) == 0 {
- return errorIfRequired(required, "aud")
- }
-
- // use a var here to keep constant time compare when looping over a number of claims
- result := false
-
- var stringClaims string
- for _, a := range aud {
- if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 {
- result = true
- }
- stringClaims = stringClaims + a
- }
-
- // case where "" is sent in one or many aud claims
- if stringClaims == "" {
- return errorIfRequired(required, "aud")
- }
-
- return errorIfFalse(result, ErrTokenInvalidAudience)
-}
-
-// verifyIssuer compares the iss claim in claims against cmp.
-//
-// If iss is not set, it will succeed if the claim is not required,
-// otherwise ErrTokenRequiredClaimMissing will be returned.
-//
-// Additionally, if any error occurs while retrieving the claim, e.g., when its
-// the wrong type, an ErrTokenUnverifiable error will be returned.
-func (v *Validator) verifyIssuer(claims Claims, cmp string, required bool) error {
- iss, err := claims.GetIssuer()
- if err != nil {
- return err
- }
-
- if iss == "" {
- return errorIfRequired(required, "iss")
- }
-
- return errorIfFalse(iss == cmp, ErrTokenInvalidIssuer)
-}
-
-// verifySubject compares the sub claim against cmp.
-//
-// If sub is not set, it will succeed if the claim is not required,
-// otherwise ErrTokenRequiredClaimMissing will be returned.
-//
-// Additionally, if any error occurs while retrieving the claim, e.g., when its
-// the wrong type, an ErrTokenUnverifiable error will be returned.
-func (v *Validator) verifySubject(claims Claims, cmp string, required bool) error {
- sub, err := claims.GetSubject()
- if err != nil {
- return err
- }
-
- if sub == "" {
- return errorIfRequired(required, "sub")
- }
-
- return errorIfFalse(sub == cmp, ErrTokenInvalidSubject)
-}
-
-// errorIfFalse returns the error specified in err, if the value is true.
-// Otherwise, nil is returned.
-func errorIfFalse(value bool, err error) error {
- if value {
- return nil
- } else {
- return err
- }
-}
-
-// errorIfRequired returns an ErrTokenRequiredClaimMissing error if required is
-// true. Otherwise, nil is returned.
-func errorIfRequired(required bool, claim string) error {
- if required {
- return newError(fmt.Sprintf("%s claim is required", claim), ErrTokenRequiredClaimMissing)
- } else {
- return nil
- }
-}
diff --git a/vendor/github.com/golang/groupcache/LICENSE b/vendor/github.com/golang/groupcache/LICENSE
deleted file mode 100644
index 37ec93a1..00000000
--- a/vendor/github.com/golang/groupcache/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and
-distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright
-owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities
-that control, are controlled by, or are under common control with that entity.
-For the purposes of this definition, "control" means (i) the power, direct or
-indirect, to cause the direction or management of such entity, whether by
-contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
-outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising
-permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including
-but not limited to software source code, documentation source, and configuration
-files.
-
-"Object" form shall mean any form resulting from mechanical transformation or
-translation of a Source form, including but not limited to compiled object code,
-generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made
-available under the License, as indicated by a copyright notice that is included
-in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that
-is based on (or derived from) the Work and for which the editorial revisions,
-annotations, elaborations, or other modifications represent, as a whole, an
-original work of authorship. For the purposes of this License, Derivative Works
-shall not include works that remain separable from, or merely link (or bind by
-name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version
-of the Work and any modifications or additions to that Work or Derivative Works
-thereof, that is intentionally submitted to Licensor for inclusion in the Work
-by the copyright owner or by an individual or Legal Entity authorized to submit
-on behalf of the copyright owner. For the purposes of this definition,
-"submitted" means any form of electronic, verbal, or written communication sent
-to the Licensor or its representatives, including but not limited to
-communication on electronic mailing lists, source code control systems, and
-issue tracking systems that are managed by, or on behalf of, the Licensor for
-the purpose of discussing and improving the Work, but excluding communication
-that is conspicuously marked or otherwise designated in writing by the copyright
-owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
-of whom a Contribution has been received by Licensor and subsequently
-incorporated within the Work.
-
-2. Grant of Copyright License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable copyright license to reproduce, prepare Derivative Works of,
-publicly display, publicly perform, sublicense, and distribute the Work and such
-Derivative Works in Source or Object form.
-
-3. Grant of Patent License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable (except as stated in this section) patent license to make, have
-made, use, offer to sell, sell, import, and otherwise transfer the Work, where
-such license applies only to those patent claims licensable by such Contributor
-that are necessarily infringed by their Contribution(s) alone or by combination
-of their Contribution(s) with the Work to which such Contribution(s) was
-submitted. If You institute patent litigation against any entity (including a
-cross-claim or counterclaim in a lawsuit) alleging that the Work or a
-Contribution incorporated within the Work constitutes direct or contributory
-patent infringement, then any patent licenses granted to You under this License
-for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution.
-
-You may reproduce and distribute copies of the Work or Derivative Works thereof
-in any medium, with or without modifications, and in Source or Object form,
-provided that You meet the following conditions:
-
-You must give any other recipients of the Work or Derivative Works a copy of
-this License; and
-You must cause any modified files to carry prominent notices stating that You
-changed the files; and
-You must retain, in the Source form of any Derivative Works that You distribute,
-all copyright, patent, trademark, and attribution notices from the Source form
-of the Work, excluding those notices that do not pertain to any part of the
-Derivative Works; and
-If the Work includes a "NOTICE" text file as part of its distribution, then any
-Derivative Works that You distribute must include a readable copy of the
-attribution notices contained within such NOTICE file, excluding those notices
-that do not pertain to any part of the Derivative Works, in at least one of the
-following places: within a NOTICE text file distributed as part of the
-Derivative Works; within the Source form or documentation, if provided along
-with the Derivative Works; or, within a display generated by the Derivative
-Works, if and wherever such third-party notices normally appear. The contents of
-the NOTICE file are for informational purposes only and do not modify the
-License. You may add Your own attribution notices within Derivative Works that
-You distribute, alongside or as an addendum to the NOTICE text from the Work,
-provided that such additional attribution notices cannot be construed as
-modifying the License.
-You may add Your own copyright statement to Your modifications and may provide
-additional or different license terms and conditions for use, reproduction, or
-distribution of Your modifications, or for any such Derivative Works as a whole,
-provided Your use, reproduction, and distribution of the Work otherwise complies
-with the conditions stated in this License.
-
-5. Submission of Contributions.
-
-Unless You explicitly state otherwise, any Contribution intentionally submitted
-for inclusion in the Work by You to the Licensor shall be under the terms and
-conditions of this License, without any additional terms or conditions.
-Notwithstanding the above, nothing herein shall supersede or modify the terms of
-any separate license agreement you may have executed with Licensor regarding
-such Contributions.
-
-6. Trademarks.
-
-This License does not grant permission to use the trade names, trademarks,
-service marks, or product names of the Licensor, except as required for
-reasonable and customary use in describing the origin of the Work and
-reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty.
-
-Unless required by applicable law or agreed to in writing, Licensor provides the
-Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
-including, without limitation, any warranties or conditions of TITLE,
-NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
-solely responsible for determining the appropriateness of using or
-redistributing the Work and assume any risks associated with Your exercise of
-permissions under this License.
-
-8. Limitation of Liability.
-
-In no event and under no legal theory, whether in tort (including negligence),
-contract, or otherwise, unless required by applicable law (such as deliberate
-and grossly negligent acts) or agreed to in writing, shall any Contributor be
-liable to You for damages, including any direct, indirect, special, incidental,
-or consequential damages of any character arising as a result of this License or
-out of the use or inability to use the Work (including but not limited to
-damages for loss of goodwill, work stoppage, computer failure or malfunction, or
-any and all other commercial damages or losses), even if such Contributor has
-been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability.
-
-While redistributing the Work or Derivative Works thereof, You may choose to
-offer, and charge a fee for, acceptance of support, warranty, indemnity, or
-other liability obligations and/or rights consistent with this License. However,
-in accepting such obligations, You may act only on Your own behalf and on Your
-sole responsibility, not on behalf of any other Contributor, and only if You
-agree to indemnify, defend, and hold each Contributor harmless for any liability
-incurred by, or claims asserted against, such Contributor by reason of your
-accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work
-
-To apply the Apache License to your work, attach the following boilerplate
-notice, with the fields enclosed by brackets "[]" replaced with your own
-identifying information. (Don't include the brackets!) The text should be
-enclosed in the appropriate comment syntax for the file format. We also
-recommend that a file or class name and description of purpose be included on
-the same "printed page" as the copyright notice for easier identification within
-third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go
deleted file mode 100644
index eac1c766..00000000
--- a/vendor/github.com/golang/groupcache/lru/lru.go
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
-Copyright 2013 Google Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Package lru implements an LRU cache.
-package lru
-
-import "container/list"
-
-// Cache is an LRU cache. It is not safe for concurrent access.
-type Cache struct {
- // MaxEntries is the maximum number of cache entries before
- // an item is evicted. Zero means no limit.
- MaxEntries int
-
- // OnEvicted optionally specifies a callback function to be
- // executed when an entry is purged from the cache.
- OnEvicted func(key Key, value interface{})
-
- ll *list.List
- cache map[interface{}]*list.Element
-}
-
-// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators
-type Key interface{}
-
-type entry struct {
- key Key
- value interface{}
-}
-
-// New creates a new Cache.
-// If maxEntries is zero, the cache has no limit and it's assumed
-// that eviction is done by the caller.
-func New(maxEntries int) *Cache {
- return &Cache{
- MaxEntries: maxEntries,
- ll: list.New(),
- cache: make(map[interface{}]*list.Element),
- }
-}
-
-// Add adds a value to the cache.
-func (c *Cache) Add(key Key, value interface{}) {
- if c.cache == nil {
- c.cache = make(map[interface{}]*list.Element)
- c.ll = list.New()
- }
- if ee, ok := c.cache[key]; ok {
- c.ll.MoveToFront(ee)
- ee.Value.(*entry).value = value
- return
- }
- ele := c.ll.PushFront(&entry{key, value})
- c.cache[key] = ele
- if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries {
- c.RemoveOldest()
- }
-}
-
-// Get looks up a key's value from the cache.
-func (c *Cache) Get(key Key) (value interface{}, ok bool) {
- if c.cache == nil {
- return
- }
- if ele, hit := c.cache[key]; hit {
- c.ll.MoveToFront(ele)
- return ele.Value.(*entry).value, true
- }
- return
-}
-
-// Remove removes the provided key from the cache.
-func (c *Cache) Remove(key Key) {
- if c.cache == nil {
- return
- }
- if ele, hit := c.cache[key]; hit {
- c.removeElement(ele)
- }
-}
-
-// RemoveOldest removes the oldest item from the cache.
-func (c *Cache) RemoveOldest() {
- if c.cache == nil {
- return
- }
- ele := c.ll.Back()
- if ele != nil {
- c.removeElement(ele)
- }
-}
-
-func (c *Cache) removeElement(e *list.Element) {
- c.ll.Remove(e)
- kv := e.Value.(*entry)
- delete(c.cache, kv.key)
- if c.OnEvicted != nil {
- c.OnEvicted(kv.key, kv.value)
- }
-}
-
-// Len returns the number of items in the cache.
-func (c *Cache) Len() int {
- if c.cache == nil {
- return 0
- }
- return c.ll.Len()
-}
-
-// Clear purges all stored items from the cache.
-func (c *Cache) Clear() {
- if c.OnEvicted != nil {
- for _, e := range c.cache {
- kv := e.Value.(*entry)
- c.OnEvicted(kv.key, kv.value)
- }
- }
- c.ll = nil
- c.cache = nil
-}
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
deleted file mode 100644
index 15167cd7..00000000
--- a/vendor/github.com/golang/protobuf/AUTHORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code refers to The Go Authors for copyright purposes.
-# The master list of authors is in the main Go distribution,
-# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
deleted file mode 100644
index 1c4577e9..00000000
--- a/vendor/github.com/golang/protobuf/CONTRIBUTORS
+++ /dev/null
@@ -1,3 +0,0 @@
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
deleted file mode 100644
index 0f646931..00000000
--- a/vendor/github.com/golang/protobuf/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright 2010 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go
deleted file mode 100644
index e810e6fe..00000000
--- a/vendor/github.com/golang/protobuf/proto/buffer.go
+++ /dev/null
@@ -1,324 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "errors"
- "fmt"
-
- "google.golang.org/protobuf/encoding/prototext"
- "google.golang.org/protobuf/encoding/protowire"
- "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-const (
- WireVarint = 0
- WireFixed32 = 5
- WireFixed64 = 1
- WireBytes = 2
- WireStartGroup = 3
- WireEndGroup = 4
-)
-
-// EncodeVarint returns the varint encoded bytes of v.
-func EncodeVarint(v uint64) []byte {
- return protowire.AppendVarint(nil, v)
-}
-
-// SizeVarint returns the length of the varint encoded bytes of v.
-// This is equal to len(EncodeVarint(v)).
-func SizeVarint(v uint64) int {
- return protowire.SizeVarint(v)
-}
-
-// DecodeVarint parses a varint encoded integer from b,
-// returning the integer value and the length of the varint.
-// It returns (0, 0) if there is a parse error.
-func DecodeVarint(b []byte) (uint64, int) {
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return 0, 0
- }
- return v, n
-}
-
-// Buffer is a buffer for encoding and decoding the protobuf wire format.
-// It may be reused between invocations to reduce memory usage.
-type Buffer struct {
- buf []byte
- idx int
- deterministic bool
-}
-
-// NewBuffer allocates a new Buffer initialized with buf,
-// where the contents of buf are considered the unread portion of the buffer.
-func NewBuffer(buf []byte) *Buffer {
- return &Buffer{buf: buf}
-}
-
-// SetDeterministic specifies whether to use deterministic serialization.
-//
-// Deterministic serialization guarantees that for a given binary, equal
-// messages will always be serialized to the same bytes. This implies:
-//
-// - Repeated serialization of a message will return the same bytes.
-// - Different processes of the same binary (which may be executing on
-// different machines) will serialize equal messages to the same bytes.
-//
-// Note that the deterministic serialization is NOT canonical across
-// languages. It is not guaranteed to remain stable over time. It is unstable
-// across different builds with schema changes due to unknown fields.
-// Users who need canonical serialization (e.g., persistent storage in a
-// canonical form, fingerprinting, etc.) should define their own
-// canonicalization specification and implement their own serializer rather
-// than relying on this API.
-//
-// If deterministic serialization is requested, map entries will be sorted
-// by keys in lexographical order. This is an implementation detail and
-// subject to change.
-func (b *Buffer) SetDeterministic(deterministic bool) {
- b.deterministic = deterministic
-}
-
-// SetBuf sets buf as the internal buffer,
-// where the contents of buf are considered the unread portion of the buffer.
-func (b *Buffer) SetBuf(buf []byte) {
- b.buf = buf
- b.idx = 0
-}
-
-// Reset clears the internal buffer of all written and unread data.
-func (b *Buffer) Reset() {
- b.buf = b.buf[:0]
- b.idx = 0
-}
-
-// Bytes returns the internal buffer.
-func (b *Buffer) Bytes() []byte {
- return b.buf
-}
-
-// Unread returns the unread portion of the buffer.
-func (b *Buffer) Unread() []byte {
- return b.buf[b.idx:]
-}
-
-// Marshal appends the wire-format encoding of m to the buffer.
-func (b *Buffer) Marshal(m Message) error {
- var err error
- b.buf, err = marshalAppend(b.buf, m, b.deterministic)
- return err
-}
-
-// Unmarshal parses the wire-format message in the buffer and
-// places the decoded results in m.
-// It does not reset m before unmarshaling.
-func (b *Buffer) Unmarshal(m Message) error {
- err := UnmarshalMerge(b.Unread(), m)
- b.idx = len(b.buf)
- return err
-}
-
-type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
-
-func (m *unknownFields) String() string { panic("not implemented") }
-func (m *unknownFields) Reset() { panic("not implemented") }
-func (m *unknownFields) ProtoMessage() { panic("not implemented") }
-
-// DebugPrint dumps the encoded bytes of b with a header and footer including s
-// to stdout. This is only intended for debugging.
-func (*Buffer) DebugPrint(s string, b []byte) {
- m := MessageReflect(new(unknownFields))
- m.SetUnknown(b)
- b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
- fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
-}
-
-// EncodeVarint appends an unsigned varint encoding to the buffer.
-func (b *Buffer) EncodeVarint(v uint64) error {
- b.buf = protowire.AppendVarint(b.buf, v)
- return nil
-}
-
-// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
-func (b *Buffer) EncodeZigzag32(v uint64) error {
- return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
-}
-
-// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
-func (b *Buffer) EncodeZigzag64(v uint64) error {
- return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
-}
-
-// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
-func (b *Buffer) EncodeFixed32(v uint64) error {
- b.buf = protowire.AppendFixed32(b.buf, uint32(v))
- return nil
-}
-
-// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
-func (b *Buffer) EncodeFixed64(v uint64) error {
- b.buf = protowire.AppendFixed64(b.buf, uint64(v))
- return nil
-}
-
-// EncodeRawBytes appends a length-prefixed raw bytes to the buffer.
-func (b *Buffer) EncodeRawBytes(v []byte) error {
- b.buf = protowire.AppendBytes(b.buf, v)
- return nil
-}
-
-// EncodeStringBytes appends a length-prefixed raw bytes to the buffer.
-// It does not validate whether v contains valid UTF-8.
-func (b *Buffer) EncodeStringBytes(v string) error {
- b.buf = protowire.AppendString(b.buf, v)
- return nil
-}
-
-// EncodeMessage appends a length-prefixed encoded message to the buffer.
-func (b *Buffer) EncodeMessage(m Message) error {
- var err error
- b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
- b.buf, err = marshalAppend(b.buf, m, b.deterministic)
- return err
-}
-
-// DecodeVarint consumes an encoded unsigned varint from the buffer.
-func (b *Buffer) DecodeVarint() (uint64, error) {
- v, n := protowire.ConsumeVarint(b.buf[b.idx:])
- if n < 0 {
- return 0, protowire.ParseError(n)
- }
- b.idx += n
- return uint64(v), nil
-}
-
-// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
-func (b *Buffer) DecodeZigzag32() (uint64, error) {
- v, err := b.DecodeVarint()
- if err != nil {
- return 0, err
- }
- return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
-}
-
-// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
-func (b *Buffer) DecodeZigzag64() (uint64, error) {
- v, err := b.DecodeVarint()
- if err != nil {
- return 0, err
- }
- return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
-}
-
-// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
-func (b *Buffer) DecodeFixed32() (uint64, error) {
- v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
- if n < 0 {
- return 0, protowire.ParseError(n)
- }
- b.idx += n
- return uint64(v), nil
-}
-
-// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
-func (b *Buffer) DecodeFixed64() (uint64, error) {
- v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
- if n < 0 {
- return 0, protowire.ParseError(n)
- }
- b.idx += n
- return uint64(v), nil
-}
-
-// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer.
-// If alloc is specified, it returns a copy the raw bytes
-// rather than a sub-slice of the buffer.
-func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
- v, n := protowire.ConsumeBytes(b.buf[b.idx:])
- if n < 0 {
- return nil, protowire.ParseError(n)
- }
- b.idx += n
- if alloc {
- v = append([]byte(nil), v...)
- }
- return v, nil
-}
-
-// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer.
-// It does not validate whether the raw bytes contain valid UTF-8.
-func (b *Buffer) DecodeStringBytes() (string, error) {
- v, n := protowire.ConsumeString(b.buf[b.idx:])
- if n < 0 {
- return "", protowire.ParseError(n)
- }
- b.idx += n
- return v, nil
-}
-
-// DecodeMessage consumes a length-prefixed message from the buffer.
-// It does not reset m before unmarshaling.
-func (b *Buffer) DecodeMessage(m Message) error {
- v, err := b.DecodeRawBytes(false)
- if err != nil {
- return err
- }
- return UnmarshalMerge(v, m)
-}
-
-// DecodeGroup consumes a message group from the buffer.
-// It assumes that the start group marker has already been consumed and
-// consumes all bytes until (and including the end group marker).
-// It does not reset m before unmarshaling.
-func (b *Buffer) DecodeGroup(m Message) error {
- v, n, err := consumeGroup(b.buf[b.idx:])
- if err != nil {
- return err
- }
- b.idx += n
- return UnmarshalMerge(v, m)
-}
-
-// consumeGroup parses b until it finds an end group marker, returning
-// the raw bytes of the message (excluding the end group marker) and the
-// the total length of the message (including the end group marker).
-func consumeGroup(b []byte) ([]byte, int, error) {
- b0 := b
- depth := 1 // assume this follows a start group marker
- for {
- _, wtyp, tagLen := protowire.ConsumeTag(b)
- if tagLen < 0 {
- return nil, 0, protowire.ParseError(tagLen)
- }
- b = b[tagLen:]
-
- var valLen int
- switch wtyp {
- case protowire.VarintType:
- _, valLen = protowire.ConsumeVarint(b)
- case protowire.Fixed32Type:
- _, valLen = protowire.ConsumeFixed32(b)
- case protowire.Fixed64Type:
- _, valLen = protowire.ConsumeFixed64(b)
- case protowire.BytesType:
- _, valLen = protowire.ConsumeBytes(b)
- case protowire.StartGroupType:
- depth++
- case protowire.EndGroupType:
- depth--
- default:
- return nil, 0, errors.New("proto: cannot parse reserved wire type")
- }
- if valLen < 0 {
- return nil, 0, protowire.ParseError(valLen)
- }
- b = b[valLen:]
-
- if depth == 0 {
- return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
- }
- }
-}
diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go
deleted file mode 100644
index d399bf06..00000000
--- a/vendor/github.com/golang/protobuf/proto/defaults.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "google.golang.org/protobuf/reflect/protoreflect"
-)
-
-// SetDefaults sets unpopulated scalar fields to their default values.
-// Fields within a oneof are not set even if they have a default value.
-// SetDefaults is recursively called upon any populated message fields.
-func SetDefaults(m Message) {
- if m != nil {
- setDefaults(MessageReflect(m))
- }
-}
-
-func setDefaults(m protoreflect.Message) {
- fds := m.Descriptor().Fields()
- for i := 0; i < fds.Len(); i++ {
- fd := fds.Get(i)
- if !m.Has(fd) {
- if fd.HasDefault() && fd.ContainingOneof() == nil {
- v := fd.Default()
- if fd.Kind() == protoreflect.BytesKind {
- v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
- }
- m.Set(fd, v)
- }
- continue
- }
- }
-
- m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
- switch {
- // Handle singular message.
- case fd.Cardinality() != protoreflect.Repeated:
- if fd.Message() != nil {
- setDefaults(m.Get(fd).Message())
- }
- // Handle list of messages.
- case fd.IsList():
- if fd.Message() != nil {
- ls := m.Get(fd).List()
- for i := 0; i < ls.Len(); i++ {
- setDefaults(ls.Get(i).Message())
- }
- }
- // Handle map of messages.
- case fd.IsMap():
- if fd.MapValue().Message() != nil {
- ms := m.Get(fd).Map()
- ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
- setDefaults(v.Message())
- return true
- })
- }
- }
- return true
- })
-}
diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go
deleted file mode 100644
index e8db57e0..00000000
--- a/vendor/github.com/golang/protobuf/proto/deprecated.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "strconv"
-
- protoV2 "google.golang.org/protobuf/proto"
-)
-
-var (
- // Deprecated: No longer returned.
- ErrNil = errors.New("proto: Marshal called with nil")
-
- // Deprecated: No longer returned.
- ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
-
- // Deprecated: No longer returned.
- ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
-)
-
-// Deprecated: Do not use.
-type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
-
-// Deprecated: Do not use.
-func GetStats() Stats { return Stats{} }
-
-// Deprecated: Do not use.
-func MarshalMessageSet(interface{}) ([]byte, error) {
- return nil, errors.New("proto: not implemented")
-}
-
-// Deprecated: Do not use.
-func UnmarshalMessageSet([]byte, interface{}) error {
- return errors.New("proto: not implemented")
-}
-
-// Deprecated: Do not use.
-func MarshalMessageSetJSON(interface{}) ([]byte, error) {
- return nil, errors.New("proto: not implemented")
-}
-
-// Deprecated: Do not use.
-func UnmarshalMessageSetJSON([]byte, interface{}) error {
- return errors.New("proto: not implemented")
-}
-
-// Deprecated: Do not use.
-func RegisterMessageSetType(Message, int32, string) {}
-
-// Deprecated: Do not use.
-func EnumName(m map[int32]string, v int32) string {
- s, ok := m[v]
- if ok {
- return s
- }
- return strconv.Itoa(int(v))
-}
-
-// Deprecated: Do not use.
-func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
- if data[0] == '"' {
- // New style: enums are strings.
- var repr string
- if err := json.Unmarshal(data, &repr); err != nil {
- return -1, err
- }
- val, ok := m[repr]
- if !ok {
- return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
- }
- return val, nil
- }
- // Old style: enums are ints.
- var val int32
- if err := json.Unmarshal(data, &val); err != nil {
- return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
- }
- return val, nil
-}
-
-// Deprecated: Do not use; this type existed for intenal-use only.
-type InternalMessageInfo struct{}
-
-// Deprecated: Do not use; this method existed for intenal-use only.
-func (*InternalMessageInfo) DiscardUnknown(m Message) {
- DiscardUnknown(m)
-}
-
-// Deprecated: Do not use; this method existed for intenal-use only.
-func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
- return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
-}
-
-// Deprecated: Do not use; this method existed for intenal-use only.
-func (*InternalMessageInfo) Merge(dst, src Message) {
- protoV2.Merge(MessageV2(dst), MessageV2(src))
-}
-
-// Deprecated: Do not use; this method existed for intenal-use only.
-func (*InternalMessageInfo) Size(m Message) int {
- return protoV2.Size(MessageV2(m))
-}
-
-// Deprecated: Do not use; this method existed for intenal-use only.
-func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
- return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
-}
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
deleted file mode 100644
index 2187e877..00000000
--- a/vendor/github.com/golang/protobuf/proto/discard.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "google.golang.org/protobuf/reflect/protoreflect"
-)
-
-// DiscardUnknown recursively discards all unknown fields from this message
-// and all embedded messages.
-//
-// When unmarshaling a message with unrecognized fields, the tags and values
-// of such fields are preserved in the Message. This allows a later call to
-// marshal to be able to produce a message that continues to have those
-// unrecognized fields. To avoid this, DiscardUnknown is used to
-// explicitly clear the unknown fields after unmarshaling.
-func DiscardUnknown(m Message) {
- if m != nil {
- discardUnknown(MessageReflect(m))
- }
-}
-
-func discardUnknown(m protoreflect.Message) {
- m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
- switch {
- // Handle singular message.
- case fd.Cardinality() != protoreflect.Repeated:
- if fd.Message() != nil {
- discardUnknown(m.Get(fd).Message())
- }
- // Handle list of messages.
- case fd.IsList():
- if fd.Message() != nil {
- ls := m.Get(fd).List()
- for i := 0; i < ls.Len(); i++ {
- discardUnknown(ls.Get(i).Message())
- }
- }
- // Handle map of messages.
- case fd.IsMap():
- if fd.MapValue().Message() != nil {
- ms := m.Get(fd).Map()
- ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
- discardUnknown(v.Message())
- return true
- })
- }
- }
- return true
- })
-
- // Discard unknown fields.
- if len(m.GetUnknown()) > 0 {
- m.SetUnknown(nil)
- }
-}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
deleted file mode 100644
index 42fc120c..00000000
--- a/vendor/github.com/golang/protobuf/proto/extensions.go
+++ /dev/null
@@ -1,356 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "errors"
- "fmt"
- "reflect"
-
- "google.golang.org/protobuf/encoding/protowire"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
- "google.golang.org/protobuf/runtime/protoiface"
- "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-type (
- // ExtensionDesc represents an extension descriptor and
- // is used to interact with an extension field in a message.
- //
- // Variables of this type are generated in code by protoc-gen-go.
- ExtensionDesc = protoimpl.ExtensionInfo
-
- // ExtensionRange represents a range of message extensions.
- // Used in code generated by protoc-gen-go.
- ExtensionRange = protoiface.ExtensionRangeV1
-
- // Deprecated: Do not use; this is an internal type.
- Extension = protoimpl.ExtensionFieldV1
-
- // Deprecated: Do not use; this is an internal type.
- XXX_InternalExtensions = protoimpl.ExtensionFields
-)
-
-// ErrMissingExtension reports whether the extension was not present.
-var ErrMissingExtension = errors.New("proto: missing extension")
-
-var errNotExtendable = errors.New("proto: not an extendable proto.Message")
-
-// HasExtension reports whether the extension field is present in m
-// either as an explicitly populated field or as an unknown field.
-func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return false
- }
-
- // Check whether any populated known field matches the field number.
- xtd := xt.TypeDescriptor()
- if isValidExtension(mr.Descriptor(), xtd) {
- has = mr.Has(xtd)
- } else {
- mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
- has = int32(fd.Number()) == xt.Field
- return !has
- })
- }
-
- // Check whether any unknown field matches the field number.
- for b := mr.GetUnknown(); !has && len(b) > 0; {
- num, _, n := protowire.ConsumeField(b)
- has = int32(num) == xt.Field
- b = b[n:]
- }
- return has
-}
-
-// ClearExtension removes the extension field from m
-// either as an explicitly populated field or as an unknown field.
-func ClearExtension(m Message, xt *ExtensionDesc) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return
- }
-
- xtd := xt.TypeDescriptor()
- if isValidExtension(mr.Descriptor(), xtd) {
- mr.Clear(xtd)
- } else {
- mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
- if int32(fd.Number()) == xt.Field {
- mr.Clear(fd)
- return false
- }
- return true
- })
- }
- clearUnknown(mr, fieldNum(xt.Field))
-}
-
-// ClearAllExtensions clears all extensions from m.
-// This includes populated fields and unknown fields in the extension range.
-func ClearAllExtensions(m Message) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return
- }
-
- mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
- if fd.IsExtension() {
- mr.Clear(fd)
- }
- return true
- })
- clearUnknown(mr, mr.Descriptor().ExtensionRanges())
-}
-
-// GetExtension retrieves a proto2 extended field from m.
-//
-// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
-// then GetExtension parses the encoded field and returns a Go value of the specified type.
-// If the field is not present, then the default value is returned (if one is specified),
-// otherwise ErrMissingExtension is reported.
-//
-// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
-// then GetExtension returns the raw encoded bytes for the extension field.
-func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
- return nil, errNotExtendable
- }
-
- // Retrieve the unknown fields for this extension field.
- var bo protoreflect.RawFields
- for bi := mr.GetUnknown(); len(bi) > 0; {
- num, _, n := protowire.ConsumeField(bi)
- if int32(num) == xt.Field {
- bo = append(bo, bi[:n]...)
- }
- bi = bi[n:]
- }
-
- // For type incomplete descriptors, only retrieve the unknown fields.
- if xt.ExtensionType == nil {
- return []byte(bo), nil
- }
-
- // If the extension field only exists as unknown fields, unmarshal it.
- // This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
- xtd := xt.TypeDescriptor()
- if !isValidExtension(mr.Descriptor(), xtd) {
- return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
- }
- if !mr.Has(xtd) && len(bo) > 0 {
- m2 := mr.New()
- if err := (proto.UnmarshalOptions{
- Resolver: extensionResolver{xt},
- }.Unmarshal(bo, m2.Interface())); err != nil {
- return nil, err
- }
- if m2.Has(xtd) {
- mr.Set(xtd, m2.Get(xtd))
- clearUnknown(mr, fieldNum(xt.Field))
- }
- }
-
- // Check whether the message has the extension field set or a default.
- var pv protoreflect.Value
- switch {
- case mr.Has(xtd):
- pv = mr.Get(xtd)
- case xtd.HasDefault():
- pv = xtd.Default()
- default:
- return nil, ErrMissingExtension
- }
-
- v := xt.InterfaceOf(pv)
- rv := reflect.ValueOf(v)
- if isScalarKind(rv.Kind()) {
- rv2 := reflect.New(rv.Type())
- rv2.Elem().Set(rv)
- v = rv2.Interface()
- }
- return v, nil
-}
-
-// extensionResolver is a custom extension resolver that stores a single
-// extension type that takes precedence over the global registry.
-type extensionResolver struct{ xt protoreflect.ExtensionType }
-
-func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
- if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
- return r.xt, nil
- }
- return protoregistry.GlobalTypes.FindExtensionByName(field)
-}
-
-func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
- if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
- return r.xt, nil
- }
- return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
-}
-
-// GetExtensions returns a list of the extensions values present in m,
-// corresponding with the provided list of extension descriptors, xts.
-// If an extension is missing in m, the corresponding value is nil.
-func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return nil, errNotExtendable
- }
-
- vs := make([]interface{}, len(xts))
- for i, xt := range xts {
- v, err := GetExtension(m, xt)
- if err != nil {
- if err == ErrMissingExtension {
- continue
- }
- return vs, err
- }
- vs[i] = v
- }
- return vs, nil
-}
-
-// SetExtension sets an extension field in m to the provided value.
-func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
- return errNotExtendable
- }
-
- rv := reflect.ValueOf(v)
- if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
- return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
- }
- if rv.Kind() == reflect.Ptr {
- if rv.IsNil() {
- return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
- }
- if isScalarKind(rv.Elem().Kind()) {
- v = rv.Elem().Interface()
- }
- }
-
- xtd := xt.TypeDescriptor()
- if !isValidExtension(mr.Descriptor(), xtd) {
- return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
- }
- mr.Set(xtd, xt.ValueOf(v))
- clearUnknown(mr, fieldNum(xt.Field))
- return nil
-}
-
-// SetRawExtension inserts b into the unknown fields of m.
-//
-// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
-func SetRawExtension(m Message, fnum int32, b []byte) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return
- }
-
- // Verify that the raw field is valid.
- for b0 := b; len(b0) > 0; {
- num, _, n := protowire.ConsumeField(b0)
- if int32(num) != fnum {
- panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
- }
- b0 = b0[n:]
- }
-
- ClearExtension(m, &ExtensionDesc{Field: fnum})
- mr.SetUnknown(append(mr.GetUnknown(), b...))
-}
-
-// ExtensionDescs returns a list of extension descriptors found in m,
-// containing descriptors for both populated extension fields in m and
-// also unknown fields of m that are in the extension range.
-// For the later case, an type incomplete descriptor is provided where only
-// the ExtensionDesc.Field field is populated.
-// The order of the extension descriptors is undefined.
-func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
- return nil, errNotExtendable
- }
-
- // Collect a set of known extension descriptors.
- extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
- mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
- if fd.IsExtension() {
- xt := fd.(protoreflect.ExtensionTypeDescriptor)
- if xd, ok := xt.Type().(*ExtensionDesc); ok {
- extDescs[fd.Number()] = xd
- }
- }
- return true
- })
-
- // Collect a set of unknown extension descriptors.
- extRanges := mr.Descriptor().ExtensionRanges()
- for b := mr.GetUnknown(); len(b) > 0; {
- num, _, n := protowire.ConsumeField(b)
- if extRanges.Has(num) && extDescs[num] == nil {
- extDescs[num] = nil
- }
- b = b[n:]
- }
-
- // Transpose the set of descriptors into a list.
- var xts []*ExtensionDesc
- for num, xt := range extDescs {
- if xt == nil {
- xt = &ExtensionDesc{Field: int32(num)}
- }
- xts = append(xts, xt)
- }
- return xts, nil
-}
-
-// isValidExtension reports whether xtd is a valid extension descriptor for md.
-func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
- return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
-}
-
-// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
-// This function exists for historical reasons since the representation of
-// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
-func isScalarKind(k reflect.Kind) bool {
- switch k {
- case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
- return true
- default:
- return false
- }
-}
-
-// clearUnknown removes unknown fields from m where remover.Has reports true.
-func clearUnknown(m protoreflect.Message, remover interface {
- Has(protoreflect.FieldNumber) bool
-}) {
- var bo protoreflect.RawFields
- for bi := m.GetUnknown(); len(bi) > 0; {
- num, _, n := protowire.ConsumeField(bi)
- if !remover.Has(num) {
- bo = append(bo, bi[:n]...)
- }
- bi = bi[n:]
- }
- if bi := m.GetUnknown(); len(bi) != len(bo) {
- m.SetUnknown(bo)
- }
-}
-
-type fieldNum protoreflect.FieldNumber
-
-func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
- return protoreflect.FieldNumber(n1) == n2
-}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
deleted file mode 100644
index dcdc2202..00000000
--- a/vendor/github.com/golang/protobuf/proto/properties.go
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "sync"
-
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-// StructProperties represents protocol buffer type information for a
-// generated protobuf message in the open-struct API.
-//
-// Deprecated: Do not use.
-type StructProperties struct {
- // Prop are the properties for each field.
- //
- // Fields belonging to a oneof are stored in OneofTypes instead, with a
- // single Properties representing the parent oneof held here.
- //
- // The order of Prop matches the order of fields in the Go struct.
- // Struct fields that are not related to protobufs have a "XXX_" prefix
- // in the Properties.Name and must be ignored by the user.
- Prop []*Properties
-
- // OneofTypes contains information about the oneof fields in this message.
- // It is keyed by the protobuf field name.
- OneofTypes map[string]*OneofProperties
-}
-
-// Properties represents the type information for a protobuf message field.
-//
-// Deprecated: Do not use.
-type Properties struct {
- // Name is a placeholder name with little meaningful semantic value.
- // If the name has an "XXX_" prefix, the entire Properties must be ignored.
- Name string
- // OrigName is the protobuf field name or oneof name.
- OrigName string
- // JSONName is the JSON name for the protobuf field.
- JSONName string
- // Enum is a placeholder name for enums.
- // For historical reasons, this is neither the Go name for the enum,
- // nor the protobuf name for the enum.
- Enum string // Deprecated: Do not use.
- // Weak contains the full name of the weakly referenced message.
- Weak string
- // Wire is a string representation of the wire type.
- Wire string
- // WireType is the protobuf wire type for the field.
- WireType int
- // Tag is the protobuf field number.
- Tag int
- // Required reports whether this is a required field.
- Required bool
- // Optional reports whether this is a optional field.
- Optional bool
- // Repeated reports whether this is a repeated field.
- Repeated bool
- // Packed reports whether this is a packed repeated field of scalars.
- Packed bool
- // Proto3 reports whether this field operates under the proto3 syntax.
- Proto3 bool
- // Oneof reports whether this field belongs within a oneof.
- Oneof bool
-
- // Default is the default value in string form.
- Default string
- // HasDefault reports whether the field has a default value.
- HasDefault bool
-
- // MapKeyProp is the properties for the key field for a map field.
- MapKeyProp *Properties
- // MapValProp is the properties for the value field for a map field.
- MapValProp *Properties
-}
-
-// OneofProperties represents the type information for a protobuf oneof.
-//
-// Deprecated: Do not use.
-type OneofProperties struct {
- // Type is a pointer to the generated wrapper type for the field value.
- // This is nil for messages that are not in the open-struct API.
- Type reflect.Type
- // Field is the index into StructProperties.Prop for the containing oneof.
- Field int
- // Prop is the properties for the field.
- Prop *Properties
-}
-
-// String formats the properties in the protobuf struct field tag style.
-func (p *Properties) String() string {
- s := p.Wire
- s += "," + strconv.Itoa(p.Tag)
- if p.Required {
- s += ",req"
- }
- if p.Optional {
- s += ",opt"
- }
- if p.Repeated {
- s += ",rep"
- }
- if p.Packed {
- s += ",packed"
- }
- s += ",name=" + p.OrigName
- if p.JSONName != "" {
- s += ",json=" + p.JSONName
- }
- if len(p.Enum) > 0 {
- s += ",enum=" + p.Enum
- }
- if len(p.Weak) > 0 {
- s += ",weak=" + p.Weak
- }
- if p.Proto3 {
- s += ",proto3"
- }
- if p.Oneof {
- s += ",oneof"
- }
- if p.HasDefault {
- s += ",def=" + p.Default
- }
- return s
-}
-
-// Parse populates p by parsing a string in the protobuf struct field tag style.
-func (p *Properties) Parse(tag string) {
- // For example: "bytes,49,opt,name=foo,def=hello!"
- for len(tag) > 0 {
- i := strings.IndexByte(tag, ',')
- if i < 0 {
- i = len(tag)
- }
- switch s := tag[:i]; {
- case strings.HasPrefix(s, "name="):
- p.OrigName = s[len("name="):]
- case strings.HasPrefix(s, "json="):
- p.JSONName = s[len("json="):]
- case strings.HasPrefix(s, "enum="):
- p.Enum = s[len("enum="):]
- case strings.HasPrefix(s, "weak="):
- p.Weak = s[len("weak="):]
- case strings.Trim(s, "0123456789") == "":
- n, _ := strconv.ParseUint(s, 10, 32)
- p.Tag = int(n)
- case s == "opt":
- p.Optional = true
- case s == "req":
- p.Required = true
- case s == "rep":
- p.Repeated = true
- case s == "varint" || s == "zigzag32" || s == "zigzag64":
- p.Wire = s
- p.WireType = WireVarint
- case s == "fixed32":
- p.Wire = s
- p.WireType = WireFixed32
- case s == "fixed64":
- p.Wire = s
- p.WireType = WireFixed64
- case s == "bytes":
- p.Wire = s
- p.WireType = WireBytes
- case s == "group":
- p.Wire = s
- p.WireType = WireStartGroup
- case s == "packed":
- p.Packed = true
- case s == "proto3":
- p.Proto3 = true
- case s == "oneof":
- p.Oneof = true
- case strings.HasPrefix(s, "def="):
- // The default tag is special in that everything afterwards is the
- // default regardless of the presence of commas.
- p.HasDefault = true
- p.Default, i = tag[len("def="):], len(tag)
- }
- tag = strings.TrimPrefix(tag[i:], ",")
- }
-}
-
-// Init populates the properties from a protocol buffer struct tag.
-//
-// Deprecated: Do not use.
-func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
- p.Name = name
- p.OrigName = name
- if tag == "" {
- return
- }
- p.Parse(tag)
-
- if typ != nil && typ.Kind() == reflect.Map {
- p.MapKeyProp = new(Properties)
- p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
- p.MapValProp = new(Properties)
- p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
- }
-}
-
-var propertiesCache sync.Map // map[reflect.Type]*StructProperties
-
-// GetProperties returns the list of properties for the type represented by t,
-// which must be a generated protocol buffer message in the open-struct API,
-// where protobuf message fields are represented by exported Go struct fields.
-//
-// Deprecated: Use protobuf reflection instead.
-func GetProperties(t reflect.Type) *StructProperties {
- if p, ok := propertiesCache.Load(t); ok {
- return p.(*StructProperties)
- }
- p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
- return p.(*StructProperties)
-}
-
-func newProperties(t reflect.Type) *StructProperties {
- if t.Kind() != reflect.Struct {
- panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
- }
-
- var hasOneof bool
- prop := new(StructProperties)
-
- // Construct a list of properties for each field in the struct.
- for i := 0; i < t.NumField(); i++ {
- p := new(Properties)
- f := t.Field(i)
- tagField := f.Tag.Get("protobuf")
- p.Init(f.Type, f.Name, tagField, &f)
-
- tagOneof := f.Tag.Get("protobuf_oneof")
- if tagOneof != "" {
- hasOneof = true
- p.OrigName = tagOneof
- }
-
- // Rename unrelated struct fields with the "XXX_" prefix since so much
- // user code simply checks for this to exclude special fields.
- if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
- p.Name = "XXX_" + p.Name
- p.OrigName = "XXX_" + p.OrigName
- } else if p.Weak != "" {
- p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
- }
-
- prop.Prop = append(prop.Prop, p)
- }
-
- // Construct a mapping of oneof field names to properties.
- if hasOneof {
- var oneofWrappers []interface{}
- if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
- oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
- }
- if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
- oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
- }
- if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
- if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
- oneofWrappers = m.ProtoMessageInfo().OneofWrappers
- }
- }
-
- prop.OneofTypes = make(map[string]*OneofProperties)
- for _, wrapper := range oneofWrappers {
- p := &OneofProperties{
- Type: reflect.ValueOf(wrapper).Type(), // *T
- Prop: new(Properties),
- }
- f := p.Type.Elem().Field(0)
- p.Prop.Name = f.Name
- p.Prop.Parse(f.Tag.Get("protobuf"))
-
- // Determine the struct field that contains this oneof.
- // Each wrapper is assignable to exactly one parent field.
- var foundOneof bool
- for i := 0; i < t.NumField() && !foundOneof; i++ {
- if p.Type.AssignableTo(t.Field(i).Type) {
- p.Field = i
- foundOneof = true
- }
- }
- if !foundOneof {
- panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
- }
- prop.OneofTypes[p.Prop.OrigName] = p
- }
- }
-
- return prop
-}
-
-func (sp *StructProperties) Len() int { return len(sp.Prop) }
-func (sp *StructProperties) Less(i, j int) bool { return false }
-func (sp *StructProperties) Swap(i, j int) { return }
diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go
deleted file mode 100644
index 5aee89c3..00000000
--- a/vendor/github.com/golang/protobuf/proto/proto.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package proto provides functionality for handling protocol buffer messages.
-// In particular, it provides marshaling and unmarshaling between a protobuf
-// message and the binary wire format.
-//
-// See https://developers.google.com/protocol-buffers/docs/gotutorial for
-// more information.
-//
-// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
-package proto
-
-import (
- protoV2 "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/runtime/protoiface"
- "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-const (
- ProtoPackageIsVersion1 = true
- ProtoPackageIsVersion2 = true
- ProtoPackageIsVersion3 = true
- ProtoPackageIsVersion4 = true
-)
-
-// GeneratedEnum is any enum type generated by protoc-gen-go
-// which is a named int32 kind.
-// This type exists for documentation purposes.
-type GeneratedEnum interface{}
-
-// GeneratedMessage is any message type generated by protoc-gen-go
-// which is a pointer to a named struct kind.
-// This type exists for documentation purposes.
-type GeneratedMessage interface{}
-
-// Message is a protocol buffer message.
-//
-// This is the v1 version of the message interface and is marginally better
-// than an empty interface as it lacks any method to programatically interact
-// with the contents of the message.
-//
-// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
-// exposes protobuf reflection as a first-class feature of the interface.
-//
-// To convert a v1 message to a v2 message, use the MessageV2 function.
-// To convert a v2 message to a v1 message, use the MessageV1 function.
-type Message = protoiface.MessageV1
-
-// MessageV1 converts either a v1 or v2 message to a v1 message.
-// It returns nil if m is nil.
-func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
- return protoimpl.X.ProtoMessageV1Of(m)
-}
-
-// MessageV2 converts either a v1 or v2 message to a v2 message.
-// It returns nil if m is nil.
-func MessageV2(m GeneratedMessage) protoV2.Message {
- return protoimpl.X.ProtoMessageV2Of(m)
-}
-
-// MessageReflect returns a reflective view for a message.
-// It returns nil if m is nil.
-func MessageReflect(m Message) protoreflect.Message {
- return protoimpl.X.MessageOf(m)
-}
-
-// Marshaler is implemented by messages that can marshal themselves.
-// This interface is used by the following functions: Size, Marshal,
-// Buffer.Marshal, and Buffer.EncodeMessage.
-//
-// Deprecated: Do not implement.
-type Marshaler interface {
- // Marshal formats the encoded bytes of the message.
- // It should be deterministic and emit valid protobuf wire data.
- // The caller takes ownership of the returned buffer.
- Marshal() ([]byte, error)
-}
-
-// Unmarshaler is implemented by messages that can unmarshal themselves.
-// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
-// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
-//
-// Deprecated: Do not implement.
-type Unmarshaler interface {
- // Unmarshal parses the encoded bytes of the protobuf wire input.
- // The provided buffer is only valid for during method call.
- // It should not reset the receiver message.
- Unmarshal([]byte) error
-}
-
-// Merger is implemented by messages that can merge themselves.
-// This interface is used by the following functions: Clone and Merge.
-//
-// Deprecated: Do not implement.
-type Merger interface {
- // Merge merges the contents of src into the receiver message.
- // It clones all data structures in src such that it aliases no mutable
- // memory referenced by src.
- Merge(src Message)
-}
-
-// RequiredNotSetError is an error type returned when
-// marshaling or unmarshaling a message with missing required fields.
-type RequiredNotSetError struct {
- err error
-}
-
-func (e *RequiredNotSetError) Error() string {
- if e.err != nil {
- return e.err.Error()
- }
- return "proto: required field not set"
-}
-func (e *RequiredNotSetError) RequiredNotSet() bool {
- return true
-}
-
-func checkRequiredNotSet(m protoV2.Message) error {
- if err := protoV2.CheckInitialized(m); err != nil {
- return &RequiredNotSetError{err: err}
- }
- return nil
-}
-
-// Clone returns a deep copy of src.
-func Clone(src Message) Message {
- return MessageV1(protoV2.Clone(MessageV2(src)))
-}
-
-// Merge merges src into dst, which must be messages of the same type.
-//
-// Populated scalar fields in src are copied to dst, while populated
-// singular messages in src are merged into dst by recursively calling Merge.
-// The elements of every list field in src is appended to the corresponded
-// list fields in dst. The entries of every map field in src is copied into
-// the corresponding map field in dst, possibly replacing existing entries.
-// The unknown fields of src are appended to the unknown fields of dst.
-func Merge(dst, src Message) {
- protoV2.Merge(MessageV2(dst), MessageV2(src))
-}
-
-// Equal reports whether two messages are equal.
-// If two messages marshal to the same bytes under deterministic serialization,
-// then Equal is guaranteed to report true.
-//
-// Two messages are equal if they are the same protobuf message type,
-// have the same set of populated known and extension field values,
-// and the same set of unknown fields values.
-//
-// Scalar values are compared with the equivalent of the == operator in Go,
-// except bytes values which are compared using bytes.Equal and
-// floating point values which specially treat NaNs as equal.
-// Message values are compared by recursively calling Equal.
-// Lists are equal if each element value is also equal.
-// Maps are equal if they have the same set of keys, where the pair of values
-// for each key is also equal.
-func Equal(x, y Message) bool {
- return protoV2.Equal(MessageV2(x), MessageV2(y))
-}
-
-func isMessageSet(md protoreflect.MessageDescriptor) bool {
- ms, ok := md.(interface{ IsMessageSet() bool })
- return ok && ms.IsMessageSet()
-}
diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go
deleted file mode 100644
index 066b4323..00000000
--- a/vendor/github.com/golang/protobuf/proto/registry.go
+++ /dev/null
@@ -1,317 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "bytes"
- "compress/gzip"
- "fmt"
- "io/ioutil"
- "reflect"
- "strings"
- "sync"
-
- "google.golang.org/protobuf/reflect/protodesc"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
- "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-// filePath is the path to the proto source file.
-type filePath = string // e.g., "google/protobuf/descriptor.proto"
-
-// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
-type fileDescGZIP = []byte
-
-var fileCache sync.Map // map[filePath]fileDescGZIP
-
-// RegisterFile is called from generated code to register the compressed
-// FileDescriptorProto with the file path for a proto source file.
-//
-// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
-func RegisterFile(s filePath, d fileDescGZIP) {
- // Decompress the descriptor.
- zr, err := gzip.NewReader(bytes.NewReader(d))
- if err != nil {
- panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
- }
- b, err := ioutil.ReadAll(zr)
- if err != nil {
- panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
- }
-
- // Construct a protoreflect.FileDescriptor from the raw descriptor.
- // Note that DescBuilder.Build automatically registers the constructed
- // file descriptor with the v2 registry.
- protoimpl.DescBuilder{RawDescriptor: b}.Build()
-
- // Locally cache the raw descriptor form for the file.
- fileCache.Store(s, d)
-}
-
-// FileDescriptor returns the compressed FileDescriptorProto given the file path
-// for a proto source file. It returns nil if not found.
-//
-// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
-func FileDescriptor(s filePath) fileDescGZIP {
- if v, ok := fileCache.Load(s); ok {
- return v.(fileDescGZIP)
- }
-
- // Find the descriptor in the v2 registry.
- var b []byte
- if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
- b, _ = Marshal(protodesc.ToFileDescriptorProto(fd))
- }
-
- // Locally cache the raw descriptor form for the file.
- if len(b) > 0 {
- v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
- return v.(fileDescGZIP)
- }
- return nil
-}
-
-// enumName is the name of an enum. For historical reasons, the enum name is
-// neither the full Go name nor the full protobuf name of the enum.
-// The name is the dot-separated combination of just the proto package that the
-// enum is declared within followed by the Go type name of the generated enum.
-type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"
-
-// enumsByName maps enum values by name to their numeric counterpart.
-type enumsByName = map[string]int32
-
-// enumsByNumber maps enum values by number to their name counterpart.
-type enumsByNumber = map[int32]string
-
-var enumCache sync.Map // map[enumName]enumsByName
-var numFilesCache sync.Map // map[protoreflect.FullName]int
-
-// RegisterEnum is called from the generated code to register the mapping of
-// enum value names to enum numbers for the enum identified by s.
-//
-// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
-func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
- if _, ok := enumCache.Load(s); ok {
- panic("proto: duplicate enum registered: " + s)
- }
- enumCache.Store(s, m)
-
- // This does not forward registration to the v2 registry since this API
- // lacks sufficient information to construct a complete v2 enum descriptor.
-}
-
-// EnumValueMap returns the mapping from enum value names to enum numbers for
-// the enum of the given name. It returns nil if not found.
-//
-// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
-func EnumValueMap(s enumName) enumsByName {
- if v, ok := enumCache.Load(s); ok {
- return v.(enumsByName)
- }
-
- // Check whether the cache is stale. If the number of files in the current
- // package differs, then it means that some enums may have been recently
- // registered upstream that we do not know about.
- var protoPkg protoreflect.FullName
- if i := strings.LastIndexByte(s, '.'); i >= 0 {
- protoPkg = protoreflect.FullName(s[:i])
- }
- v, _ := numFilesCache.Load(protoPkg)
- numFiles, _ := v.(int)
- if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
- return nil // cache is up-to-date; was not found earlier
- }
-
- // Update the enum cache for all enums declared in the given proto package.
- numFiles = 0
- protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
- walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
- name := protoimpl.X.LegacyEnumName(ed)
- if _, ok := enumCache.Load(name); !ok {
- m := make(enumsByName)
- evs := ed.Values()
- for i := evs.Len() - 1; i >= 0; i-- {
- ev := evs.Get(i)
- m[string(ev.Name())] = int32(ev.Number())
- }
- enumCache.LoadOrStore(name, m)
- }
- })
- numFiles++
- return true
- })
- numFilesCache.Store(protoPkg, numFiles)
-
- // Check cache again for enum map.
- if v, ok := enumCache.Load(s); ok {
- return v.(enumsByName)
- }
- return nil
-}
-
-// walkEnums recursively walks all enums declared in d.
-func walkEnums(d interface {
- Enums() protoreflect.EnumDescriptors
- Messages() protoreflect.MessageDescriptors
-}, f func(protoreflect.EnumDescriptor)) {
- eds := d.Enums()
- for i := eds.Len() - 1; i >= 0; i-- {
- f(eds.Get(i))
- }
- mds := d.Messages()
- for i := mds.Len() - 1; i >= 0; i-- {
- walkEnums(mds.Get(i), f)
- }
-}
-
-// messageName is the full name of protobuf message.
-type messageName = string
-
-var messageTypeCache sync.Map // map[messageName]reflect.Type
-
-// RegisterType is called from generated code to register the message Go type
-// for a message of the given name.
-//
-// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
-func RegisterType(m Message, s messageName) {
- mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
- if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
- panic(err)
- }
- messageTypeCache.Store(s, reflect.TypeOf(m))
-}
-
-// RegisterMapType is called from generated code to register the Go map type
-// for a protobuf message representing a map entry.
-//
-// Deprecated: Do not use.
-func RegisterMapType(m interface{}, s messageName) {
- t := reflect.TypeOf(m)
- if t.Kind() != reflect.Map {
- panic(fmt.Sprintf("invalid map kind: %v", t))
- }
- if _, ok := messageTypeCache.Load(s); ok {
- panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
- }
- messageTypeCache.Store(s, t)
-}
-
-// MessageType returns the message type for a named message.
-// It returns nil if not found.
-//
-// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
-func MessageType(s messageName) reflect.Type {
- if v, ok := messageTypeCache.Load(s); ok {
- return v.(reflect.Type)
- }
-
- // Derive the message type from the v2 registry.
- var t reflect.Type
- if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
- t = messageGoType(mt)
- }
-
- // If we could not get a concrete type, it is possible that it is a
- // pseudo-message for a map entry.
- if t == nil {
- d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
- if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
- kt := goTypeForField(md.Fields().ByNumber(1))
- vt := goTypeForField(md.Fields().ByNumber(2))
- t = reflect.MapOf(kt, vt)
- }
- }
-
- // Locally cache the message type for the given name.
- if t != nil {
- v, _ := messageTypeCache.LoadOrStore(s, t)
- return v.(reflect.Type)
- }
- return nil
-}
-
-func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
- switch k := fd.Kind(); k {
- case protoreflect.EnumKind:
- if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
- return enumGoType(et)
- }
- return reflect.TypeOf(protoreflect.EnumNumber(0))
- case protoreflect.MessageKind, protoreflect.GroupKind:
- if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
- return messageGoType(mt)
- }
- return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
- default:
- return reflect.TypeOf(fd.Default().Interface())
- }
-}
-
-func enumGoType(et protoreflect.EnumType) reflect.Type {
- return reflect.TypeOf(et.New(0))
-}
-
-func messageGoType(mt protoreflect.MessageType) reflect.Type {
- return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
-}
-
-// MessageName returns the full protobuf name for the given message type.
-//
-// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
-func MessageName(m Message) messageName {
- if m == nil {
- return ""
- }
- if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
- return m.XXX_MessageName()
- }
- return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
-}
-
-// RegisterExtension is called from the generated code to register
-// the extension descriptor.
-//
-// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
-func RegisterExtension(d *ExtensionDesc) {
- if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
- panic(err)
- }
-}
-
-type extensionsByNumber = map[int32]*ExtensionDesc
-
-var extensionCache sync.Map // map[messageName]extensionsByNumber
-
-// RegisteredExtensions returns a map of the registered extensions for the
-// provided protobuf message, indexed by the extension field number.
-//
-// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
-func RegisteredExtensions(m Message) extensionsByNumber {
- // Check whether the cache is stale. If the number of extensions for
- // the given message differs, then it means that some extensions were
- // recently registered upstream that we do not know about.
- s := MessageName(m)
- v, _ := extensionCache.Load(s)
- xs, _ := v.(extensionsByNumber)
- if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
- return xs // cache is up-to-date
- }
-
- // Cache is stale, re-compute the extensions map.
- xs = make(extensionsByNumber)
- protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
- if xd, ok := xt.(*ExtensionDesc); ok {
- xs[int32(xt.TypeDescriptor().Number())] = xd
- } else {
- // TODO: This implies that the protoreflect.ExtensionType is a
- // custom type not generated by protoc-gen-go. We could try and
- // convert the type to an ExtensionDesc.
- }
- return true
- })
- extensionCache.Store(s, xs)
- return xs
-}
diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go
deleted file mode 100644
index 47eb3e44..00000000
--- a/vendor/github.com/golang/protobuf/proto/text_decode.go
+++ /dev/null
@@ -1,801 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "encoding"
- "errors"
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "unicode/utf8"
-
- "google.golang.org/protobuf/encoding/prototext"
- protoV2 "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
-)
-
-const wrapTextUnmarshalV2 = false
-
-// ParseError is returned by UnmarshalText.
-type ParseError struct {
- Message string
-
- // Deprecated: Do not use.
- Line, Offset int
-}
-
-func (e *ParseError) Error() string {
- if wrapTextUnmarshalV2 {
- return e.Message
- }
- if e.Line == 1 {
- return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
- }
- return fmt.Sprintf("line %d: %v", e.Line, e.Message)
-}
-
-// UnmarshalText parses a proto text formatted string into m.
-func UnmarshalText(s string, m Message) error {
- if u, ok := m.(encoding.TextUnmarshaler); ok {
- return u.UnmarshalText([]byte(s))
- }
-
- m.Reset()
- mi := MessageV2(m)
-
- if wrapTextUnmarshalV2 {
- err := prototext.UnmarshalOptions{
- AllowPartial: true,
- }.Unmarshal([]byte(s), mi)
- if err != nil {
- return &ParseError{Message: err.Error()}
- }
- return checkRequiredNotSet(mi)
- } else {
- if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
- return err
- }
- return checkRequiredNotSet(mi)
- }
-}
-
-type textParser struct {
- s string // remaining input
- done bool // whether the parsing is finished (success or error)
- backed bool // whether back() was called
- offset, line int
- cur token
-}
-
-type token struct {
- value string
- err *ParseError
- line int // line number
- offset int // byte number from start of input, not start of line
- unquoted string // the unquoted version of value, if it was a quoted string
-}
-
-func newTextParser(s string) *textParser {
- p := new(textParser)
- p.s = s
- p.line = 1
- p.cur.line = 1
- return p
-}
-
-func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
- md := m.Descriptor()
- fds := md.Fields()
-
- // A struct is a sequence of "name: value", terminated by one of
- // '>' or '}', or the end of the input. A name may also be
- // "[extension]" or "[type/url]".
- //
- // The whole struct can also be an expanded Any message, like:
- // [type/url] < ... struct contents ... >
- seen := make(map[protoreflect.FieldNumber]bool)
- for {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value == terminator {
- break
- }
- if tok.value == "[" {
- if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
- return err
- }
- continue
- }
-
- // This is a normal, non-extension field.
- name := protoreflect.Name(tok.value)
- fd := fds.ByName(name)
- switch {
- case fd == nil:
- gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
- if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
- fd = gd
- }
- case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
- fd = nil
- case fd.IsWeak() && fd.Message().IsPlaceholder():
- fd = nil
- }
- if fd == nil {
- typeName := string(md.FullName())
- if m, ok := m.Interface().(Message); ok {
- t := reflect.TypeOf(m)
- if t.Kind() == reflect.Ptr {
- typeName = t.Elem().String()
- }
- }
- return p.errorf("unknown field name %q in %v", name, typeName)
- }
- if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
- return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
- }
- if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
- return p.errorf("non-repeated field %q was repeated", fd.Name())
- }
- seen[fd.Number()] = true
-
- // Consume any colon.
- if err := p.checkForColon(fd); err != nil {
- return err
- }
-
- // Parse into the field.
- v := m.Get(fd)
- if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
- v = m.Mutable(fd)
- }
- if v, err = p.unmarshalValue(v, fd); err != nil {
- return err
- }
- m.Set(fd, v)
-
- if err := p.consumeOptionalSeparator(); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
- name, err := p.consumeExtensionOrAnyName()
- if err != nil {
- return err
- }
-
- // If it contains a slash, it's an Any type URL.
- if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- // consume an optional colon
- if tok.value == ":" {
- tok = p.next()
- if tok.err != nil {
- return tok.err
- }
- }
-
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return p.errorf("expected '{' or '<', found %q", tok.value)
- }
-
- mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
- if err != nil {
- return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
- }
- m2 := mt.New()
- if err := p.unmarshalMessage(m2, terminator); err != nil {
- return err
- }
- b, err := protoV2.Marshal(m2.Interface())
- if err != nil {
- return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
- }
-
- urlFD := m.Descriptor().Fields().ByName("type_url")
- valFD := m.Descriptor().Fields().ByName("value")
- if seen[urlFD.Number()] {
- return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
- }
- if seen[valFD.Number()] {
- return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
- }
- m.Set(urlFD, protoreflect.ValueOfString(name))
- m.Set(valFD, protoreflect.ValueOfBytes(b))
- seen[urlFD.Number()] = true
- seen[valFD.Number()] = true
- return nil
- }
-
- xname := protoreflect.FullName(name)
- xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
- if xt == nil && isMessageSet(m.Descriptor()) {
- xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
- }
- if xt == nil {
- return p.errorf("unrecognized extension %q", name)
- }
- fd := xt.TypeDescriptor()
- if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
- return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
- }
-
- if err := p.checkForColon(fd); err != nil {
- return err
- }
-
- v := m.Get(fd)
- if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
- v = m.Mutable(fd)
- }
- v, err = p.unmarshalValue(v, fd)
- if err != nil {
- return err
- }
- m.Set(fd, v)
- return p.consumeOptionalSeparator()
-}
-
-func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
- tok := p.next()
- if tok.err != nil {
- return v, tok.err
- }
- if tok.value == "" {
- return v, p.errorf("unexpected EOF")
- }
-
- switch {
- case fd.IsList():
- lv := v.List()
- var err error
- if tok.value == "[" {
- // Repeated field with list notation, like [1,2,3].
- for {
- vv := lv.NewElement()
- vv, err = p.unmarshalSingularValue(vv, fd)
- if err != nil {
- return v, err
- }
- lv.Append(vv)
-
- tok := p.next()
- if tok.err != nil {
- return v, tok.err
- }
- if tok.value == "]" {
- break
- }
- if tok.value != "," {
- return v, p.errorf("Expected ']' or ',' found %q", tok.value)
- }
- }
- return v, nil
- }
-
- // One value of the repeated field.
- p.back()
- vv := lv.NewElement()
- vv, err = p.unmarshalSingularValue(vv, fd)
- if err != nil {
- return v, err
- }
- lv.Append(vv)
- return v, nil
- case fd.IsMap():
- // The map entry should be this sequence of tokens:
- // < key : KEY value : VALUE >
- // However, implementations may omit key or value, and technically
- // we should support them in any order.
- var terminator string
- switch tok.value {
- case "<":
- terminator = ">"
- case "{":
- terminator = "}"
- default:
- return v, p.errorf("expected '{' or '<', found %q", tok.value)
- }
-
- keyFD := fd.MapKey()
- valFD := fd.MapValue()
-
- mv := v.Map()
- kv := keyFD.Default()
- vv := mv.NewValue()
- for {
- tok := p.next()
- if tok.err != nil {
- return v, tok.err
- }
- if tok.value == terminator {
- break
- }
- var err error
- switch tok.value {
- case "key":
- if err := p.consumeToken(":"); err != nil {
- return v, err
- }
- if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
- return v, err
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return v, err
- }
- case "value":
- if err := p.checkForColon(valFD); err != nil {
- return v, err
- }
- if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
- return v, err
- }
- if err := p.consumeOptionalSeparator(); err != nil {
- return v, err
- }
- default:
- p.back()
- return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
- }
- }
- mv.Set(kv.MapKey(), vv)
- return v, nil
- default:
- p.back()
- return p.unmarshalSingularValue(v, fd)
- }
-}
-
-func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
- tok := p.next()
- if tok.err != nil {
- return v, tok.err
- }
- if tok.value == "" {
- return v, p.errorf("unexpected EOF")
- }
-
- switch fd.Kind() {
- case protoreflect.BoolKind:
- switch tok.value {
- case "true", "1", "t", "True":
- return protoreflect.ValueOfBool(true), nil
- case "false", "0", "f", "False":
- return protoreflect.ValueOfBool(false), nil
- }
- case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
- if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
- return protoreflect.ValueOfInt32(int32(x)), nil
- }
-
- // The C++ parser accepts large positive hex numbers that uses
- // two's complement arithmetic to represent negative numbers.
- // This feature is here for backwards compatibility with C++.
- if strings.HasPrefix(tok.value, "0x") {
- if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
- return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
- }
- }
- case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
- if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
- return protoreflect.ValueOfInt64(int64(x)), nil
- }
-
- // The C++ parser accepts large positive hex numbers that uses
- // two's complement arithmetic to represent negative numbers.
- // This feature is here for backwards compatibility with C++.
- if strings.HasPrefix(tok.value, "0x") {
- if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
- return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
- }
- }
- case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
- if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
- return protoreflect.ValueOfUint32(uint32(x)), nil
- }
- case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
- if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
- return protoreflect.ValueOfUint64(uint64(x)), nil
- }
- case protoreflect.FloatKind:
- // Ignore 'f' for compatibility with output generated by C++,
- // but don't remove 'f' when the value is "-inf" or "inf".
- v := tok.value
- if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
- v = v[:len(v)-len("f")]
- }
- if x, err := strconv.ParseFloat(v, 32); err == nil {
- return protoreflect.ValueOfFloat32(float32(x)), nil
- }
- case protoreflect.DoubleKind:
- // Ignore 'f' for compatibility with output generated by C++,
- // but don't remove 'f' when the value is "-inf" or "inf".
- v := tok.value
- if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
- v = v[:len(v)-len("f")]
- }
- if x, err := strconv.ParseFloat(v, 64); err == nil {
- return protoreflect.ValueOfFloat64(float64(x)), nil
- }
- case protoreflect.StringKind:
- if isQuote(tok.value[0]) {
- return protoreflect.ValueOfString(tok.unquoted), nil
- }
- case protoreflect.BytesKind:
- if isQuote(tok.value[0]) {
- return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
- }
- case protoreflect.EnumKind:
- if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
- return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
- }
- vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
- if vd != nil {
- return protoreflect.ValueOfEnum(vd.Number()), nil
- }
- case protoreflect.MessageKind, protoreflect.GroupKind:
- var terminator string
- switch tok.value {
- case "{":
- terminator = "}"
- case "<":
- terminator = ">"
- default:
- return v, p.errorf("expected '{' or '<', found %q", tok.value)
- }
- err := p.unmarshalMessage(v.Message(), terminator)
- return v, err
- default:
- panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
- }
- return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
-}
-
-// Consume a ':' from the input stream (if the next token is a colon),
-// returning an error if a colon is needed but not present.
-func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ":" {
- if fd.Message() == nil {
- return p.errorf("expected ':', found %q", tok.value)
- }
- p.back()
- }
- return nil
-}
-
-// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
-// the following ']'. It returns the name or URL consumed.
-func (p *textParser) consumeExtensionOrAnyName() (string, error) {
- tok := p.next()
- if tok.err != nil {
- return "", tok.err
- }
-
- // If extension name or type url is quoted, it's a single token.
- if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
- name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
- if err != nil {
- return "", err
- }
- return name, p.consumeToken("]")
- }
-
- // Consume everything up to "]"
- var parts []string
- for tok.value != "]" {
- parts = append(parts, tok.value)
- tok = p.next()
- if tok.err != nil {
- return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
- }
- if p.done && tok.value != "]" {
- return "", p.errorf("unclosed type_url or extension name")
- }
- }
- return strings.Join(parts, ""), nil
-}
-
-// consumeOptionalSeparator consumes an optional semicolon or comma.
-// It is used in unmarshalMessage to provide backward compatibility.
-func (p *textParser) consumeOptionalSeparator() error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != ";" && tok.value != "," {
- p.back()
- }
- return nil
-}
-
-func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
- pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
- p.cur.err = pe
- p.done = true
- return pe
-}
-
-func (p *textParser) skipWhitespace() {
- i := 0
- for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
- if p.s[i] == '#' {
- // comment; skip to end of line or input
- for i < len(p.s) && p.s[i] != '\n' {
- i++
- }
- if i == len(p.s) {
- break
- }
- }
- if p.s[i] == '\n' {
- p.line++
- }
- i++
- }
- p.offset += i
- p.s = p.s[i:len(p.s)]
- if len(p.s) == 0 {
- p.done = true
- }
-}
-
-func (p *textParser) advance() {
- // Skip whitespace
- p.skipWhitespace()
- if p.done {
- return
- }
-
- // Start of non-whitespace
- p.cur.err = nil
- p.cur.offset, p.cur.line = p.offset, p.line
- p.cur.unquoted = ""
- switch p.s[0] {
- case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
- // Single symbol
- p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
- case '"', '\'':
- // Quoted string
- i := 1
- for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
- if p.s[i] == '\\' && i+1 < len(p.s) {
- // skip escaped char
- i++
- }
- i++
- }
- if i >= len(p.s) || p.s[i] != p.s[0] {
- p.errorf("unmatched quote")
- return
- }
- unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
- if err != nil {
- p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
- return
- }
- p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
- p.cur.unquoted = unq
- default:
- i := 0
- for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
- i++
- }
- if i == 0 {
- p.errorf("unexpected byte %#x", p.s[0])
- return
- }
- p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
- }
- p.offset += len(p.cur.value)
-}
-
-// Back off the parser by one token. Can only be done between calls to next().
-// It makes the next advance() a no-op.
-func (p *textParser) back() { p.backed = true }
-
-// Advances the parser and returns the new current token.
-func (p *textParser) next() *token {
- if p.backed || p.done {
- p.backed = false
- return &p.cur
- }
- p.advance()
- if p.done {
- p.cur.value = ""
- } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
- // Look for multiple quoted strings separated by whitespace,
- // and concatenate them.
- cat := p.cur
- for {
- p.skipWhitespace()
- if p.done || !isQuote(p.s[0]) {
- break
- }
- p.advance()
- if p.cur.err != nil {
- return &p.cur
- }
- cat.value += " " + p.cur.value
- cat.unquoted += p.cur.unquoted
- }
- p.done = false // parser may have seen EOF, but we want to return cat
- p.cur = cat
- }
- return &p.cur
-}
-
-func (p *textParser) consumeToken(s string) error {
- tok := p.next()
- if tok.err != nil {
- return tok.err
- }
- if tok.value != s {
- p.back()
- return p.errorf("expected %q, found %q", s, tok.value)
- }
- return nil
-}
-
-var errBadUTF8 = errors.New("proto: bad UTF-8")
-
-func unquoteC(s string, quote rune) (string, error) {
- // This is based on C++'s tokenizer.cc.
- // Despite its name, this is *not* parsing C syntax.
- // For instance, "\0" is an invalid quoted string.
-
- // Avoid allocation in trivial cases.
- simple := true
- for _, r := range s {
- if r == '\\' || r == quote {
- simple = false
- break
- }
- }
- if simple {
- return s, nil
- }
-
- buf := make([]byte, 0, 3*len(s)/2)
- for len(s) > 0 {
- r, n := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && n == 1 {
- return "", errBadUTF8
- }
- s = s[n:]
- if r != '\\' {
- if r < utf8.RuneSelf {
- buf = append(buf, byte(r))
- } else {
- buf = append(buf, string(r)...)
- }
- continue
- }
-
- ch, tail, err := unescape(s)
- if err != nil {
- return "", err
- }
- buf = append(buf, ch...)
- s = tail
- }
- return string(buf), nil
-}
-
-func unescape(s string) (ch string, tail string, err error) {
- r, n := utf8.DecodeRuneInString(s)
- if r == utf8.RuneError && n == 1 {
- return "", "", errBadUTF8
- }
- s = s[n:]
- switch r {
- case 'a':
- return "\a", s, nil
- case 'b':
- return "\b", s, nil
- case 'f':
- return "\f", s, nil
- case 'n':
- return "\n", s, nil
- case 'r':
- return "\r", s, nil
- case 't':
- return "\t", s, nil
- case 'v':
- return "\v", s, nil
- case '?':
- return "?", s, nil // trigraph workaround
- case '\'', '"', '\\':
- return string(r), s, nil
- case '0', '1', '2', '3', '4', '5', '6', '7':
- if len(s) < 2 {
- return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
- }
- ss := string(r) + s[:2]
- s = s[2:]
- i, err := strconv.ParseUint(ss, 8, 8)
- if err != nil {
- return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
- }
- return string([]byte{byte(i)}), s, nil
- case 'x', 'X', 'u', 'U':
- var n int
- switch r {
- case 'x', 'X':
- n = 2
- case 'u':
- n = 4
- case 'U':
- n = 8
- }
- if len(s) < n {
- return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
- }
- ss := s[:n]
- s = s[n:]
- i, err := strconv.ParseUint(ss, 16, 64)
- if err != nil {
- return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
- }
- if r == 'x' || r == 'X' {
- return string([]byte{byte(i)}), s, nil
- }
- if i > utf8.MaxRune {
- return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
- }
- return string(rune(i)), s, nil
- }
- return "", "", fmt.Errorf(`unknown escape \%c`, r)
-}
-
-func isIdentOrNumberChar(c byte) bool {
- switch {
- case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
- return true
- case '0' <= c && c <= '9':
- return true
- }
- switch c {
- case '-', '+', '.', '_':
- return true
- }
- return false
-}
-
-func isWhitespace(c byte) bool {
- switch c {
- case ' ', '\t', '\n', '\r':
- return true
- }
- return false
-}
-
-func isQuote(c byte) bool {
- switch c {
- case '"', '\'':
- return true
- }
- return false
-}
diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go
deleted file mode 100644
index a31134ee..00000000
--- a/vendor/github.com/golang/protobuf/proto/text_encode.go
+++ /dev/null
@@ -1,560 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- "bytes"
- "encoding"
- "fmt"
- "io"
- "math"
- "sort"
- "strings"
-
- "google.golang.org/protobuf/encoding/prototext"
- "google.golang.org/protobuf/encoding/protowire"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
-)
-
-const wrapTextMarshalV2 = false
-
-// TextMarshaler is a configurable text format marshaler.
-type TextMarshaler struct {
- Compact bool // use compact text format (one line)
- ExpandAny bool // expand google.protobuf.Any messages of known types
-}
-
-// Marshal writes the proto text format of m to w.
-func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
- b, err := tm.marshal(m)
- if len(b) > 0 {
- if _, err := w.Write(b); err != nil {
- return err
- }
- }
- return err
-}
-
-// Text returns a proto text formatted string of m.
-func (tm *TextMarshaler) Text(m Message) string {
- b, _ := tm.marshal(m)
- return string(b)
-}
-
-func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
- mr := MessageReflect(m)
- if mr == nil || !mr.IsValid() {
- return []byte(""), nil
- }
-
- if wrapTextMarshalV2 {
- if m, ok := m.(encoding.TextMarshaler); ok {
- return m.MarshalText()
- }
-
- opts := prototext.MarshalOptions{
- AllowPartial: true,
- EmitUnknown: true,
- }
- if !tm.Compact {
- opts.Indent = " "
- }
- if !tm.ExpandAny {
- opts.Resolver = (*protoregistry.Types)(nil)
- }
- return opts.Marshal(mr.Interface())
- } else {
- w := &textWriter{
- compact: tm.Compact,
- expandAny: tm.ExpandAny,
- complete: true,
- }
-
- if m, ok := m.(encoding.TextMarshaler); ok {
- b, err := m.MarshalText()
- if err != nil {
- return nil, err
- }
- w.Write(b)
- return w.buf, nil
- }
-
- err := w.writeMessage(mr)
- return w.buf, err
- }
-}
-
-var (
- defaultTextMarshaler = TextMarshaler{}
- compactTextMarshaler = TextMarshaler{Compact: true}
-)
-
-// MarshalText writes the proto text format of m to w.
-func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
-
-// MarshalTextString returns a proto text formatted string of m.
-func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
-
-// CompactText writes the compact proto text format of m to w.
-func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
-
-// CompactTextString returns a compact proto text formatted string of m.
-func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
-
-var (
- newline = []byte("\n")
- endBraceNewline = []byte("}\n")
- posInf = []byte("inf")
- negInf = []byte("-inf")
- nan = []byte("nan")
-)
-
-// textWriter is an io.Writer that tracks its indentation level.
-type textWriter struct {
- compact bool // same as TextMarshaler.Compact
- expandAny bool // same as TextMarshaler.ExpandAny
- complete bool // whether the current position is a complete line
- indent int // indentation level; never negative
- buf []byte
-}
-
-func (w *textWriter) Write(p []byte) (n int, _ error) {
- newlines := bytes.Count(p, newline)
- if newlines == 0 {
- if !w.compact && w.complete {
- w.writeIndent()
- }
- w.buf = append(w.buf, p...)
- w.complete = false
- return len(p), nil
- }
-
- frags := bytes.SplitN(p, newline, newlines+1)
- if w.compact {
- for i, frag := range frags {
- if i > 0 {
- w.buf = append(w.buf, ' ')
- n++
- }
- w.buf = append(w.buf, frag...)
- n += len(frag)
- }
- return n, nil
- }
-
- for i, frag := range frags {
- if w.complete {
- w.writeIndent()
- }
- w.buf = append(w.buf, frag...)
- n += len(frag)
- if i+1 < len(frags) {
- w.buf = append(w.buf, '\n')
- n++
- }
- }
- w.complete = len(frags[len(frags)-1]) == 0
- return n, nil
-}
-
-func (w *textWriter) WriteByte(c byte) error {
- if w.compact && c == '\n' {
- c = ' '
- }
- if !w.compact && w.complete {
- w.writeIndent()
- }
- w.buf = append(w.buf, c)
- w.complete = c == '\n'
- return nil
-}
-
-func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
- if !w.compact && w.complete {
- w.writeIndent()
- }
- w.complete = false
-
- if fd.Kind() != protoreflect.GroupKind {
- w.buf = append(w.buf, fd.Name()...)
- w.WriteByte(':')
- } else {
- // Use message type name for group field name.
- w.buf = append(w.buf, fd.Message().Name()...)
- }
-
- if !w.compact {
- w.WriteByte(' ')
- }
-}
-
-func requiresQuotes(u string) bool {
- // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
- for _, ch := range u {
- switch {
- case ch == '.' || ch == '/' || ch == '_':
- continue
- case '0' <= ch && ch <= '9':
- continue
- case 'A' <= ch && ch <= 'Z':
- continue
- case 'a' <= ch && ch <= 'z':
- continue
- default:
- return true
- }
- }
- return false
-}
-
-// writeProto3Any writes an expanded google.protobuf.Any message.
-//
-// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
-// required messages are not linked in).
-//
-// It returns (true, error) when sv was written in expanded format or an error
-// was encountered.
-func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
- md := m.Descriptor()
- fdURL := md.Fields().ByName("type_url")
- fdVal := md.Fields().ByName("value")
-
- url := m.Get(fdURL).String()
- mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
- if err != nil {
- return false, nil
- }
-
- b := m.Get(fdVal).Bytes()
- m2 := mt.New()
- if err := proto.Unmarshal(b, m2.Interface()); err != nil {
- return false, nil
- }
- w.Write([]byte("["))
- if requiresQuotes(url) {
- w.writeQuotedString(url)
- } else {
- w.Write([]byte(url))
- }
- if w.compact {
- w.Write([]byte("]:<"))
- } else {
- w.Write([]byte("]: <\n"))
- w.indent++
- }
- if err := w.writeMessage(m2); err != nil {
- return true, err
- }
- if w.compact {
- w.Write([]byte("> "))
- } else {
- w.indent--
- w.Write([]byte(">\n"))
- }
- return true, nil
-}
-
-func (w *textWriter) writeMessage(m protoreflect.Message) error {
- md := m.Descriptor()
- if w.expandAny && md.FullName() == "google.protobuf.Any" {
- if canExpand, err := w.writeProto3Any(m); canExpand {
- return err
- }
- }
-
- fds := md.Fields()
- for i := 0; i < fds.Len(); {
- fd := fds.Get(i)
- if od := fd.ContainingOneof(); od != nil {
- fd = m.WhichOneof(od)
- i += od.Fields().Len()
- } else {
- i++
- }
- if fd == nil || !m.Has(fd) {
- continue
- }
-
- switch {
- case fd.IsList():
- lv := m.Get(fd).List()
- for j := 0; j < lv.Len(); j++ {
- w.writeName(fd)
- v := lv.Get(j)
- if err := w.writeSingularValue(v, fd); err != nil {
- return err
- }
- w.WriteByte('\n')
- }
- case fd.IsMap():
- kfd := fd.MapKey()
- vfd := fd.MapValue()
- mv := m.Get(fd).Map()
-
- type entry struct{ key, val protoreflect.Value }
- var entries []entry
- mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
- entries = append(entries, entry{k.Value(), v})
- return true
- })
- sort.Slice(entries, func(i, j int) bool {
- switch kfd.Kind() {
- case protoreflect.BoolKind:
- return !entries[i].key.Bool() && entries[j].key.Bool()
- case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
- return entries[i].key.Int() < entries[j].key.Int()
- case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
- return entries[i].key.Uint() < entries[j].key.Uint()
- case protoreflect.StringKind:
- return entries[i].key.String() < entries[j].key.String()
- default:
- panic("invalid kind")
- }
- })
- for _, entry := range entries {
- w.writeName(fd)
- w.WriteByte('<')
- if !w.compact {
- w.WriteByte('\n')
- }
- w.indent++
- w.writeName(kfd)
- if err := w.writeSingularValue(entry.key, kfd); err != nil {
- return err
- }
- w.WriteByte('\n')
- w.writeName(vfd)
- if err := w.writeSingularValue(entry.val, vfd); err != nil {
- return err
- }
- w.WriteByte('\n')
- w.indent--
- w.WriteByte('>')
- w.WriteByte('\n')
- }
- default:
- w.writeName(fd)
- if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
- return err
- }
- w.WriteByte('\n')
- }
- }
-
- if b := m.GetUnknown(); len(b) > 0 {
- w.writeUnknownFields(b)
- }
- return w.writeExtensions(m)
-}
-
-func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
- switch fd.Kind() {
- case protoreflect.FloatKind, protoreflect.DoubleKind:
- switch vf := v.Float(); {
- case math.IsInf(vf, +1):
- w.Write(posInf)
- case math.IsInf(vf, -1):
- w.Write(negInf)
- case math.IsNaN(vf):
- w.Write(nan)
- default:
- fmt.Fprint(w, v.Interface())
- }
- case protoreflect.StringKind:
- // NOTE: This does not validate UTF-8 for historical reasons.
- w.writeQuotedString(string(v.String()))
- case protoreflect.BytesKind:
- w.writeQuotedString(string(v.Bytes()))
- case protoreflect.MessageKind, protoreflect.GroupKind:
- var bra, ket byte = '<', '>'
- if fd.Kind() == protoreflect.GroupKind {
- bra, ket = '{', '}'
- }
- w.WriteByte(bra)
- if !w.compact {
- w.WriteByte('\n')
- }
- w.indent++
- m := v.Message()
- if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
- b, err := m2.MarshalText()
- if err != nil {
- return err
- }
- w.Write(b)
- } else {
- w.writeMessage(m)
- }
- w.indent--
- w.WriteByte(ket)
- case protoreflect.EnumKind:
- if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
- fmt.Fprint(w, ev.Name())
- } else {
- fmt.Fprint(w, v.Enum())
- }
- default:
- fmt.Fprint(w, v.Interface())
- }
- return nil
-}
-
-// writeQuotedString writes a quoted string in the protocol buffer text format.
-func (w *textWriter) writeQuotedString(s string) {
- w.WriteByte('"')
- for i := 0; i < len(s); i++ {
- switch c := s[i]; c {
- case '\n':
- w.buf = append(w.buf, `\n`...)
- case '\r':
- w.buf = append(w.buf, `\r`...)
- case '\t':
- w.buf = append(w.buf, `\t`...)
- case '"':
- w.buf = append(w.buf, `\"`...)
- case '\\':
- w.buf = append(w.buf, `\\`...)
- default:
- if isPrint := c >= 0x20 && c < 0x7f; isPrint {
- w.buf = append(w.buf, c)
- } else {
- w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
- }
- }
- }
- w.WriteByte('"')
-}
-
-func (w *textWriter) writeUnknownFields(b []byte) {
- if !w.compact {
- fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
- }
-
- for len(b) > 0 {
- num, wtyp, n := protowire.ConsumeTag(b)
- if n < 0 {
- return
- }
- b = b[n:]
-
- if wtyp == protowire.EndGroupType {
- w.indent--
- w.Write(endBraceNewline)
- continue
- }
- fmt.Fprint(w, num)
- if wtyp != protowire.StartGroupType {
- w.WriteByte(':')
- }
- if !w.compact || wtyp == protowire.StartGroupType {
- w.WriteByte(' ')
- }
- switch wtyp {
- case protowire.VarintType:
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return
- }
- b = b[n:]
- fmt.Fprint(w, v)
- case protowire.Fixed32Type:
- v, n := protowire.ConsumeFixed32(b)
- if n < 0 {
- return
- }
- b = b[n:]
- fmt.Fprint(w, v)
- case protowire.Fixed64Type:
- v, n := protowire.ConsumeFixed64(b)
- if n < 0 {
- return
- }
- b = b[n:]
- fmt.Fprint(w, v)
- case protowire.BytesType:
- v, n := protowire.ConsumeBytes(b)
- if n < 0 {
- return
- }
- b = b[n:]
- fmt.Fprintf(w, "%q", v)
- case protowire.StartGroupType:
- w.WriteByte('{')
- w.indent++
- default:
- fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
- }
- w.WriteByte('\n')
- }
-}
-
-// writeExtensions writes all the extensions in m.
-func (w *textWriter) writeExtensions(m protoreflect.Message) error {
- md := m.Descriptor()
- if md.ExtensionRanges().Len() == 0 {
- return nil
- }
-
- type ext struct {
- desc protoreflect.FieldDescriptor
- val protoreflect.Value
- }
- var exts []ext
- m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
- if fd.IsExtension() {
- exts = append(exts, ext{fd, v})
- }
- return true
- })
- sort.Slice(exts, func(i, j int) bool {
- return exts[i].desc.Number() < exts[j].desc.Number()
- })
-
- for _, ext := range exts {
- // For message set, use the name of the message as the extension name.
- name := string(ext.desc.FullName())
- if isMessageSet(ext.desc.ContainingMessage()) {
- name = strings.TrimSuffix(name, ".message_set_extension")
- }
-
- if !ext.desc.IsList() {
- if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
- return err
- }
- } else {
- lv := ext.val.List()
- for i := 0; i < lv.Len(); i++ {
- if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
- return err
- }
- }
- }
- }
- return nil
-}
-
-func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
- fmt.Fprintf(w, "[%s]:", name)
- if !w.compact {
- w.WriteByte(' ')
- }
- if err := w.writeSingularValue(v, fd); err != nil {
- return err
- }
- w.WriteByte('\n')
- return nil
-}
-
-func (w *textWriter) writeIndent() {
- if !w.complete {
- return
- }
- for i := 0; i < w.indent*2; i++ {
- w.buf = append(w.buf, ' ')
- }
- w.complete = false
-}
diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go
deleted file mode 100644
index d7c28da5..00000000
--- a/vendor/github.com/golang/protobuf/proto/wire.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-import (
- protoV2 "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/runtime/protoiface"
-)
-
-// Size returns the size in bytes of the wire-format encoding of m.
-func Size(m Message) int {
- if m == nil {
- return 0
- }
- mi := MessageV2(m)
- return protoV2.Size(mi)
-}
-
-// Marshal returns the wire-format encoding of m.
-func Marshal(m Message) ([]byte, error) {
- b, err := marshalAppend(nil, m, false)
- if b == nil {
- b = zeroBytes
- }
- return b, err
-}
-
-var zeroBytes = make([]byte, 0, 0)
-
-func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
- if m == nil {
- return nil, ErrNil
- }
- mi := MessageV2(m)
- nbuf, err := protoV2.MarshalOptions{
- Deterministic: deterministic,
- AllowPartial: true,
- }.MarshalAppend(buf, mi)
- if err != nil {
- return buf, err
- }
- if len(buf) == len(nbuf) {
- if !mi.ProtoReflect().IsValid() {
- return buf, ErrNil
- }
- }
- return nbuf, checkRequiredNotSet(mi)
-}
-
-// Unmarshal parses a wire-format message in b and places the decoded results in m.
-//
-// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
-// removed. Use UnmarshalMerge to preserve and append to existing data.
-func Unmarshal(b []byte, m Message) error {
- m.Reset()
- return UnmarshalMerge(b, m)
-}
-
-// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
-func UnmarshalMerge(b []byte, m Message) error {
- mi := MessageV2(m)
- out, err := protoV2.UnmarshalOptions{
- AllowPartial: true,
- Merge: true,
- }.UnmarshalState(protoiface.UnmarshalInput{
- Buf: b,
- Message: mi.ProtoReflect(),
- })
- if err != nil {
- return err
- }
- if out.Flags&protoiface.UnmarshalInitialized > 0 {
- return nil
- }
- return checkRequiredNotSet(mi)
-}
diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go
deleted file mode 100644
index 398e3485..00000000
--- a/vendor/github.com/golang/protobuf/proto/wrappers.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package proto
-
-// Bool stores v in a new bool value and returns a pointer to it.
-func Bool(v bool) *bool { return &v }
-
-// Int stores v in a new int32 value and returns a pointer to it.
-//
-// Deprecated: Use Int32 instead.
-func Int(v int) *int32 { return Int32(int32(v)) }
-
-// Int32 stores v in a new int32 value and returns a pointer to it.
-func Int32(v int32) *int32 { return &v }
-
-// Int64 stores v in a new int64 value and returns a pointer to it.
-func Int64(v int64) *int64 { return &v }
-
-// Uint32 stores v in a new uint32 value and returns a pointer to it.
-func Uint32(v uint32) *uint32 { return &v }
-
-// Uint64 stores v in a new uint64 value and returns a pointer to it.
-func Uint64(v uint64) *uint64 { return &v }
-
-// Float32 stores v in a new float32 value and returns a pointer to it.
-func Float32(v float32) *float32 { return &v }
-
-// Float64 stores v in a new float64 value and returns a pointer to it.
-func Float64(v float64) *float64 { return &v }
-
-// String stores v in a new string value and returns a pointer to it.
-func String(v string) *string { return &v }
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
deleted file mode 100644
index 16686a65..00000000
--- a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/empty/empty.proto
-
-package empty
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- emptypb "google.golang.org/protobuf/types/known/emptypb"
- reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/empty.proto.
-
-type Empty = emptypb.Empty
-
-var File_github_com_golang_protobuf_ptypes_empty_empty_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = []byte{
- 0x0a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
- 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
- 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x3b, 0x65, 0x6d,
- 0x70, 0x74, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() }
-func file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() {
- if File_github_com_golang_protobuf_ptypes_empty_empty_proto != nil {
- return
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 0,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes,
- DependencyIndexes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs,
- }.Build()
- File_github_com_golang_protobuf_ptypes_empty_empty_proto = out.File
- file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = nil
- file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = nil
- file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/google/go-cmp/LICENSE b/vendor/github.com/google/go-cmp/LICENSE
deleted file mode 100644
index 32017f8f..00000000
--- a/vendor/github.com/google/go-cmp/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2017 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go
deleted file mode 100644
index 0f5b8a48..00000000
--- a/vendor/github.com/google/go-cmp/cmp/compare.go
+++ /dev/null
@@ -1,671 +0,0 @@
-// Copyright 2017, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package cmp determines equality of values.
-//
-// This package is intended to be a more powerful and safer alternative to
-// [reflect.DeepEqual] for comparing whether two values are semantically equal.
-// It is intended to only be used in tests, as performance is not a goal and
-// it may panic if it cannot compare the values. Its propensity towards
-// panicking means that its unsuitable for production environments where a
-// spurious panic may be fatal.
-//
-// The primary features of cmp are:
-//
-// - When the default behavior of equality does not suit the test's needs,
-// custom equality functions can override the equality operation.
-// For example, an equality function may report floats as equal so long as
-// they are within some tolerance of each other.
-//
-// - Types with an Equal method (e.g., [time.Time.Equal]) may use that method
-// to determine equality. This allows package authors to determine
-// the equality operation for the types that they define.
-//
-// - If no custom equality functions are used and no Equal method is defined,
-// equality is determined by recursively comparing the primitive kinds on
-// both values, much like [reflect.DeepEqual]. Unlike [reflect.DeepEqual],
-// unexported fields are not compared by default; they result in panics
-// unless suppressed by using an [Ignore] option
-// (see [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported])
-// or explicitly compared using the [Exporter] option.
-package cmp
-
-import (
- "fmt"
- "reflect"
- "strings"
-
- "github.com/google/go-cmp/cmp/internal/diff"
- "github.com/google/go-cmp/cmp/internal/function"
- "github.com/google/go-cmp/cmp/internal/value"
-)
-
-// TODO(≥go1.18): Use any instead of interface{}.
-
-// Equal reports whether x and y are equal by recursively applying the
-// following rules in the given order to x and y and all of their sub-values:
-//
-// - Let S be the set of all [Ignore], [Transformer], and [Comparer] options that
-// remain after applying all path filters, value filters, and type filters.
-// If at least one [Ignore] exists in S, then the comparison is ignored.
-// If the number of [Transformer] and [Comparer] options in S is non-zero,
-// then Equal panics because it is ambiguous which option to use.
-// If S contains a single [Transformer], then use that to transform
-// the current values and recursively call Equal on the output values.
-// If S contains a single [Comparer], then use that to compare the current values.
-// Otherwise, evaluation proceeds to the next rule.
-//
-// - If the values have an Equal method of the form "(T) Equal(T) bool" or
-// "(T) Equal(I) bool" where T is assignable to I, then use the result of
-// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
-// evaluation proceeds to the next rule.
-//
-// - Lastly, try to compare x and y based on their basic kinds.
-// Simple kinds like booleans, integers, floats, complex numbers, strings,
-// and channels are compared using the equivalent of the == operator in Go.
-// Functions are only equal if they are both nil, otherwise they are unequal.
-//
-// Structs are equal if recursively calling Equal on all fields report equal.
-// If a struct contains unexported fields, Equal panics unless an [Ignore] option
-// (e.g., [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ignores that field
-// or the [Exporter] option explicitly permits comparing the unexported field.
-//
-// Slices are equal if they are both nil or both non-nil, where recursively
-// calling Equal on all non-ignored slice or array elements report equal.
-// Empty non-nil slices and nil slices are not equal; to equate empty slices,
-// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
-//
-// Maps are equal if they are both nil or both non-nil, where recursively
-// calling Equal on all non-ignored map entries report equal.
-// Map keys are equal according to the == operator.
-// To use custom comparisons for map keys, consider using
-// [github.com/google/go-cmp/cmp/cmpopts.SortMaps].
-// Empty non-nil maps and nil maps are not equal; to equate empty maps,
-// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
-//
-// Pointers and interfaces are equal if they are both nil or both non-nil,
-// where they have the same underlying concrete type and recursively
-// calling Equal on the underlying values reports equal.
-//
-// Before recursing into a pointer, slice element, or map, the current path
-// is checked to detect whether the address has already been visited.
-// If there is a cycle, then the pointed at values are considered equal
-// only if both addresses were previously visited in the same path step.
-func Equal(x, y interface{}, opts ...Option) bool {
- s := newState(opts)
- s.compareAny(rootStep(x, y))
- return s.result.Equal()
-}
-
-// Diff returns a human-readable report of the differences between two values:
-// y - x. It returns an empty string if and only if Equal returns true for the
-// same input values and options.
-//
-// The output is displayed as a literal in pseudo-Go syntax.
-// At the start of each line, a "-" prefix indicates an element removed from x,
-// a "+" prefix to indicates an element added from y, and the lack of a prefix
-// indicates an element common to both x and y. If possible, the output
-// uses fmt.Stringer.String or error.Error methods to produce more humanly
-// readable outputs. In such cases, the string is prefixed with either an
-// 's' or 'e' character, respectively, to indicate that the method was called.
-//
-// Do not depend on this output being stable. If you need the ability to
-// programmatically interpret the difference, consider using a custom Reporter.
-func Diff(x, y interface{}, opts ...Option) string {
- s := newState(opts)
-
- // Optimization: If there are no other reporters, we can optimize for the
- // common case where the result is equal (and thus no reported difference).
- // This avoids the expensive construction of a difference tree.
- if len(s.reporters) == 0 {
- s.compareAny(rootStep(x, y))
- if s.result.Equal() {
- return ""
- }
- s.result = diff.Result{} // Reset results
- }
-
- r := new(defaultReporter)
- s.reporters = append(s.reporters, reporter{r})
- s.compareAny(rootStep(x, y))
- d := r.String()
- if (d == "") != s.result.Equal() {
- panic("inconsistent difference and equality results")
- }
- return d
-}
-
-// rootStep constructs the first path step. If x and y have differing types,
-// then they are stored within an empty interface type.
-func rootStep(x, y interface{}) PathStep {
- vx := reflect.ValueOf(x)
- vy := reflect.ValueOf(y)
-
- // If the inputs are different types, auto-wrap them in an empty interface
- // so that they have the same parent type.
- var t reflect.Type
- if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
- t = anyType
- if vx.IsValid() {
- vvx := reflect.New(t).Elem()
- vvx.Set(vx)
- vx = vvx
- }
- if vy.IsValid() {
- vvy := reflect.New(t).Elem()
- vvy.Set(vy)
- vy = vvy
- }
- } else {
- t = vx.Type()
- }
-
- return &pathStep{t, vx, vy}
-}
-
-type state struct {
- // These fields represent the "comparison state".
- // Calling statelessCompare must not result in observable changes to these.
- result diff.Result // The current result of comparison
- curPath Path // The current path in the value tree
- curPtrs pointerPath // The current set of visited pointers
- reporters []reporter // Optional reporters
-
- // recChecker checks for infinite cycles applying the same set of
- // transformers upon the output of itself.
- recChecker recChecker
-
- // dynChecker triggers pseudo-random checks for option correctness.
- // It is safe for statelessCompare to mutate this value.
- dynChecker dynChecker
-
- // These fields, once set by processOption, will not change.
- exporters []exporter // List of exporters for structs with unexported fields
- opts Options // List of all fundamental and filter options
-}
-
-func newState(opts []Option) *state {
- // Always ensure a validator option exists to validate the inputs.
- s := &state{opts: Options{validator{}}}
- s.curPtrs.Init()
- s.processOption(Options(opts))
- return s
-}
-
-func (s *state) processOption(opt Option) {
- switch opt := opt.(type) {
- case nil:
- case Options:
- for _, o := range opt {
- s.processOption(o)
- }
- case coreOption:
- type filtered interface {
- isFiltered() bool
- }
- if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() {
- panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt))
- }
- s.opts = append(s.opts, opt)
- case exporter:
- s.exporters = append(s.exporters, opt)
- case reporter:
- s.reporters = append(s.reporters, opt)
- default:
- panic(fmt.Sprintf("unknown option %T", opt))
- }
-}
-
-// statelessCompare compares two values and returns the result.
-// This function is stateless in that it does not alter the current result,
-// or output to any registered reporters.
-func (s *state) statelessCompare(step PathStep) diff.Result {
- // We do not save and restore curPath and curPtrs because all of the
- // compareX methods should properly push and pop from them.
- // It is an implementation bug if the contents of the paths differ from
- // when calling this function to when returning from it.
-
- oldResult, oldReporters := s.result, s.reporters
- s.result = diff.Result{} // Reset result
- s.reporters = nil // Remove reporters to avoid spurious printouts
- s.compareAny(step)
- res := s.result
- s.result, s.reporters = oldResult, oldReporters
- return res
-}
-
-func (s *state) compareAny(step PathStep) {
- // Update the path stack.
- s.curPath.push(step)
- defer s.curPath.pop()
- for _, r := range s.reporters {
- r.PushStep(step)
- defer r.PopStep()
- }
- s.recChecker.Check(s.curPath)
-
- // Cycle-detection for slice elements (see NOTE in compareSlice).
- t := step.Type()
- vx, vy := step.Values()
- if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() {
- px, py := vx.Addr(), vy.Addr()
- if eq, visited := s.curPtrs.Push(px, py); visited {
- s.report(eq, reportByCycle)
- return
- }
- defer s.curPtrs.Pop(px, py)
- }
-
- // Rule 1: Check whether an option applies on this node in the value tree.
- if s.tryOptions(t, vx, vy) {
- return
- }
-
- // Rule 2: Check whether the type has a valid Equal method.
- if s.tryMethod(t, vx, vy) {
- return
- }
-
- // Rule 3: Compare based on the underlying kind.
- switch t.Kind() {
- case reflect.Bool:
- s.report(vx.Bool() == vy.Bool(), 0)
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- s.report(vx.Int() == vy.Int(), 0)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- s.report(vx.Uint() == vy.Uint(), 0)
- case reflect.Float32, reflect.Float64:
- s.report(vx.Float() == vy.Float(), 0)
- case reflect.Complex64, reflect.Complex128:
- s.report(vx.Complex() == vy.Complex(), 0)
- case reflect.String:
- s.report(vx.String() == vy.String(), 0)
- case reflect.Chan, reflect.UnsafePointer:
- s.report(vx.Pointer() == vy.Pointer(), 0)
- case reflect.Func:
- s.report(vx.IsNil() && vy.IsNil(), 0)
- case reflect.Struct:
- s.compareStruct(t, vx, vy)
- case reflect.Slice, reflect.Array:
- s.compareSlice(t, vx, vy)
- case reflect.Map:
- s.compareMap(t, vx, vy)
- case reflect.Ptr:
- s.comparePtr(t, vx, vy)
- case reflect.Interface:
- s.compareInterface(t, vx, vy)
- default:
- panic(fmt.Sprintf("%v kind not handled", t.Kind()))
- }
-}
-
-func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool {
- // Evaluate all filters and apply the remaining options.
- if opt := s.opts.filter(s, t, vx, vy); opt != nil {
- opt.apply(s, vx, vy)
- return true
- }
- return false
-}
-
-func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool {
- // Check if this type even has an Equal method.
- m, ok := t.MethodByName("Equal")
- if !ok || !function.IsType(m.Type, function.EqualAssignable) {
- return false
- }
-
- eq := s.callTTBFunc(m.Func, vx, vy)
- s.report(eq, reportByMethod)
- return true
-}
-
-func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value {
- if !s.dynChecker.Next() {
- return f.Call([]reflect.Value{v})[0]
- }
-
- // Run the function twice and ensure that we get the same results back.
- // We run in goroutines so that the race detector (if enabled) can detect
- // unsafe mutations to the input.
- c := make(chan reflect.Value)
- go detectRaces(c, f, v)
- got := <-c
- want := f.Call([]reflect.Value{v})[0]
- if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() {
- // To avoid false-positives with non-reflexive equality operations,
- // we sanity check whether a value is equal to itself.
- if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() {
- return want
- }
- panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f)))
- }
- return want
-}
-
-func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
- if !s.dynChecker.Next() {
- return f.Call([]reflect.Value{x, y})[0].Bool()
- }
-
- // Swapping the input arguments is sufficient to check that
- // f is symmetric and deterministic.
- // We run in goroutines so that the race detector (if enabled) can detect
- // unsafe mutations to the input.
- c := make(chan reflect.Value)
- go detectRaces(c, f, y, x)
- got := <-c
- want := f.Call([]reflect.Value{x, y})[0].Bool()
- if !got.IsValid() || got.Bool() != want {
- panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f)))
- }
- return want
-}
-
-func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
- var ret reflect.Value
- defer func() {
- recover() // Ignore panics, let the other call to f panic instead
- c <- ret
- }()
- ret = f.Call(vs)[0]
-}
-
-func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
- var addr bool
- var vax, vay reflect.Value // Addressable versions of vx and vy
-
- var mayForce, mayForceInit bool
- step := StructField{&structField{}}
- for i := 0; i < t.NumField(); i++ {
- step.typ = t.Field(i).Type
- step.vx = vx.Field(i)
- step.vy = vy.Field(i)
- step.name = t.Field(i).Name
- step.idx = i
- step.unexported = !isExported(step.name)
- if step.unexported {
- if step.name == "_" {
- continue
- }
- // Defer checking of unexported fields until later to give an
- // Ignore a chance to ignore the field.
- if !vax.IsValid() || !vay.IsValid() {
- // For retrieveUnexportedField to work, the parent struct must
- // be addressable. Create a new copy of the values if
- // necessary to make them addressable.
- addr = vx.CanAddr() || vy.CanAddr()
- vax = makeAddressable(vx)
- vay = makeAddressable(vy)
- }
- if !mayForceInit {
- for _, xf := range s.exporters {
- mayForce = mayForce || xf(t)
- }
- mayForceInit = true
- }
- step.mayForce = mayForce
- step.paddr = addr
- step.pvx = vax
- step.pvy = vay
- step.field = t.Field(i)
- }
- s.compareAny(step)
- }
-}
-
-func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) {
- isSlice := t.Kind() == reflect.Slice
- if isSlice && (vx.IsNil() || vy.IsNil()) {
- s.report(vx.IsNil() && vy.IsNil(), 0)
- return
- }
-
- // NOTE: It is incorrect to call curPtrs.Push on the slice header pointer
- // since slices represents a list of pointers, rather than a single pointer.
- // The pointer checking logic must be handled on a per-element basis
- // in compareAny.
- //
- // A slice header (see reflect.SliceHeader) in Go is a tuple of a starting
- // pointer P, a length N, and a capacity C. Supposing each slice element has
- // a memory size of M, then the slice is equivalent to the list of pointers:
- // [P+i*M for i in range(N)]
- //
- // For example, v[:0] and v[:1] are slices with the same starting pointer,
- // but they are clearly different values. Using the slice pointer alone
- // violates the assumption that equal pointers implies equal values.
-
- step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}}
- withIndexes := func(ix, iy int) SliceIndex {
- if ix >= 0 {
- step.vx, step.xkey = vx.Index(ix), ix
- } else {
- step.vx, step.xkey = reflect.Value{}, -1
- }
- if iy >= 0 {
- step.vy, step.ykey = vy.Index(iy), iy
- } else {
- step.vy, step.ykey = reflect.Value{}, -1
- }
- return step
- }
-
- // Ignore options are able to ignore missing elements in a slice.
- // However, detecting these reliably requires an optimal differencing
- // algorithm, for which diff.Difference is not.
- //
- // Instead, we first iterate through both slices to detect which elements
- // would be ignored if standing alone. The index of non-discarded elements
- // are stored in a separate slice, which diffing is then performed on.
- var indexesX, indexesY []int
- var ignoredX, ignoredY []bool
- for ix := 0; ix < vx.Len(); ix++ {
- ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0
- if !ignored {
- indexesX = append(indexesX, ix)
- }
- ignoredX = append(ignoredX, ignored)
- }
- for iy := 0; iy < vy.Len(); iy++ {
- ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0
- if !ignored {
- indexesY = append(indexesY, iy)
- }
- ignoredY = append(ignoredY, ignored)
- }
-
- // Compute an edit-script for slices vx and vy (excluding ignored elements).
- edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result {
- return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy]))
- })
-
- // Replay the ignore-scripts and the edit-script.
- var ix, iy int
- for ix < vx.Len() || iy < vy.Len() {
- var e diff.EditType
- switch {
- case ix < len(ignoredX) && ignoredX[ix]:
- e = diff.UniqueX
- case iy < len(ignoredY) && ignoredY[iy]:
- e = diff.UniqueY
- default:
- e, edits = edits[0], edits[1:]
- }
- switch e {
- case diff.UniqueX:
- s.compareAny(withIndexes(ix, -1))
- ix++
- case diff.UniqueY:
- s.compareAny(withIndexes(-1, iy))
- iy++
- default:
- s.compareAny(withIndexes(ix, iy))
- ix++
- iy++
- }
- }
-}
-
-func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) {
- if vx.IsNil() || vy.IsNil() {
- s.report(vx.IsNil() && vy.IsNil(), 0)
- return
- }
-
- // Cycle-detection for maps.
- if eq, visited := s.curPtrs.Push(vx, vy); visited {
- s.report(eq, reportByCycle)
- return
- }
- defer s.curPtrs.Pop(vx, vy)
-
- // We combine and sort the two map keys so that we can perform the
- // comparisons in a deterministic order.
- step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}}
- for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
- step.vx = vx.MapIndex(k)
- step.vy = vy.MapIndex(k)
- step.key = k
- if !step.vx.IsValid() && !step.vy.IsValid() {
- // It is possible for both vx and vy to be invalid if the
- // key contained a NaN value in it.
- //
- // Even with the ability to retrieve NaN keys in Go 1.12,
- // there still isn't a sensible way to compare the values since
- // a NaN key may map to multiple unordered values.
- // The most reasonable way to compare NaNs would be to compare the
- // set of values. However, this is impossible to do efficiently
- // since set equality is provably an O(n^2) operation given only
- // an Equal function. If we had a Less function or Hash function,
- // this could be done in O(n*log(n)) or O(n), respectively.
- //
- // Rather than adding complex logic to deal with NaNs, make it
- // the user's responsibility to compare such obscure maps.
- const help = "consider providing a Comparer to compare the map"
- panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help))
- }
- s.compareAny(step)
- }
-}
-
-func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) {
- if vx.IsNil() || vy.IsNil() {
- s.report(vx.IsNil() && vy.IsNil(), 0)
- return
- }
-
- // Cycle-detection for pointers.
- if eq, visited := s.curPtrs.Push(vx, vy); visited {
- s.report(eq, reportByCycle)
- return
- }
- defer s.curPtrs.Pop(vx, vy)
-
- vx, vy = vx.Elem(), vy.Elem()
- s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}})
-}
-
-func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) {
- if vx.IsNil() || vy.IsNil() {
- s.report(vx.IsNil() && vy.IsNil(), 0)
- return
- }
- vx, vy = vx.Elem(), vy.Elem()
- if vx.Type() != vy.Type() {
- s.report(false, 0)
- return
- }
- s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}})
-}
-
-func (s *state) report(eq bool, rf resultFlags) {
- if rf&reportByIgnore == 0 {
- if eq {
- s.result.NumSame++
- rf |= reportEqual
- } else {
- s.result.NumDiff++
- rf |= reportUnequal
- }
- }
- for _, r := range s.reporters {
- r.Report(Result{flags: rf})
- }
-}
-
-// recChecker tracks the state needed to periodically perform checks that
-// user provided transformers are not stuck in an infinitely recursive cycle.
-type recChecker struct{ next int }
-
-// Check scans the Path for any recursive transformers and panics when any
-// recursive transformers are detected. Note that the presence of a
-// recursive Transformer does not necessarily imply an infinite cycle.
-// As such, this check only activates after some minimal number of path steps.
-func (rc *recChecker) Check(p Path) {
- const minLen = 1 << 16
- if rc.next == 0 {
- rc.next = minLen
- }
- if len(p) < rc.next {
- return
- }
- rc.next <<= 1
-
- // Check whether the same transformer has appeared at least twice.
- var ss []string
- m := map[Option]int{}
- for _, ps := range p {
- if t, ok := ps.(Transform); ok {
- t := t.Option()
- if m[t] == 1 { // Transformer was used exactly once before
- tf := t.(*transformer).fnc.Type()
- ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0)))
- }
- m[t]++
- }
- }
- if len(ss) > 0 {
- const warning = "recursive set of Transformers detected"
- const help = "consider using cmpopts.AcyclicTransformer"
- set := strings.Join(ss, "\n\t")
- panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help))
- }
-}
-
-// dynChecker tracks the state needed to periodically perform checks that
-// user provided functions are symmetric and deterministic.
-// The zero value is safe for immediate use.
-type dynChecker struct{ curr, next int }
-
-// Next increments the state and reports whether a check should be performed.
-//
-// Checks occur every Nth function call, where N is a triangular number:
-//
-// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
-//
-// See https://en.wikipedia.org/wiki/Triangular_number
-//
-// This sequence ensures that the cost of checks drops significantly as
-// the number of functions calls grows larger.
-func (dc *dynChecker) Next() bool {
- ok := dc.curr == dc.next
- if ok {
- dc.curr = 0
- dc.next++
- }
- dc.curr++
- return ok
-}
-
-// makeAddressable returns a value that is always addressable.
-// It returns the input verbatim if it is already addressable,
-// otherwise it creates a new value and returns an addressable copy.
-func makeAddressable(v reflect.Value) reflect.Value {
- if v.CanAddr() {
- return v
- }
- vc := reflect.New(v.Type()).Elem()
- vc.Set(v)
- return vc
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/export.go b/vendor/github.com/google/go-cmp/cmp/export.go
deleted file mode 100644
index 29f82fe6..00000000
--- a/vendor/github.com/google/go-cmp/cmp/export.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2017, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmp
-
-import (
- "reflect"
- "unsafe"
-)
-
-// retrieveUnexportedField uses unsafe to forcibly retrieve any field from
-// a struct such that the value has read-write permissions.
-//
-// The parent struct, v, must be addressable, while f must be a StructField
-// describing the field to retrieve. If addr is false,
-// then the returned value will be shallowed copied to be non-addressable.
-func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value {
- ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem()
- if !addr {
- // A field is addressable if and only if the struct is addressable.
- // If the original parent value was not addressable, shallow copy the
- // value to make it non-addressable to avoid leaking an implementation
- // detail of how forcibly exporting a field works.
- if ve.Kind() == reflect.Interface && ve.IsNil() {
- return reflect.Zero(f.Type)
- }
- return reflect.ValueOf(ve.Interface()).Convert(f.Type)
- }
- return ve
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
deleted file mode 100644
index 36062a60..00000000
--- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2017, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !cmp_debug
-// +build !cmp_debug
-
-package diff
-
-var debug debugger
-
-type debugger struct{}
-
-func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc {
- return f
-}
-func (debugger) Update() {}
-func (debugger) Finish() {}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
deleted file mode 100644
index a3b97a1a..00000000
--- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2017, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build cmp_debug
-// +build cmp_debug
-
-package diff
-
-import (
- "fmt"
- "strings"
- "sync"
- "time"
-)
-
-// The algorithm can be seen running in real-time by enabling debugging:
-// go test -tags=cmp_debug -v
-//
-// Example output:
-// === RUN TestDifference/#34
-// ┌───────────────────────────────┐
-// │ \ · · · · · · · · · · · · · · │
-// │ · # · · · · · · · · · · · · · │
-// │ · \ · · · · · · · · · · · · · │
-// │ · · \ · · · · · · · · · · · · │
-// │ · · · X # · · · · · · · · · · │
-// │ · · · # \ · · · · · · · · · · │
-// │ · · · · · # # · · · · · · · · │
-// │ · · · · · # \ · · · · · · · · │
-// │ · · · · · · · \ · · · · · · · │
-// │ · · · · · · · · \ · · · · · · │
-// │ · · · · · · · · · \ · · · · · │
-// │ · · · · · · · · · · \ · · # · │
-// │ · · · · · · · · · · · \ # # · │
-// │ · · · · · · · · · · · # # # · │
-// │ · · · · · · · · · · # # # # · │
-// │ · · · · · · · · · # # # # # · │
-// │ · · · · · · · · · · · · · · \ │
-// └───────────────────────────────┘
-// [.Y..M.XY......YXYXY.|]
-//
-// The grid represents the edit-graph where the horizontal axis represents
-// list X and the vertical axis represents list Y. The start of the two lists
-// is the top-left, while the ends are the bottom-right. The '·' represents
-// an unexplored node in the graph. The '\' indicates that the two symbols
-// from list X and Y are equal. The 'X' indicates that two symbols are similar
-// (but not exactly equal) to each other. The '#' indicates that the two symbols
-// are different (and not similar). The algorithm traverses this graph trying to
-// make the paths starting in the top-left and the bottom-right connect.
-//
-// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents
-// the currently established path from the forward and reverse searches,
-// separated by a '|' character.
-
-const (
- updateDelay = 100 * time.Millisecond
- finishDelay = 500 * time.Millisecond
- ansiTerminal = true // ANSI escape codes used to move terminal cursor
-)
-
-var debug debugger
-
-type debugger struct {
- sync.Mutex
- p1, p2 EditScript
- fwdPath, revPath *EditScript
- grid []byte
- lines int
-}
-
-func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc {
- dbg.Lock()
- dbg.fwdPath, dbg.revPath = p1, p2
- top := "┌─" + strings.Repeat("──", nx) + "┐\n"
- row := "│ " + strings.Repeat("· ", nx) + "│\n"
- btm := "└─" + strings.Repeat("──", nx) + "┘\n"
- dbg.grid = []byte(top + strings.Repeat(row, ny) + btm)
- dbg.lines = strings.Count(dbg.String(), "\n")
- fmt.Print(dbg)
-
- // Wrap the EqualFunc so that we can intercept each result.
- return func(ix, iy int) (r Result) {
- cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")]
- for i := range cell {
- cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot
- }
- switch r = f(ix, iy); {
- case r.Equal():
- cell[0] = '\\'
- case r.Similar():
- cell[0] = 'X'
- default:
- cell[0] = '#'
- }
- return
- }
-}
-
-func (dbg *debugger) Update() {
- dbg.print(updateDelay)
-}
-
-func (dbg *debugger) Finish() {
- dbg.print(finishDelay)
- dbg.Unlock()
-}
-
-func (dbg *debugger) String() string {
- dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0]
- for i := len(*dbg.revPath) - 1; i >= 0; i-- {
- dbg.p2 = append(dbg.p2, (*dbg.revPath)[i])
- }
- return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2)
-}
-
-func (dbg *debugger) print(d time.Duration) {
- if ansiTerminal {
- fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor
- }
- fmt.Print(dbg)
- time.Sleep(d)
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
deleted file mode 100644
index a248e543..00000000
--- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
+++ /dev/null
@@ -1,402 +0,0 @@
-// Copyright 2017, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package diff implements an algorithm for producing edit-scripts.
-// The edit-script is a sequence of operations needed to transform one list
-// of symbols into another (or vice-versa). The edits allowed are insertions,
-// deletions, and modifications. The summation of all edits is called the
-// Levenshtein distance as this problem is well-known in computer science.
-//
-// This package prioritizes performance over accuracy. That is, the run time
-// is more important than obtaining a minimal Levenshtein distance.
-package diff
-
-import (
- "math/rand"
- "time"
-
- "github.com/google/go-cmp/cmp/internal/flags"
-)
-
-// EditType represents a single operation within an edit-script.
-type EditType uint8
-
-const (
- // Identity indicates that a symbol pair is identical in both list X and Y.
- Identity EditType = iota
- // UniqueX indicates that a symbol only exists in X and not Y.
- UniqueX
- // UniqueY indicates that a symbol only exists in Y and not X.
- UniqueY
- // Modified indicates that a symbol pair is a modification of each other.
- Modified
-)
-
-// EditScript represents the series of differences between two lists.
-type EditScript []EditType
-
-// String returns a human-readable string representing the edit-script where
-// Identity, UniqueX, UniqueY, and Modified are represented by the
-// '.', 'X', 'Y', and 'M' characters, respectively.
-func (es EditScript) String() string {
- b := make([]byte, len(es))
- for i, e := range es {
- switch e {
- case Identity:
- b[i] = '.'
- case UniqueX:
- b[i] = 'X'
- case UniqueY:
- b[i] = 'Y'
- case Modified:
- b[i] = 'M'
- default:
- panic("invalid edit-type")
- }
- }
- return string(b)
-}
-
-// stats returns a histogram of the number of each type of edit operation.
-func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) {
- for _, e := range es {
- switch e {
- case Identity:
- s.NI++
- case UniqueX:
- s.NX++
- case UniqueY:
- s.NY++
- case Modified:
- s.NM++
- default:
- panic("invalid edit-type")
- }
- }
- return
-}
-
-// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if
-// lists X and Y are equal.
-func (es EditScript) Dist() int { return len(es) - es.stats().NI }
-
-// LenX is the length of the X list.
-func (es EditScript) LenX() int { return len(es) - es.stats().NY }
-
-// LenY is the length of the Y list.
-func (es EditScript) LenY() int { return len(es) - es.stats().NX }
-
-// EqualFunc reports whether the symbols at indexes ix and iy are equal.
-// When called by Difference, the index is guaranteed to be within nx and ny.
-type EqualFunc func(ix int, iy int) Result
-
-// Result is the result of comparison.
-// NumSame is the number of sub-elements that are equal.
-// NumDiff is the number of sub-elements that are not equal.
-type Result struct{ NumSame, NumDiff int }
-
-// BoolResult returns a Result that is either Equal or not Equal.
-func BoolResult(b bool) Result {
- if b {
- return Result{NumSame: 1} // Equal, Similar
- } else {
- return Result{NumDiff: 2} // Not Equal, not Similar
- }
-}
-
-// Equal indicates whether the symbols are equal. Two symbols are equal
-// if and only if NumDiff == 0. If Equal, then they are also Similar.
-func (r Result) Equal() bool { return r.NumDiff == 0 }
-
-// Similar indicates whether two symbols are similar and may be represented
-// by using the Modified type. As a special case, we consider binary comparisons
-// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar.
-//
-// The exact ratio of NumSame to NumDiff to determine similarity may change.
-func (r Result) Similar() bool {
- // Use NumSame+1 to offset NumSame so that binary comparisons are similar.
- return r.NumSame+1 >= r.NumDiff
-}
-
-var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
-
-// Difference reports whether two lists of lengths nx and ny are equal
-// given the definition of equality provided as f.
-//
-// This function returns an edit-script, which is a sequence of operations
-// needed to convert one list into the other. The following invariants for
-// the edit-script are maintained:
-// - eq == (es.Dist()==0)
-// - nx == es.LenX()
-// - ny == es.LenY()
-//
-// This algorithm is not guaranteed to be an optimal solution (i.e., one that
-// produces an edit-script with a minimal Levenshtein distance). This algorithm
-// favors performance over optimality. The exact output is not guaranteed to
-// be stable and may change over time.
-func Difference(nx, ny int, f EqualFunc) (es EditScript) {
- // This algorithm is based on traversing what is known as an "edit-graph".
- // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
- // by Eugene W. Myers. Since D can be as large as N itself, this is
- // effectively O(N^2). Unlike the algorithm from that paper, we are not
- // interested in the optimal path, but at least some "decent" path.
- //
- // For example, let X and Y be lists of symbols:
- // X = [A B C A B B A]
- // Y = [C B A B A C]
- //
- // The edit-graph can be drawn as the following:
- // A B C A B B A
- // ┌─────────────┐
- // C │_|_|\|_|_|_|_│ 0
- // B │_|\|_|_|\|\|_│ 1
- // A │\|_|_|\|_|_|\│ 2
- // B │_|\|_|_|\|\|_│ 3
- // A │\|_|_|\|_|_|\│ 4
- // C │ | |\| | | | │ 5
- // └─────────────┘ 6
- // 0 1 2 3 4 5 6 7
- //
- // List X is written along the horizontal axis, while list Y is written
- // along the vertical axis. At any point on this grid, if the symbol in
- // list X matches the corresponding symbol in list Y, then a '\' is drawn.
- // The goal of any minimal edit-script algorithm is to find a path from the
- // top-left corner to the bottom-right corner, while traveling through the
- // fewest horizontal or vertical edges.
- // A horizontal edge is equivalent to inserting a symbol from list X.
- // A vertical edge is equivalent to inserting a symbol from list Y.
- // A diagonal edge is equivalent to a matching symbol between both X and Y.
-
- // Invariants:
- // - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
- // - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
- //
- // In general:
- // - fwdFrontier.X < revFrontier.X
- // - fwdFrontier.Y < revFrontier.Y
- //
- // Unless, it is time for the algorithm to terminate.
- fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
- revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
- fwdFrontier := fwdPath.point // Forward search frontier
- revFrontier := revPath.point // Reverse search frontier
-
- // Search budget bounds the cost of searching for better paths.
- // The longest sequence of non-matching symbols that can be tolerated is
- // approximately the square-root of the search budget.
- searchBudget := 4 * (nx + ny) // O(n)
-
- // Running the tests with the "cmp_debug" build tag prints a visualization
- // of the algorithm running in real-time. This is educational for
- // understanding how the algorithm works. See debug_enable.go.
- f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
-
- // The algorithm below is a greedy, meet-in-the-middle algorithm for
- // computing sub-optimal edit-scripts between two lists.
- //
- // The algorithm is approximately as follows:
- // - Searching for differences switches back-and-forth between
- // a search that starts at the beginning (the top-left corner), and
- // a search that starts at the end (the bottom-right corner).
- // The goal of the search is connect with the search
- // from the opposite corner.
- // - As we search, we build a path in a greedy manner,
- // where the first match seen is added to the path (this is sub-optimal,
- // but provides a decent result in practice). When matches are found,
- // we try the next pair of symbols in the lists and follow all matches
- // as far as possible.
- // - When searching for matches, we search along a diagonal going through
- // through the "frontier" point. If no matches are found,
- // we advance the frontier towards the opposite corner.
- // - This algorithm terminates when either the X coordinates or the
- // Y coordinates of the forward and reverse frontier points ever intersect.
-
- // This algorithm is correct even if searching only in the forward direction
- // or in the reverse direction. We do both because it is commonly observed
- // that two lists commonly differ because elements were added to the front
- // or end of the other list.
- //
- // Non-deterministically start with either the forward or reverse direction
- // to introduce some deliberate instability so that we have the flexibility
- // to change this algorithm in the future.
- if flags.Deterministic || randBool {
- goto forwardSearch
- } else {
- goto reverseSearch
- }
-
-forwardSearch:
- {
- // Forward search from the beginning.
- if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
- goto finishSearch
- }
- for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
- // Search in a diagonal pattern for a match.
- z := zigzag(i)
- p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
- switch {
- case p.X >= revPath.X || p.Y < fwdPath.Y:
- stop1 = true // Hit top-right corner
- case p.Y >= revPath.Y || p.X < fwdPath.X:
- stop2 = true // Hit bottom-left corner
- case f(p.X, p.Y).Equal():
- // Match found, so connect the path to this point.
- fwdPath.connect(p, f)
- fwdPath.append(Identity)
- // Follow sequence of matches as far as possible.
- for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
- if !f(fwdPath.X, fwdPath.Y).Equal() {
- break
- }
- fwdPath.append(Identity)
- }
- fwdFrontier = fwdPath.point
- stop1, stop2 = true, true
- default:
- searchBudget-- // Match not found
- }
- debug.Update()
- }
- // Advance the frontier towards reverse point.
- if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
- fwdFrontier.X++
- } else {
- fwdFrontier.Y++
- }
- goto reverseSearch
- }
-
-reverseSearch:
- {
- // Reverse search from the end.
- if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
- goto finishSearch
- }
- for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
- // Search in a diagonal pattern for a match.
- z := zigzag(i)
- p := point{revFrontier.X - z, revFrontier.Y + z}
- switch {
- case fwdPath.X >= p.X || revPath.Y < p.Y:
- stop1 = true // Hit bottom-left corner
- case fwdPath.Y >= p.Y || revPath.X < p.X:
- stop2 = true // Hit top-right corner
- case f(p.X-1, p.Y-1).Equal():
- // Match found, so connect the path to this point.
- revPath.connect(p, f)
- revPath.append(Identity)
- // Follow sequence of matches as far as possible.
- for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
- if !f(revPath.X-1, revPath.Y-1).Equal() {
- break
- }
- revPath.append(Identity)
- }
- revFrontier = revPath.point
- stop1, stop2 = true, true
- default:
- searchBudget-- // Match not found
- }
- debug.Update()
- }
- // Advance the frontier towards forward point.
- if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
- revFrontier.X--
- } else {
- revFrontier.Y--
- }
- goto forwardSearch
- }
-
-finishSearch:
- // Join the forward and reverse paths and then append the reverse path.
- fwdPath.connect(revPath.point, f)
- for i := len(revPath.es) - 1; i >= 0; i-- {
- t := revPath.es[i]
- revPath.es = revPath.es[:i]
- fwdPath.append(t)
- }
- debug.Finish()
- return fwdPath.es
-}
-
-type path struct {
- dir int // +1 if forward, -1 if reverse
- point // Leading point of the EditScript path
- es EditScript
-}
-
-// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types
-// to the edit-script to connect p.point to dst.
-func (p *path) connect(dst point, f EqualFunc) {
- if p.dir > 0 {
- // Connect in forward direction.
- for dst.X > p.X && dst.Y > p.Y {
- switch r := f(p.X, p.Y); {
- case r.Equal():
- p.append(Identity)
- case r.Similar():
- p.append(Modified)
- case dst.X-p.X >= dst.Y-p.Y:
- p.append(UniqueX)
- default:
- p.append(UniqueY)
- }
- }
- for dst.X > p.X {
- p.append(UniqueX)
- }
- for dst.Y > p.Y {
- p.append(UniqueY)
- }
- } else {
- // Connect in reverse direction.
- for p.X > dst.X && p.Y > dst.Y {
- switch r := f(p.X-1, p.Y-1); {
- case r.Equal():
- p.append(Identity)
- case r.Similar():
- p.append(Modified)
- case p.Y-dst.Y >= p.X-dst.X:
- p.append(UniqueY)
- default:
- p.append(UniqueX)
- }
- }
- for p.X > dst.X {
- p.append(UniqueX)
- }
- for p.Y > dst.Y {
- p.append(UniqueY)
- }
- }
-}
-
-func (p *path) append(t EditType) {
- p.es = append(p.es, t)
- switch t {
- case Identity, Modified:
- p.add(p.dir, p.dir)
- case UniqueX:
- p.add(p.dir, 0)
- case UniqueY:
- p.add(0, p.dir)
- }
- debug.Update()
-}
-
-type point struct{ X, Y int }
-
-func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }
-
-// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
-//
-// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
-func zigzag(x int) int {
- if x&1 != 0 {
- x = ^x
- }
- return x >> 1
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
deleted file mode 100644
index d8e459c9..00000000
--- a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2019, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package flags
-
-// Deterministic controls whether the output of Diff should be deterministic.
-// This is only used for testing.
-var Deterministic bool
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
deleted file mode 100644
index d127d436..00000000
--- a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2017, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package function provides functionality for identifying function types.
-package function
-
-import (
- "reflect"
- "regexp"
- "runtime"
- "strings"
-)
-
-type funcType int
-
-const (
- _ funcType = iota
-
- tbFunc // func(T) bool
- ttbFunc // func(T, T) bool
- trbFunc // func(T, R) bool
- tibFunc // func(T, I) bool
- trFunc // func(T) R
-
- Equal = ttbFunc // func(T, T) bool
- EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
- Transformer = trFunc // func(T) R
- ValueFilter = ttbFunc // func(T, T) bool
- Less = ttbFunc // func(T, T) bool
- ValuePredicate = tbFunc // func(T) bool
- KeyValuePredicate = trbFunc // func(T, R) bool
-)
-
-var boolType = reflect.TypeOf(true)
-
-// IsType reports whether the reflect.Type is of the specified function type.
-func IsType(t reflect.Type, ft funcType) bool {
- if t == nil || t.Kind() != reflect.Func || t.IsVariadic() {
- return false
- }
- ni, no := t.NumIn(), t.NumOut()
- switch ft {
- case tbFunc: // func(T) bool
- if ni == 1 && no == 1 && t.Out(0) == boolType {
- return true
- }
- case ttbFunc: // func(T, T) bool
- if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
- return true
- }
- case trbFunc: // func(T, R) bool
- if ni == 2 && no == 1 && t.Out(0) == boolType {
- return true
- }
- case tibFunc: // func(T, I) bool
- if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
- return true
- }
- case trFunc: // func(T) R
- if ni == 1 && no == 1 {
- return true
- }
- }
- return false
-}
-
-var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`)
-
-// NameOf returns the name of the function value.
-func NameOf(v reflect.Value) string {
- fnc := runtime.FuncForPC(v.Pointer())
- if fnc == nil {
- return ""
- }
- fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm"
-
- // Method closures have a "-fm" suffix.
- fullName = strings.TrimSuffix(fullName, "-fm")
-
- var name string
- for len(fullName) > 0 {
- inParen := strings.HasSuffix(fullName, ")")
- fullName = strings.TrimSuffix(fullName, ")")
-
- s := lastIdentRx.FindString(fullName)
- if s == "" {
- break
- }
- name = s + "." + name
- fullName = strings.TrimSuffix(fullName, s)
-
- if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 {
- fullName = fullName[:i]
- }
- fullName = strings.TrimSuffix(fullName, ".")
- }
- return strings.TrimSuffix(name, ".")
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go
deleted file mode 100644
index 7b498bb2..00000000
--- a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2020, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package value
-
-import (
- "reflect"
- "strconv"
-)
-
-var anyType = reflect.TypeOf((*interface{})(nil)).Elem()
-
-// TypeString is nearly identical to reflect.Type.String,
-// but has an additional option to specify that full type names be used.
-func TypeString(t reflect.Type, qualified bool) string {
- return string(appendTypeName(nil, t, qualified, false))
-}
-
-func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte {
- // BUG: Go reflection provides no way to disambiguate two named types
- // of the same name and within the same package,
- // but declared within the namespace of different functions.
-
- // Use the "any" alias instead of "interface{}" for better readability.
- if t == anyType {
- return append(b, "any"...)
- }
-
- // Named type.
- if t.Name() != "" {
- if qualified && t.PkgPath() != "" {
- b = append(b, '"')
- b = append(b, t.PkgPath()...)
- b = append(b, '"')
- b = append(b, '.')
- b = append(b, t.Name()...)
- } else {
- b = append(b, t.String()...)
- }
- return b
- }
-
- // Unnamed type.
- switch k := t.Kind(); k {
- case reflect.Bool, reflect.String, reflect.UnsafePointer,
- reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
- reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
- b = append(b, k.String()...)
- case reflect.Chan:
- if t.ChanDir() == reflect.RecvDir {
- b = append(b, "<-"...)
- }
- b = append(b, "chan"...)
- if t.ChanDir() == reflect.SendDir {
- b = append(b, "<-"...)
- }
- b = append(b, ' ')
- b = appendTypeName(b, t.Elem(), qualified, false)
- case reflect.Func:
- if !elideFunc {
- b = append(b, "func"...)
- }
- b = append(b, '(')
- for i := 0; i < t.NumIn(); i++ {
- if i > 0 {
- b = append(b, ", "...)
- }
- if i == t.NumIn()-1 && t.IsVariadic() {
- b = append(b, "..."...)
- b = appendTypeName(b, t.In(i).Elem(), qualified, false)
- } else {
- b = appendTypeName(b, t.In(i), qualified, false)
- }
- }
- b = append(b, ')')
- switch t.NumOut() {
- case 0:
- // Do nothing
- case 1:
- b = append(b, ' ')
- b = appendTypeName(b, t.Out(0), qualified, false)
- default:
- b = append(b, " ("...)
- for i := 0; i < t.NumOut(); i++ {
- if i > 0 {
- b = append(b, ", "...)
- }
- b = appendTypeName(b, t.Out(i), qualified, false)
- }
- b = append(b, ')')
- }
- case reflect.Struct:
- b = append(b, "struct{ "...)
- for i := 0; i < t.NumField(); i++ {
- if i > 0 {
- b = append(b, "; "...)
- }
- sf := t.Field(i)
- if !sf.Anonymous {
- if qualified && sf.PkgPath != "" {
- b = append(b, '"')
- b = append(b, sf.PkgPath...)
- b = append(b, '"')
- b = append(b, '.')
- }
- b = append(b, sf.Name...)
- b = append(b, ' ')
- }
- b = appendTypeName(b, sf.Type, qualified, false)
- if sf.Tag != "" {
- b = append(b, ' ')
- b = strconv.AppendQuote(b, string(sf.Tag))
- }
- }
- if b[len(b)-1] == ' ' {
- b = b[:len(b)-1]
- } else {
- b = append(b, ' ')
- }
- b = append(b, '}')
- case reflect.Slice, reflect.Array:
- b = append(b, '[')
- if k == reflect.Array {
- b = strconv.AppendUint(b, uint64(t.Len()), 10)
- }
- b = append(b, ']')
- b = appendTypeName(b, t.Elem(), qualified, false)
- case reflect.Map:
- b = append(b, "map["...)
- b = appendTypeName(b, t.Key(), qualified, false)
- b = append(b, ']')
- b = appendTypeName(b, t.Elem(), qualified, false)
- case reflect.Ptr:
- b = append(b, '*')
- b = appendTypeName(b, t.Elem(), qualified, false)
- case reflect.Interface:
- b = append(b, "interface{ "...)
- for i := 0; i < t.NumMethod(); i++ {
- if i > 0 {
- b = append(b, "; "...)
- }
- m := t.Method(i)
- if qualified && m.PkgPath != "" {
- b = append(b, '"')
- b = append(b, m.PkgPath...)
- b = append(b, '"')
- b = append(b, '.')
- }
- b = append(b, m.Name...)
- b = appendTypeName(b, m.Type, qualified, true)
- }
- if b[len(b)-1] == ' ' {
- b = b[:len(b)-1]
- } else {
- b = append(b, ' ')
- }
- b = append(b, '}')
- default:
- panic("invalid kind: " + k.String())
- }
- return b
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go
deleted file mode 100644
index e5dfff69..00000000
--- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2018, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package value
-
-import (
- "reflect"
- "unsafe"
-)
-
-// Pointer is an opaque typed pointer and is guaranteed to be comparable.
-type Pointer struct {
- p unsafe.Pointer
- t reflect.Type
-}
-
-// PointerOf returns a Pointer from v, which must be a
-// reflect.Ptr, reflect.Slice, or reflect.Map.
-func PointerOf(v reflect.Value) Pointer {
- // The proper representation of a pointer is unsafe.Pointer,
- // which is necessary if the GC ever uses a moving collector.
- return Pointer{unsafe.Pointer(v.Pointer()), v.Type()}
-}
-
-// IsNil reports whether the pointer is nil.
-func (p Pointer) IsNil() bool {
- return p.p == nil
-}
-
-// Uintptr returns the pointer as a uintptr.
-func (p Pointer) Uintptr() uintptr {
- return uintptr(p.p)
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
deleted file mode 100644
index 98533b03..00000000
--- a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2017, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package value
-
-import (
- "fmt"
- "math"
- "reflect"
- "sort"
-)
-
-// SortKeys sorts a list of map keys, deduplicating keys if necessary.
-// The type of each value must be comparable.
-func SortKeys(vs []reflect.Value) []reflect.Value {
- if len(vs) == 0 {
- return vs
- }
-
- // Sort the map keys.
- sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) })
-
- // Deduplicate keys (fails for NaNs).
- vs2 := vs[:1]
- for _, v := range vs[1:] {
- if isLess(vs2[len(vs2)-1], v) {
- vs2 = append(vs2, v)
- }
- }
- return vs2
-}
-
-// isLess is a generic function for sorting arbitrary map keys.
-// The inputs must be of the same type and must be comparable.
-func isLess(x, y reflect.Value) bool {
- switch x.Type().Kind() {
- case reflect.Bool:
- return !x.Bool() && y.Bool()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return x.Int() < y.Int()
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return x.Uint() < y.Uint()
- case reflect.Float32, reflect.Float64:
- // NOTE: This does not sort -0 as less than +0
- // since Go maps treat -0 and +0 as equal keys.
- fx, fy := x.Float(), y.Float()
- return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy)
- case reflect.Complex64, reflect.Complex128:
- cx, cy := x.Complex(), y.Complex()
- rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy)
- if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) {
- return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy)
- }
- return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry)
- case reflect.Ptr, reflect.UnsafePointer, reflect.Chan:
- return x.Pointer() < y.Pointer()
- case reflect.String:
- return x.String() < y.String()
- case reflect.Array:
- for i := 0; i < x.Len(); i++ {
- if isLess(x.Index(i), y.Index(i)) {
- return true
- }
- if isLess(y.Index(i), x.Index(i)) {
- return false
- }
- }
- return false
- case reflect.Struct:
- for i := 0; i < x.NumField(); i++ {
- if isLess(x.Field(i), y.Field(i)) {
- return true
- }
- if isLess(y.Field(i), x.Field(i)) {
- return false
- }
- }
- return false
- case reflect.Interface:
- vx, vy := x.Elem(), y.Elem()
- if !vx.IsValid() || !vy.IsValid() {
- return !vx.IsValid() && vy.IsValid()
- }
- tx, ty := vx.Type(), vy.Type()
- if tx == ty {
- return isLess(x.Elem(), y.Elem())
- }
- if tx.Kind() != ty.Kind() {
- return vx.Kind() < vy.Kind()
- }
- if tx.String() != ty.String() {
- return tx.String() < ty.String()
- }
- if tx.PkgPath() != ty.PkgPath() {
- return tx.PkgPath() < ty.PkgPath()
- }
- // This can happen in rare situations, so we fallback to just comparing
- // the unique pointer for a reflect.Type. This guarantees deterministic
- // ordering within a program, but it is obviously not stable.
- return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer()
- default:
- // Must be Func, Map, or Slice; which are not comparable.
- panic(fmt.Sprintf("%T is not comparable", x.Type()))
- }
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go
deleted file mode 100644
index 754496f3..00000000
--- a/vendor/github.com/google/go-cmp/cmp/options.go
+++ /dev/null
@@ -1,554 +0,0 @@
-// Copyright 2017, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmp
-
-import (
- "fmt"
- "reflect"
- "regexp"
- "strings"
-
- "github.com/google/go-cmp/cmp/internal/function"
-)
-
-// Option configures for specific behavior of [Equal] and [Diff]. In particular,
-// the fundamental Option functions ([Ignore], [Transformer], and [Comparer]),
-// configure how equality is determined.
-//
-// The fundamental options may be composed with filters ([FilterPath] and
-// [FilterValues]) to control the scope over which they are applied.
-//
-// The [github.com/google/go-cmp/cmp/cmpopts] package provides helper functions
-// for creating options that may be used with [Equal] and [Diff].
-type Option interface {
- // filter applies all filters and returns the option that remains.
- // Each option may only read s.curPath and call s.callTTBFunc.
- //
- // An Options is returned only if multiple comparers or transformers
- // can apply simultaneously and will only contain values of those types
- // or sub-Options containing values of those types.
- filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption
-}
-
-// applicableOption represents the following types:
-//
-// Fundamental: ignore | validator | *comparer | *transformer
-// Grouping: Options
-type applicableOption interface {
- Option
-
- // apply executes the option, which may mutate s or panic.
- apply(s *state, vx, vy reflect.Value)
-}
-
-// coreOption represents the following types:
-//
-// Fundamental: ignore | validator | *comparer | *transformer
-// Filters: *pathFilter | *valuesFilter
-type coreOption interface {
- Option
- isCore()
-}
-
-type core struct{}
-
-func (core) isCore() {}
-
-// Options is a list of [Option] values that also satisfies the [Option] interface.
-// Helper comparison packages may return an Options value when packing multiple
-// [Option] values into a single [Option]. When this package processes an Options,
-// it will be implicitly expanded into a flat list.
-//
-// Applying a filter on an Options is equivalent to applying that same filter
-// on all individual options held within.
-type Options []Option
-
-func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) {
- for _, opt := range opts {
- switch opt := opt.filter(s, t, vx, vy); opt.(type) {
- case ignore:
- return ignore{} // Only ignore can short-circuit evaluation
- case validator:
- out = validator{} // Takes precedence over comparer or transformer
- case *comparer, *transformer, Options:
- switch out.(type) {
- case nil:
- out = opt
- case validator:
- // Keep validator
- case *comparer, *transformer, Options:
- out = Options{out, opt} // Conflicting comparers or transformers
- }
- }
- }
- return out
-}
-
-func (opts Options) apply(s *state, _, _ reflect.Value) {
- const warning = "ambiguous set of applicable options"
- const help = "consider using filters to ensure at most one Comparer or Transformer may apply"
- var ss []string
- for _, opt := range flattenOptions(nil, opts) {
- ss = append(ss, fmt.Sprint(opt))
- }
- set := strings.Join(ss, "\n\t")
- panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help))
-}
-
-func (opts Options) String() string {
- var ss []string
- for _, opt := range opts {
- ss = append(ss, fmt.Sprint(opt))
- }
- return fmt.Sprintf("Options{%s}", strings.Join(ss, ", "))
-}
-
-// FilterPath returns a new [Option] where opt is only evaluated if filter f
-// returns true for the current [Path] in the value tree.
-//
-// This filter is called even if a slice element or map entry is missing and
-// provides an opportunity to ignore such cases. The filter function must be
-// symmetric such that the filter result is identical regardless of whether the
-// missing value is from x or y.
-//
-// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
-// a previously filtered [Option].
-func FilterPath(f func(Path) bool, opt Option) Option {
- if f == nil {
- panic("invalid path filter function")
- }
- if opt := normalizeOption(opt); opt != nil {
- return &pathFilter{fnc: f, opt: opt}
- }
- return nil
-}
-
-type pathFilter struct {
- core
- fnc func(Path) bool
- opt Option
-}
-
-func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
- if f.fnc(s.curPath) {
- return f.opt.filter(s, t, vx, vy)
- }
- return nil
-}
-
-func (f pathFilter) String() string {
- return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt)
-}
-
-// FilterValues returns a new [Option] where opt is only evaluated if filter f,
-// which is a function of the form "func(T, T) bool", returns true for the
-// current pair of values being compared. If either value is invalid or
-// the type of the values is not assignable to T, then this filter implicitly
-// returns false.
-//
-// The filter function must be
-// symmetric (i.e., agnostic to the order of the inputs) and
-// deterministic (i.e., produces the same result when given the same inputs).
-// If T is an interface, it is possible that f is called with two values with
-// different concrete types that both implement T.
-//
-// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
-// a previously filtered [Option].
-func FilterValues(f interface{}, opt Option) Option {
- v := reflect.ValueOf(f)
- if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() {
- panic(fmt.Sprintf("invalid values filter function: %T", f))
- }
- if opt := normalizeOption(opt); opt != nil {
- vf := &valuesFilter{fnc: v, opt: opt}
- if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
- vf.typ = ti
- }
- return vf
- }
- return nil
-}
-
-type valuesFilter struct {
- core
- typ reflect.Type // T
- fnc reflect.Value // func(T, T) bool
- opt Option
-}
-
-func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
- if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() {
- return nil
- }
- if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) {
- return f.opt.filter(s, t, vx, vy)
- }
- return nil
-}
-
-func (f valuesFilter) String() string {
- return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt)
-}
-
-// Ignore is an [Option] that causes all comparisons to be ignored.
-// This value is intended to be combined with [FilterPath] or [FilterValues].
-// It is an error to pass an unfiltered Ignore option to [Equal].
-func Ignore() Option { return ignore{} }
-
-type ignore struct{ core }
-
-func (ignore) isFiltered() bool { return false }
-func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} }
-func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) }
-func (ignore) String() string { return "Ignore()" }
-
-// validator is a sentinel Option type to indicate that some options could not
-// be evaluated due to unexported fields, missing slice elements, or
-// missing map entries. Both values are validator only for unexported fields.
-type validator struct{ core }
-
-func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption {
- if !vx.IsValid() || !vy.IsValid() {
- return validator{}
- }
- if !vx.CanInterface() || !vy.CanInterface() {
- return validator{}
- }
- return nil
-}
-func (validator) apply(s *state, vx, vy reflect.Value) {
- // Implies missing slice element or map entry.
- if !vx.IsValid() || !vy.IsValid() {
- s.report(vx.IsValid() == vy.IsValid(), 0)
- return
- }
-
- // Unable to Interface implies unexported field without visibility access.
- if !vx.CanInterface() || !vy.CanInterface() {
- help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported"
- var name string
- if t := s.curPath.Index(-2).Type(); t.Name() != "" {
- // Named type with unexported fields.
- name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
- if _, ok := reflect.New(t).Interface().(error); ok {
- help = "consider using cmpopts.EquateErrors to compare error values"
- } else if t.Comparable() {
- help = "consider using cmpopts.EquateComparable to compare comparable Go types"
- }
- } else {
- // Unnamed type with unexported fields. Derive PkgPath from field.
- var pkgPath string
- for i := 0; i < t.NumField() && pkgPath == ""; i++ {
- pkgPath = t.Field(i).PkgPath
- }
- name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int })
- }
- panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help))
- }
-
- panic("not reachable")
-}
-
-// identRx represents a valid identifier according to the Go specification.
-const identRx = `[_\p{L}][_\p{L}\p{N}]*`
-
-var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)
-
-// Transformer returns an [Option] that applies a transformation function that
-// converts values of a certain type into that of another.
-//
-// The transformer f must be a function "func(T) R" that converts values of
-// type T to those of type R and is implicitly filtered to input values
-// assignable to T. The transformer must not mutate T in any way.
-//
-// To help prevent some cases of infinite recursive cycles applying the
-// same transform to the output of itself (e.g., in the case where the
-// input and output types are the same), an implicit filter is added such that
-// a transformer is applicable only if that exact transformer is not already
-// in the tail of the [Path] since the last non-[Transform] step.
-// For situations where the implicit filter is still insufficient,
-// consider using [github.com/google/go-cmp/cmp/cmpopts.AcyclicTransformer],
-// which adds a filter to prevent the transformer from
-// being recursively applied upon itself.
-//
-// The name is a user provided label that is used as the [Transform.Name] in the
-// transformation [PathStep] (and eventually shown in the [Diff] output).
-// The name must be a valid identifier or qualified identifier in Go syntax.
-// If empty, an arbitrary name is used.
-func Transformer(name string, f interface{}) Option {
- v := reflect.ValueOf(f)
- if !function.IsType(v.Type(), function.Transformer) || v.IsNil() {
- panic(fmt.Sprintf("invalid transformer function: %T", f))
- }
- if name == "" {
- name = function.NameOf(v)
- if !identsRx.MatchString(name) {
- name = "λ" // Lambda-symbol as placeholder name
- }
- } else if !identsRx.MatchString(name) {
- panic(fmt.Sprintf("invalid name: %q", name))
- }
- tr := &transformer{name: name, fnc: reflect.ValueOf(f)}
- if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
- tr.typ = ti
- }
- return tr
-}
-
-type transformer struct {
- core
- name string
- typ reflect.Type // T
- fnc reflect.Value // func(T) R
-}
-
-func (tr *transformer) isFiltered() bool { return tr.typ != nil }
-
-func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption {
- for i := len(s.curPath) - 1; i >= 0; i-- {
- if t, ok := s.curPath[i].(Transform); !ok {
- break // Hit most recent non-Transform step
- } else if tr == t.trans {
- return nil // Cannot directly use same Transform
- }
- }
- if tr.typ == nil || t.AssignableTo(tr.typ) {
- return tr
- }
- return nil
-}
-
-func (tr *transformer) apply(s *state, vx, vy reflect.Value) {
- step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}}
- vvx := s.callTRFunc(tr.fnc, vx, step)
- vvy := s.callTRFunc(tr.fnc, vy, step)
- step.vx, step.vy = vvx, vvy
- s.compareAny(step)
-}
-
-func (tr transformer) String() string {
- return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc))
-}
-
-// Comparer returns an [Option] that determines whether two values are equal
-// to each other.
-//
-// The comparer f must be a function "func(T, T) bool" and is implicitly
-// filtered to input values assignable to T. If T is an interface, it is
-// possible that f is called with two values of different concrete types that
-// both implement T.
-//
-// The equality function must be:
-// - Symmetric: equal(x, y) == equal(y, x)
-// - Deterministic: equal(x, y) == equal(x, y)
-// - Pure: equal(x, y) does not modify x or y
-func Comparer(f interface{}) Option {
- v := reflect.ValueOf(f)
- if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
- panic(fmt.Sprintf("invalid comparer function: %T", f))
- }
- cm := &comparer{fnc: v}
- if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
- cm.typ = ti
- }
- return cm
-}
-
-type comparer struct {
- core
- typ reflect.Type // T
- fnc reflect.Value // func(T, T) bool
-}
-
-func (cm *comparer) isFiltered() bool { return cm.typ != nil }
-
-func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption {
- if cm.typ == nil || t.AssignableTo(cm.typ) {
- return cm
- }
- return nil
-}
-
-func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
- eq := s.callTTBFunc(cm.fnc, vx, vy)
- s.report(eq, reportByFunc)
-}
-
-func (cm comparer) String() string {
- return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc))
-}
-
-// Exporter returns an [Option] that specifies whether [Equal] is allowed to
-// introspect into the unexported fields of certain struct types.
-//
-// Users of this option must understand that comparing on unexported fields
-// from external packages is not safe since changes in the internal
-// implementation of some external package may cause the result of [Equal]
-// to unexpectedly change. However, it may be valid to use this option on types
-// defined in an internal package where the semantic meaning of an unexported
-// field is in the control of the user.
-//
-// In many cases, a custom [Comparer] should be used instead that defines
-// equality as a function of the public API of a type rather than the underlying
-// unexported implementation.
-//
-// For example, the [reflect.Type] documentation defines equality to be determined
-// by the == operator on the interface (essentially performing a shallow pointer
-// comparison) and most attempts to compare *[regexp.Regexp] types are interested
-// in only checking that the regular expression strings are equal.
-// Both of these are accomplished using [Comparer] options:
-//
-// Comparer(func(x, y reflect.Type) bool { return x == y })
-// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
-//
-// In other cases, the [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]
-// option can be used to ignore all unexported fields on specified struct types.
-func Exporter(f func(reflect.Type) bool) Option {
- return exporter(f)
-}
-
-type exporter func(reflect.Type) bool
-
-func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
- panic("not implemented")
-}
-
-// AllowUnexported returns an [Option] that allows [Equal] to forcibly introspect
-// unexported fields of the specified struct types.
-//
-// See [Exporter] for the proper use of this option.
-func AllowUnexported(types ...interface{}) Option {
- m := make(map[reflect.Type]bool)
- for _, typ := range types {
- t := reflect.TypeOf(typ)
- if t.Kind() != reflect.Struct {
- panic(fmt.Sprintf("invalid struct type: %T", typ))
- }
- m[t] = true
- }
- return exporter(func(t reflect.Type) bool { return m[t] })
-}
-
-// Result represents the comparison result for a single node and
-// is provided by cmp when calling Report (see [Reporter]).
-type Result struct {
- _ [0]func() // Make Result incomparable
- flags resultFlags
-}
-
-// Equal reports whether the node was determined to be equal or not.
-// As a special case, ignored nodes are considered equal.
-func (r Result) Equal() bool {
- return r.flags&(reportEqual|reportByIgnore) != 0
-}
-
-// ByIgnore reports whether the node is equal because it was ignored.
-// This never reports true if [Result.Equal] reports false.
-func (r Result) ByIgnore() bool {
- return r.flags&reportByIgnore != 0
-}
-
-// ByMethod reports whether the Equal method determined equality.
-func (r Result) ByMethod() bool {
- return r.flags&reportByMethod != 0
-}
-
-// ByFunc reports whether a [Comparer] function determined equality.
-func (r Result) ByFunc() bool {
- return r.flags&reportByFunc != 0
-}
-
-// ByCycle reports whether a reference cycle was detected.
-func (r Result) ByCycle() bool {
- return r.flags&reportByCycle != 0
-}
-
-type resultFlags uint
-
-const (
- _ resultFlags = (1 << iota) / 2
-
- reportEqual
- reportUnequal
- reportByIgnore
- reportByMethod
- reportByFunc
- reportByCycle
-)
-
-// Reporter is an [Option] that can be passed to [Equal]. When [Equal] traverses
-// the value trees, it calls PushStep as it descends into each node in the
-// tree and PopStep as it ascend out of the node. The leaves of the tree are
-// either compared (determined to be equal or not equal) or ignored and reported
-// as such by calling the Report method.
-func Reporter(r interface {
- // PushStep is called when a tree-traversal operation is performed.
- // The PathStep itself is only valid until the step is popped.
- // The PathStep.Values are valid for the duration of the entire traversal
- // and must not be mutated.
- //
- // Equal always calls PushStep at the start to provide an operation-less
- // PathStep used to report the root values.
- //
- // Within a slice, the exact set of inserted, removed, or modified elements
- // is unspecified and may change in future implementations.
- // The entries of a map are iterated through in an unspecified order.
- PushStep(PathStep)
-
- // Report is called exactly once on leaf nodes to report whether the
- // comparison identified the node as equal, unequal, or ignored.
- // A leaf node is one that is immediately preceded by and followed by
- // a pair of PushStep and PopStep calls.
- Report(Result)
-
- // PopStep ascends back up the value tree.
- // There is always a matching pop call for every push call.
- PopStep()
-}) Option {
- return reporter{r}
-}
-
-type reporter struct{ reporterIface }
-type reporterIface interface {
- PushStep(PathStep)
- Report(Result)
- PopStep()
-}
-
-func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
- panic("not implemented")
-}
-
-// normalizeOption normalizes the input options such that all Options groups
-// are flattened and groups with a single element are reduced to that element.
-// Only coreOptions and Options containing coreOptions are allowed.
-func normalizeOption(src Option) Option {
- switch opts := flattenOptions(nil, Options{src}); len(opts) {
- case 0:
- return nil
- case 1:
- return opts[0]
- default:
- return opts
- }
-}
-
-// flattenOptions copies all options in src to dst as a flat list.
-// Only coreOptions and Options containing coreOptions are allowed.
-func flattenOptions(dst, src Options) Options {
- for _, opt := range src {
- switch opt := opt.(type) {
- case nil:
- continue
- case Options:
- dst = flattenOptions(dst, opt)
- case coreOption:
- dst = append(dst, opt)
- default:
- panic(fmt.Sprintf("invalid option type: %T", opt))
- }
- }
- return dst
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go
deleted file mode 100644
index c3c14564..00000000
--- a/vendor/github.com/google/go-cmp/cmp/path.go
+++ /dev/null
@@ -1,390 +0,0 @@
-// Copyright 2017, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmp
-
-import (
- "fmt"
- "reflect"
- "strings"
- "unicode"
- "unicode/utf8"
-
- "github.com/google/go-cmp/cmp/internal/value"
-)
-
-// Path is a list of [PathStep] describing the sequence of operations to get
-// from some root type to the current position in the value tree.
-// The first Path element is always an operation-less [PathStep] that exists
-// simply to identify the initial type.
-//
-// When traversing structs with embedded structs, the embedded struct will
-// always be accessed as a field before traversing the fields of the
-// embedded struct themselves. That is, an exported field from the
-// embedded struct will never be accessed directly from the parent struct.
-type Path []PathStep
-
-// PathStep is a union-type for specific operations to traverse
-// a value's tree structure. Users of this package never need to implement
-// these types as values of this type will be returned by this package.
-//
-// Implementations of this interface:
-// - [StructField]
-// - [SliceIndex]
-// - [MapIndex]
-// - [Indirect]
-// - [TypeAssertion]
-// - [Transform]
-type PathStep interface {
- String() string
-
- // Type is the resulting type after performing the path step.
- Type() reflect.Type
-
- // Values is the resulting values after performing the path step.
- // The type of each valid value is guaranteed to be identical to Type.
- //
- // In some cases, one or both may be invalid or have restrictions:
- // - For StructField, both are not interface-able if the current field
- // is unexported and the struct type is not explicitly permitted by
- // an Exporter to traverse unexported fields.
- // - For SliceIndex, one may be invalid if an element is missing from
- // either the x or y slice.
- // - For MapIndex, one may be invalid if an entry is missing from
- // either the x or y map.
- //
- // The provided values must not be mutated.
- Values() (vx, vy reflect.Value)
-}
-
-var (
- _ PathStep = StructField{}
- _ PathStep = SliceIndex{}
- _ PathStep = MapIndex{}
- _ PathStep = Indirect{}
- _ PathStep = TypeAssertion{}
- _ PathStep = Transform{}
-)
-
-func (pa *Path) push(s PathStep) {
- *pa = append(*pa, s)
-}
-
-func (pa *Path) pop() {
- *pa = (*pa)[:len(*pa)-1]
-}
-
-// Last returns the last [PathStep] in the Path.
-// If the path is empty, this returns a non-nil [PathStep]
-// that reports a nil [PathStep.Type].
-func (pa Path) Last() PathStep {
- return pa.Index(-1)
-}
-
-// Index returns the ith step in the Path and supports negative indexing.
-// A negative index starts counting from the tail of the Path such that -1
-// refers to the last step, -2 refers to the second-to-last step, and so on.
-// If index is invalid, this returns a non-nil [PathStep]
-// that reports a nil [PathStep.Type].
-func (pa Path) Index(i int) PathStep {
- if i < 0 {
- i = len(pa) + i
- }
- if i < 0 || i >= len(pa) {
- return pathStep{}
- }
- return pa[i]
-}
-
-// String returns the simplified path to a node.
-// The simplified path only contains struct field accesses.
-//
-// For example:
-//
-// MyMap.MySlices.MyField
-func (pa Path) String() string {
- var ss []string
- for _, s := range pa {
- if _, ok := s.(StructField); ok {
- ss = append(ss, s.String())
- }
- }
- return strings.TrimPrefix(strings.Join(ss, ""), ".")
-}
-
-// GoString returns the path to a specific node using Go syntax.
-//
-// For example:
-//
-// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
-func (pa Path) GoString() string {
- var ssPre, ssPost []string
- var numIndirect int
- for i, s := range pa {
- var nextStep PathStep
- if i+1 < len(pa) {
- nextStep = pa[i+1]
- }
- switch s := s.(type) {
- case Indirect:
- numIndirect++
- pPre, pPost := "(", ")"
- switch nextStep.(type) {
- case Indirect:
- continue // Next step is indirection, so let them batch up
- case StructField:
- numIndirect-- // Automatic indirection on struct fields
- case nil:
- pPre, pPost = "", "" // Last step; no need for parenthesis
- }
- if numIndirect > 0 {
- ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect))
- ssPost = append(ssPost, pPost)
- }
- numIndirect = 0
- continue
- case Transform:
- ssPre = append(ssPre, s.trans.name+"(")
- ssPost = append(ssPost, ")")
- continue
- }
- ssPost = append(ssPost, s.String())
- }
- for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 {
- ssPre[i], ssPre[j] = ssPre[j], ssPre[i]
- }
- return strings.Join(ssPre, "") + strings.Join(ssPost, "")
-}
-
-type pathStep struct {
- typ reflect.Type
- vx, vy reflect.Value
-}
-
-func (ps pathStep) Type() reflect.Type { return ps.typ }
-func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy }
-func (ps pathStep) String() string {
- if ps.typ == nil {
- return ""
- }
- s := value.TypeString(ps.typ, false)
- if s == "" || strings.ContainsAny(s, "{}\n") {
- return "root" // Type too simple or complex to print
- }
- return fmt.Sprintf("{%s}", s)
-}
-
-// StructField is a [PathStep] that represents a struct field access
-// on a field called [StructField.Name].
-type StructField struct{ *structField }
-type structField struct {
- pathStep
- name string
- idx int
-
- // These fields are used for forcibly accessing an unexported field.
- // pvx, pvy, and field are only valid if unexported is true.
- unexported bool
- mayForce bool // Forcibly allow visibility
- paddr bool // Was parent addressable?
- pvx, pvy reflect.Value // Parent values (always addressable)
- field reflect.StructField // Field information
-}
-
-func (sf StructField) Type() reflect.Type { return sf.typ }
-func (sf StructField) Values() (vx, vy reflect.Value) {
- if !sf.unexported {
- return sf.vx, sf.vy // CanInterface reports true
- }
-
- // Forcibly obtain read-write access to an unexported struct field.
- if sf.mayForce {
- vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr)
- vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr)
- return vx, vy // CanInterface reports true
- }
- return sf.vx, sf.vy // CanInterface reports false
-}
-func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) }
-
-// Name is the field name.
-func (sf StructField) Name() string { return sf.name }
-
-// Index is the index of the field in the parent struct type.
-// See [reflect.Type.Field].
-func (sf StructField) Index() int { return sf.idx }
-
-// SliceIndex is a [PathStep] that represents an index operation on
-// a slice or array at some index [SliceIndex.Key].
-type SliceIndex struct{ *sliceIndex }
-type sliceIndex struct {
- pathStep
- xkey, ykey int
- isSlice bool // False for reflect.Array
-}
-
-func (si SliceIndex) Type() reflect.Type { return si.typ }
-func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy }
-func (si SliceIndex) String() string {
- switch {
- case si.xkey == si.ykey:
- return fmt.Sprintf("[%d]", si.xkey)
- case si.ykey == -1:
- // [5->?] means "I don't know where X[5] went"
- return fmt.Sprintf("[%d->?]", si.xkey)
- case si.xkey == -1:
- // [?->3] means "I don't know where Y[3] came from"
- return fmt.Sprintf("[?->%d]", si.ykey)
- default:
- // [5->3] means "X[5] moved to Y[3]"
- return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey)
- }
-}
-
-// Key is the index key; it may return -1 if in a split state
-func (si SliceIndex) Key() int {
- if si.xkey != si.ykey {
- return -1
- }
- return si.xkey
-}
-
-// SplitKeys are the indexes for indexing into slices in the
-// x and y values, respectively. These indexes may differ due to the
-// insertion or removal of an element in one of the slices, causing
-// all of the indexes to be shifted. If an index is -1, then that
-// indicates that the element does not exist in the associated slice.
-//
-// [SliceIndex.Key] is guaranteed to return -1 if and only if the indexes
-// returned by SplitKeys are not the same. SplitKeys will never return -1 for
-// both indexes.
-func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey }
-
-// MapIndex is a [PathStep] that represents an index operation on a map at some index Key.
-type MapIndex struct{ *mapIndex }
-type mapIndex struct {
- pathStep
- key reflect.Value
-}
-
-func (mi MapIndex) Type() reflect.Type { return mi.typ }
-func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy }
-func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) }
-
-// Key is the value of the map key.
-func (mi MapIndex) Key() reflect.Value { return mi.key }
-
-// Indirect is a [PathStep] that represents pointer indirection on the parent type.
-type Indirect struct{ *indirect }
-type indirect struct {
- pathStep
-}
-
-func (in Indirect) Type() reflect.Type { return in.typ }
-func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy }
-func (in Indirect) String() string { return "*" }
-
-// TypeAssertion is a [PathStep] that represents a type assertion on an interface.
-type TypeAssertion struct{ *typeAssertion }
-type typeAssertion struct {
- pathStep
-}
-
-func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
-func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
-func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) }
-
-// Transform is a [PathStep] that represents a transformation
-// from the parent type to the current type.
-type Transform struct{ *transform }
-type transform struct {
- pathStep
- trans *transformer
-}
-
-func (tf Transform) Type() reflect.Type { return tf.typ }
-func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy }
-func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) }
-
-// Name is the name of the [Transformer].
-func (tf Transform) Name() string { return tf.trans.name }
-
-// Func is the function pointer to the transformer function.
-func (tf Transform) Func() reflect.Value { return tf.trans.fnc }
-
-// Option returns the originally constructed [Transformer] option.
-// The == operator can be used to detect the exact option used.
-func (tf Transform) Option() Option { return tf.trans }
-
-// pointerPath represents a dual-stack of pointers encountered when
-// recursively traversing the x and y values. This data structure supports
-// detection of cycles and determining whether the cycles are equal.
-// In Go, cycles can occur via pointers, slices, and maps.
-//
-// The pointerPath uses a map to represent a stack; where descension into a
-// pointer pushes the address onto the stack, and ascension from a pointer
-// pops the address from the stack. Thus, when traversing into a pointer from
-// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles
-// by checking whether the pointer has already been visited. The cycle detection
-// uses a separate stack for the x and y values.
-//
-// If a cycle is detected we need to determine whether the two pointers
-// should be considered equal. The definition of equality chosen by Equal
-// requires two graphs to have the same structure. To determine this, both the
-// x and y values must have a cycle where the previous pointers were also
-// encountered together as a pair.
-//
-// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and
-// MapIndex with pointer information for the x and y values.
-// Suppose px and py are two pointers to compare, we then search the
-// Path for whether px was ever encountered in the Path history of x, and
-// similarly so with py. If either side has a cycle, the comparison is only
-// equal if both px and py have a cycle resulting from the same PathStep.
-//
-// Using a map as a stack is more performant as we can perform cycle detection
-// in O(1) instead of O(N) where N is len(Path).
-type pointerPath struct {
- // mx is keyed by x pointers, where the value is the associated y pointer.
- mx map[value.Pointer]value.Pointer
- // my is keyed by y pointers, where the value is the associated x pointer.
- my map[value.Pointer]value.Pointer
-}
-
-func (p *pointerPath) Init() {
- p.mx = make(map[value.Pointer]value.Pointer)
- p.my = make(map[value.Pointer]value.Pointer)
-}
-
-// Push indicates intent to descend into pointers vx and vy where
-// visited reports whether either has been seen before. If visited before,
-// equal reports whether both pointers were encountered together.
-// Pop must be called if and only if the pointers were never visited.
-//
-// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map
-// and be non-nil.
-func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) {
- px := value.PointerOf(vx)
- py := value.PointerOf(vy)
- _, ok1 := p.mx[px]
- _, ok2 := p.my[py]
- if ok1 || ok2 {
- equal = p.mx[px] == py && p.my[py] == px // Pointers paired together
- return equal, true
- }
- p.mx[px] = py
- p.my[py] = px
- return false, false
-}
-
-// Pop ascends from pointers vx and vy.
-func (p pointerPath) Pop(vx, vy reflect.Value) {
- delete(p.mx, value.PointerOf(vx))
- delete(p.my, value.PointerOf(vy))
-}
-
-// isExported reports whether the identifier is exported.
-func isExported(id string) bool {
- r, _ := utf8.DecodeRuneInString(id)
- return unicode.IsUpper(r)
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go
deleted file mode 100644
index f43cd12e..00000000
--- a/vendor/github.com/google/go-cmp/cmp/report.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2017, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmp
-
-// defaultReporter implements the reporter interface.
-//
-// As Equal serially calls the PushStep, Report, and PopStep methods, the
-// defaultReporter constructs a tree-based representation of the compared value
-// and the result of each comparison (see valueNode).
-//
-// When the String method is called, the FormatDiff method transforms the
-// valueNode tree into a textNode tree, which is a tree-based representation
-// of the textual output (see textNode).
-//
-// Lastly, the textNode.String method produces the final report as a string.
-type defaultReporter struct {
- root *valueNode
- curr *valueNode
-}
-
-func (r *defaultReporter) PushStep(ps PathStep) {
- r.curr = r.curr.PushStep(ps)
- if r.root == nil {
- r.root = r.curr
- }
-}
-func (r *defaultReporter) Report(rs Result) {
- r.curr.Report(rs)
-}
-func (r *defaultReporter) PopStep() {
- r.curr = r.curr.PopStep()
-}
-
-// String provides a full report of the differences detected as a structured
-// literal in pseudo-Go syntax. String may only be called after the entire tree
-// has been traversed.
-func (r *defaultReporter) String() string {
- assert(r.root != nil && r.curr == nil)
- if r.root.NumDiff == 0 {
- return ""
- }
- ptrs := new(pointerReferences)
- text := formatOptions{}.FormatDiff(r.root, ptrs)
- resolveReferences(text)
- return text.String()
-}
-
-func assert(ok bool) {
- if !ok {
- panic("assertion failure")
- }
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go
deleted file mode 100644
index 2050bf6b..00000000
--- a/vendor/github.com/google/go-cmp/cmp/report_compare.go
+++ /dev/null
@@ -1,433 +0,0 @@
-// Copyright 2019, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmp
-
-import (
- "fmt"
- "reflect"
-)
-
-// numContextRecords is the number of surrounding equal records to print.
-const numContextRecords = 2
-
-type diffMode byte
-
-const (
- diffUnknown diffMode = 0
- diffIdentical diffMode = ' '
- diffRemoved diffMode = '-'
- diffInserted diffMode = '+'
-)
-
-type typeMode int
-
-const (
- // emitType always prints the type.
- emitType typeMode = iota
- // elideType never prints the type.
- elideType
- // autoType prints the type only for composite kinds
- // (i.e., structs, slices, arrays, and maps).
- autoType
-)
-
-type formatOptions struct {
- // DiffMode controls the output mode of FormatDiff.
- //
- // If diffUnknown, then produce a diff of the x and y values.
- // If diffIdentical, then emit values as if they were equal.
- // If diffRemoved, then only emit x values (ignoring y values).
- // If diffInserted, then only emit y values (ignoring x values).
- DiffMode diffMode
-
- // TypeMode controls whether to print the type for the current node.
- //
- // As a general rule of thumb, we always print the type of the next node
- // after an interface, and always elide the type of the next node after
- // a slice or map node.
- TypeMode typeMode
-
- // formatValueOptions are options specific to printing reflect.Values.
- formatValueOptions
-}
-
-func (opts formatOptions) WithDiffMode(d diffMode) formatOptions {
- opts.DiffMode = d
- return opts
-}
-func (opts formatOptions) WithTypeMode(t typeMode) formatOptions {
- opts.TypeMode = t
- return opts
-}
-func (opts formatOptions) WithVerbosity(level int) formatOptions {
- opts.VerbosityLevel = level
- opts.LimitVerbosity = true
- return opts
-}
-func (opts formatOptions) verbosity() uint {
- switch {
- case opts.VerbosityLevel < 0:
- return 0
- case opts.VerbosityLevel > 16:
- return 16 // some reasonable maximum to avoid shift overflow
- default:
- return uint(opts.VerbosityLevel)
- }
-}
-
-const maxVerbosityPreset = 6
-
-// verbosityPreset modifies the verbosity settings given an index
-// between 0 and maxVerbosityPreset, inclusive.
-func verbosityPreset(opts formatOptions, i int) formatOptions {
- opts.VerbosityLevel = int(opts.verbosity()) + 2*i
- if i > 0 {
- opts.AvoidStringer = true
- }
- if i >= maxVerbosityPreset {
- opts.PrintAddresses = true
- opts.QualifiedNames = true
- }
- return opts
-}
-
-// FormatDiff converts a valueNode tree into a textNode tree, where the later
-// is a textual representation of the differences detected in the former.
-func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) {
- if opts.DiffMode == diffIdentical {
- opts = opts.WithVerbosity(1)
- } else if opts.verbosity() < 3 {
- opts = opts.WithVerbosity(3)
- }
-
- // Check whether we have specialized formatting for this node.
- // This is not necessary, but helpful for producing more readable outputs.
- if opts.CanFormatDiffSlice(v) {
- return opts.FormatDiffSlice(v)
- }
-
- var parentKind reflect.Kind
- if v.parent != nil && v.parent.TransformerName == "" {
- parentKind = v.parent.Type.Kind()
- }
-
- // For leaf nodes, format the value based on the reflect.Values alone.
- // As a special case, treat equal []byte as a leaf nodes.
- isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType
- isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
- if v.MaxDepth == 0 || isEqualBytes {
- switch opts.DiffMode {
- case diffUnknown, diffIdentical:
- // Format Equal.
- if v.NumDiff == 0 {
- outx := opts.FormatValue(v.ValueX, parentKind, ptrs)
- outy := opts.FormatValue(v.ValueY, parentKind, ptrs)
- if v.NumIgnored > 0 && v.NumSame == 0 {
- return textEllipsis
- } else if outx.Len() < outy.Len() {
- return outx
- } else {
- return outy
- }
- }
-
- // Format unequal.
- assert(opts.DiffMode == diffUnknown)
- var list textList
- outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs)
- outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs)
- for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
- opts2 := verbosityPreset(opts, i).WithTypeMode(elideType)
- outx = opts2.FormatValue(v.ValueX, parentKind, ptrs)
- outy = opts2.FormatValue(v.ValueY, parentKind, ptrs)
- }
- if outx != nil {
- list = append(list, textRecord{Diff: '-', Value: outx})
- }
- if outy != nil {
- list = append(list, textRecord{Diff: '+', Value: outy})
- }
- return opts.WithTypeMode(emitType).FormatType(v.Type, list)
- case diffRemoved:
- return opts.FormatValue(v.ValueX, parentKind, ptrs)
- case diffInserted:
- return opts.FormatValue(v.ValueY, parentKind, ptrs)
- default:
- panic("invalid diff mode")
- }
- }
-
- // Register slice element to support cycle detection.
- if parentKind == reflect.Slice {
- ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true)
- defer ptrs.Pop()
- defer func() { out = wrapTrunkReferences(ptrRefs, out) }()
- }
-
- // Descend into the child value node.
- if v.TransformerName != "" {
- out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
- out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"}
- return opts.FormatType(v.Type, out)
- } else {
- switch k := v.Type.Kind(); k {
- case reflect.Struct, reflect.Array, reflect.Slice:
- out = opts.formatDiffList(v.Records, k, ptrs)
- out = opts.FormatType(v.Type, out)
- case reflect.Map:
- // Register map to support cycle detection.
- ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
- defer ptrs.Pop()
-
- out = opts.formatDiffList(v.Records, k, ptrs)
- out = wrapTrunkReferences(ptrRefs, out)
- out = opts.FormatType(v.Type, out)
- case reflect.Ptr:
- // Register pointer to support cycle detection.
- ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
- defer ptrs.Pop()
-
- out = opts.FormatDiff(v.Value, ptrs)
- out = wrapTrunkReferences(ptrRefs, out)
- out = &textWrap{Prefix: "&", Value: out}
- case reflect.Interface:
- out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
- default:
- panic(fmt.Sprintf("%v cannot have children", k))
- }
- return out
- }
-}
-
-func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode {
- // Derive record name based on the data structure kind.
- var name string
- var formatKey func(reflect.Value) string
- switch k {
- case reflect.Struct:
- name = "field"
- opts = opts.WithTypeMode(autoType)
- formatKey = func(v reflect.Value) string { return v.String() }
- case reflect.Slice, reflect.Array:
- name = "element"
- opts = opts.WithTypeMode(elideType)
- formatKey = func(reflect.Value) string { return "" }
- case reflect.Map:
- name = "entry"
- opts = opts.WithTypeMode(elideType)
- formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) }
- }
-
- maxLen := -1
- if opts.LimitVerbosity {
- if opts.DiffMode == diffIdentical {
- maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
- } else {
- maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc...
- }
- opts.VerbosityLevel--
- }
-
- // Handle unification.
- switch opts.DiffMode {
- case diffIdentical, diffRemoved, diffInserted:
- var list textList
- var deferredEllipsis bool // Add final "..." to indicate records were dropped
- for _, r := range recs {
- if len(list) == maxLen {
- deferredEllipsis = true
- break
- }
-
- // Elide struct fields that are zero value.
- if k == reflect.Struct {
- var isZero bool
- switch opts.DiffMode {
- case diffIdentical:
- isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero()
- case diffRemoved:
- isZero = r.Value.ValueX.IsZero()
- case diffInserted:
- isZero = r.Value.ValueY.IsZero()
- }
- if isZero {
- continue
- }
- }
- // Elide ignored nodes.
- if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 {
- deferredEllipsis = !(k == reflect.Slice || k == reflect.Array)
- if !deferredEllipsis {
- list.AppendEllipsis(diffStats{})
- }
- continue
- }
- if out := opts.FormatDiff(r.Value, ptrs); out != nil {
- list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
- }
- }
- if deferredEllipsis {
- list.AppendEllipsis(diffStats{})
- }
- return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
- case diffUnknown:
- default:
- panic("invalid diff mode")
- }
-
- // Handle differencing.
- var numDiffs int
- var list textList
- var keys []reflect.Value // invariant: len(list) == len(keys)
- groups := coalesceAdjacentRecords(name, recs)
- maxGroup := diffStats{Name: name}
- for i, ds := range groups {
- if maxLen >= 0 && numDiffs >= maxLen {
- maxGroup = maxGroup.Append(ds)
- continue
- }
-
- // Handle equal records.
- if ds.NumDiff() == 0 {
- // Compute the number of leading and trailing records to print.
- var numLo, numHi int
- numEqual := ds.NumIgnored + ds.NumIdentical
- for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 {
- if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
- break
- }
- numLo++
- }
- for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
- if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
- break
- }
- numHi++
- }
- if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 {
- numHi++ // Avoid pointless coalescing of a single equal record
- }
-
- // Format the equal values.
- for _, r := range recs[:numLo] {
- out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
- list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
- keys = append(keys, r.Key)
- }
- if numEqual > numLo+numHi {
- ds.NumIdentical -= numLo + numHi
- list.AppendEllipsis(ds)
- for len(keys) < len(list) {
- keys = append(keys, reflect.Value{})
- }
- }
- for _, r := range recs[numEqual-numHi : numEqual] {
- out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
- list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
- keys = append(keys, r.Key)
- }
- recs = recs[numEqual:]
- continue
- }
-
- // Handle unequal records.
- for _, r := range recs[:ds.NumDiff()] {
- switch {
- case opts.CanFormatDiffSlice(r.Value):
- out := opts.FormatDiffSlice(r.Value)
- list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
- keys = append(keys, r.Key)
- case r.Value.NumChildren == r.Value.MaxDepth:
- outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
- outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
- for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
- opts2 := verbosityPreset(opts, i)
- outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
- outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
- }
- if outx != nil {
- list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx})
- keys = append(keys, r.Key)
- }
- if outy != nil {
- list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy})
- keys = append(keys, r.Key)
- }
- default:
- out := opts.FormatDiff(r.Value, ptrs)
- list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
- keys = append(keys, r.Key)
- }
- }
- recs = recs[ds.NumDiff():]
- numDiffs += ds.NumDiff()
- }
- if maxGroup.IsZero() {
- assert(len(recs) == 0)
- } else {
- list.AppendEllipsis(maxGroup)
- for len(keys) < len(list) {
- keys = append(keys, reflect.Value{})
- }
- }
- assert(len(list) == len(keys))
-
- // For maps, the default formatting logic uses fmt.Stringer which may
- // produce ambiguous output. Avoid calling String to disambiguate.
- if k == reflect.Map {
- var ambiguous bool
- seenKeys := map[string]reflect.Value{}
- for i, currKey := range keys {
- if currKey.IsValid() {
- strKey := list[i].Key
- prevKey, seen := seenKeys[strKey]
- if seen && prevKey.CanInterface() && currKey.CanInterface() {
- ambiguous = prevKey.Interface() != currKey.Interface()
- if ambiguous {
- break
- }
- }
- seenKeys[strKey] = currKey
- }
- }
- if ambiguous {
- for i, k := range keys {
- if k.IsValid() {
- list[i].Key = formatMapKey(k, true, ptrs)
- }
- }
- }
- }
-
- return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
-}
-
-// coalesceAdjacentRecords coalesces the list of records into groups of
-// adjacent equal, or unequal counts.
-func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) {
- var prevCase int // Arbitrary index into which case last occurred
- lastStats := func(i int) *diffStats {
- if prevCase != i {
- groups = append(groups, diffStats{Name: name})
- prevCase = i
- }
- return &groups[len(groups)-1]
- }
- for _, r := range recs {
- switch rv := r.Value; {
- case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0:
- lastStats(1).NumIgnored++
- case rv.NumDiff == 0:
- lastStats(1).NumIdentical++
- case rv.NumDiff > 0 && !rv.ValueY.IsValid():
- lastStats(2).NumRemoved++
- case rv.NumDiff > 0 && !rv.ValueX.IsValid():
- lastStats(2).NumInserted++
- default:
- lastStats(2).NumModified++
- }
- }
- return groups
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_references.go b/vendor/github.com/google/go-cmp/cmp/report_references.go
deleted file mode 100644
index be31b33a..00000000
--- a/vendor/github.com/google/go-cmp/cmp/report_references.go
+++ /dev/null
@@ -1,264 +0,0 @@
-// Copyright 2020, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmp
-
-import (
- "fmt"
- "reflect"
- "strings"
-
- "github.com/google/go-cmp/cmp/internal/flags"
- "github.com/google/go-cmp/cmp/internal/value"
-)
-
-const (
- pointerDelimPrefix = "⟪"
- pointerDelimSuffix = "⟫"
-)
-
-// formatPointer prints the address of the pointer.
-func formatPointer(p value.Pointer, withDelims bool) string {
- v := p.Uintptr()
- if flags.Deterministic {
- v = 0xdeadf00f // Only used for stable testing purposes
- }
- if withDelims {
- return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix
- }
- return formatHex(uint64(v))
-}
-
-// pointerReferences is a stack of pointers visited so far.
-type pointerReferences [][2]value.Pointer
-
-func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) {
- if deref && vx.IsValid() {
- vx = vx.Addr()
- }
- if deref && vy.IsValid() {
- vy = vy.Addr()
- }
- switch d {
- case diffUnknown, diffIdentical:
- pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)}
- case diffRemoved:
- pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}}
- case diffInserted:
- pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)}
- }
- *ps = append(*ps, pp)
- return pp
-}
-
-func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) {
- p = value.PointerOf(v)
- for _, pp := range *ps {
- if p == pp[0] || p == pp[1] {
- return p, true
- }
- }
- *ps = append(*ps, [2]value.Pointer{p, p})
- return p, false
-}
-
-func (ps *pointerReferences) Pop() {
- *ps = (*ps)[:len(*ps)-1]
-}
-
-// trunkReferences is metadata for a textNode indicating that the sub-tree
-// represents the value for either pointer in a pair of references.
-type trunkReferences struct{ pp [2]value.Pointer }
-
-// trunkReference is metadata for a textNode indicating that the sub-tree
-// represents the value for the given pointer reference.
-type trunkReference struct{ p value.Pointer }
-
-// leafReference is metadata for a textNode indicating that the value is
-// truncated as it refers to another part of the tree (i.e., a trunk).
-type leafReference struct{ p value.Pointer }
-
-func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode {
- switch {
- case pp[0].IsNil():
- return &textWrap{Value: s, Metadata: trunkReference{pp[1]}}
- case pp[1].IsNil():
- return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
- case pp[0] == pp[1]:
- return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
- default:
- return &textWrap{Value: s, Metadata: trunkReferences{pp}}
- }
-}
-func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode {
- var prefix string
- if printAddress {
- prefix = formatPointer(p, true)
- }
- return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}}
-}
-func makeLeafReference(p value.Pointer, printAddress bool) textNode {
- out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"}
- var prefix string
- if printAddress {
- prefix = formatPointer(p, true)
- }
- return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}}
-}
-
-// resolveReferences walks the textNode tree searching for any leaf reference
-// metadata and resolves each against the corresponding trunk references.
-// Since pointer addresses in memory are not particularly readable to the user,
-// it replaces each pointer value with an arbitrary and unique reference ID.
-func resolveReferences(s textNode) {
- var walkNodes func(textNode, func(textNode))
- walkNodes = func(s textNode, f func(textNode)) {
- f(s)
- switch s := s.(type) {
- case *textWrap:
- walkNodes(s.Value, f)
- case textList:
- for _, r := range s {
- walkNodes(r.Value, f)
- }
- }
- }
-
- // Collect all trunks and leaves with reference metadata.
- var trunks, leaves []*textWrap
- walkNodes(s, func(s textNode) {
- if s, ok := s.(*textWrap); ok {
- switch s.Metadata.(type) {
- case leafReference:
- leaves = append(leaves, s)
- case trunkReference, trunkReferences:
- trunks = append(trunks, s)
- }
- }
- })
-
- // No leaf references to resolve.
- if len(leaves) == 0 {
- return
- }
-
- // Collect the set of all leaf references to resolve.
- leafPtrs := make(map[value.Pointer]bool)
- for _, leaf := range leaves {
- leafPtrs[leaf.Metadata.(leafReference).p] = true
- }
-
- // Collect the set of trunk pointers that are always paired together.
- // This allows us to assign a single ID to both pointers for brevity.
- // If a pointer in a pair ever occurs by itself or as a different pair,
- // then the pair is broken.
- pairedTrunkPtrs := make(map[value.Pointer]value.Pointer)
- unpair := func(p value.Pointer) {
- if !pairedTrunkPtrs[p].IsNil() {
- pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half
- }
- pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half
- }
- for _, trunk := range trunks {
- switch p := trunk.Metadata.(type) {
- case trunkReference:
- unpair(p.p) // standalone pointer cannot be part of a pair
- case trunkReferences:
- p0, ok0 := pairedTrunkPtrs[p.pp[0]]
- p1, ok1 := pairedTrunkPtrs[p.pp[1]]
- switch {
- case !ok0 && !ok1:
- // Register the newly seen pair.
- pairedTrunkPtrs[p.pp[0]] = p.pp[1]
- pairedTrunkPtrs[p.pp[1]] = p.pp[0]
- case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]:
- // Exact pair already seen; do nothing.
- default:
- // Pair conflicts with some other pair; break all pairs.
- unpair(p.pp[0])
- unpair(p.pp[1])
- }
- }
- }
-
- // Correlate each pointer referenced by leaves to a unique identifier,
- // and print the IDs for each trunk that matches those pointers.
- var nextID uint
- ptrIDs := make(map[value.Pointer]uint)
- newID := func() uint {
- id := nextID
- nextID++
- return id
- }
- for _, trunk := range trunks {
- switch p := trunk.Metadata.(type) {
- case trunkReference:
- if print := leafPtrs[p.p]; print {
- id, ok := ptrIDs[p.p]
- if !ok {
- id = newID()
- ptrIDs[p.p] = id
- }
- trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
- }
- case trunkReferences:
- print0 := leafPtrs[p.pp[0]]
- print1 := leafPtrs[p.pp[1]]
- if print0 || print1 {
- id0, ok0 := ptrIDs[p.pp[0]]
- id1, ok1 := ptrIDs[p.pp[1]]
- isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0]
- if isPair {
- var id uint
- assert(ok0 == ok1) // must be seen together or not at all
- if ok0 {
- assert(id0 == id1) // must have the same ID
- id = id0
- } else {
- id = newID()
- ptrIDs[p.pp[0]] = id
- ptrIDs[p.pp[1]] = id
- }
- trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
- } else {
- if print0 && !ok0 {
- id0 = newID()
- ptrIDs[p.pp[0]] = id0
- }
- if print1 && !ok1 {
- id1 = newID()
- ptrIDs[p.pp[1]] = id1
- }
- switch {
- case print0 && print1:
- trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1))
- case print0:
- trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0))
- case print1:
- trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1))
- }
- }
- }
- }
- }
-
- // Update all leaf references with the unique identifier.
- for _, leaf := range leaves {
- if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok {
- leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id))
- }
- }
-}
-
-func formatReference(id uint) string {
- return fmt.Sprintf("ref#%d", id)
-}
-
-func updateReferencePrefix(prefix, ref string) string {
- if prefix == "" {
- return pointerDelimPrefix + ref + pointerDelimSuffix
- }
- suffix := strings.TrimPrefix(prefix, pointerDelimPrefix)
- return pointerDelimPrefix + ref + ": " + suffix
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
deleted file mode 100644
index e39f4228..00000000
--- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go
+++ /dev/null
@@ -1,414 +0,0 @@
-// Copyright 2019, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmp
-
-import (
- "bytes"
- "fmt"
- "reflect"
- "strconv"
- "strings"
- "unicode"
- "unicode/utf8"
-
- "github.com/google/go-cmp/cmp/internal/value"
-)
-
-var (
- anyType = reflect.TypeOf((*interface{})(nil)).Elem()
- stringType = reflect.TypeOf((*string)(nil)).Elem()
- bytesType = reflect.TypeOf((*[]byte)(nil)).Elem()
- byteType = reflect.TypeOf((*byte)(nil)).Elem()
-)
-
-type formatValueOptions struct {
- // AvoidStringer controls whether to avoid calling custom stringer
- // methods like error.Error or fmt.Stringer.String.
- AvoidStringer bool
-
- // PrintAddresses controls whether to print the address of all pointers,
- // slice elements, and maps.
- PrintAddresses bool
-
- // QualifiedNames controls whether FormatType uses the fully qualified name
- // (including the full package path as opposed to just the package name).
- QualifiedNames bool
-
- // VerbosityLevel controls the amount of output to produce.
- // A higher value produces more output. A value of zero or lower produces
- // no output (represented using an ellipsis).
- // If LimitVerbosity is false, then the level is treated as infinite.
- VerbosityLevel int
-
- // LimitVerbosity specifies that formatting should respect VerbosityLevel.
- LimitVerbosity bool
-}
-
-// FormatType prints the type as if it were wrapping s.
-// This may return s as-is depending on the current type and TypeMode mode.
-func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
- // Check whether to emit the type or not.
- switch opts.TypeMode {
- case autoType:
- switch t.Kind() {
- case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map:
- if s.Equal(textNil) {
- return s
- }
- default:
- return s
- }
- if opts.DiffMode == diffIdentical {
- return s // elide type for identical nodes
- }
- case elideType:
- return s
- }
-
- // Determine the type label, applying special handling for unnamed types.
- typeName := value.TypeString(t, opts.QualifiedNames)
- if t.Name() == "" {
- // According to Go grammar, certain type literals contain symbols that
- // do not strongly bind to the next lexicographical token (e.g., *T).
- switch t.Kind() {
- case reflect.Chan, reflect.Func, reflect.Ptr:
- typeName = "(" + typeName + ")"
- }
- }
- return &textWrap{Prefix: typeName, Value: wrapParens(s)}
-}
-
-// wrapParens wraps s with a set of parenthesis, but avoids it if the
-// wrapped node itself is already surrounded by a pair of parenthesis or braces.
-// It handles unwrapping one level of pointer-reference nodes.
-func wrapParens(s textNode) textNode {
- var refNode *textWrap
- if s2, ok := s.(*textWrap); ok {
- // Unwrap a single pointer reference node.
- switch s2.Metadata.(type) {
- case leafReference, trunkReference, trunkReferences:
- refNode = s2
- if s3, ok := refNode.Value.(*textWrap); ok {
- s2 = s3
- }
- }
-
- // Already has delimiters that make parenthesis unnecessary.
- hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")")
- hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}")
- if hasParens || hasBraces {
- return s
- }
- }
- if refNode != nil {
- refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"}
- return s
- }
- return &textWrap{Prefix: "(", Value: s, Suffix: ")"}
-}
-
-// FormatValue prints the reflect.Value, taking extra care to avoid descending
-// into pointers already in ptrs. As pointers are visited, ptrs is also updated.
-func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) {
- if !v.IsValid() {
- return nil
- }
- t := v.Type()
-
- // Check slice element for cycles.
- if parentKind == reflect.Slice {
- ptrRef, visited := ptrs.Push(v.Addr())
- if visited {
- return makeLeafReference(ptrRef, false)
- }
- defer ptrs.Pop()
- defer func() { out = wrapTrunkReference(ptrRef, false, out) }()
- }
-
- // Check whether there is an Error or String method to call.
- if !opts.AvoidStringer && v.CanInterface() {
- // Avoid calling Error or String methods on nil receivers since many
- // implementations crash when doing so.
- if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() {
- var prefix, strVal string
- func() {
- // Swallow and ignore any panics from String or Error.
- defer func() { recover() }()
- switch v := v.Interface().(type) {
- case error:
- strVal = v.Error()
- prefix = "e"
- case fmt.Stringer:
- strVal = v.String()
- prefix = "s"
- }
- }()
- if prefix != "" {
- return opts.formatString(prefix, strVal)
- }
- }
- }
-
- // Check whether to explicitly wrap the result with the type.
- var skipType bool
- defer func() {
- if !skipType {
- out = opts.FormatType(t, out)
- }
- }()
-
- switch t.Kind() {
- case reflect.Bool:
- return textLine(fmt.Sprint(v.Bool()))
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return textLine(fmt.Sprint(v.Int()))
- case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return textLine(fmt.Sprint(v.Uint()))
- case reflect.Uint8:
- if parentKind == reflect.Slice || parentKind == reflect.Array {
- return textLine(formatHex(v.Uint()))
- }
- return textLine(fmt.Sprint(v.Uint()))
- case reflect.Uintptr:
- return textLine(formatHex(v.Uint()))
- case reflect.Float32, reflect.Float64:
- return textLine(fmt.Sprint(v.Float()))
- case reflect.Complex64, reflect.Complex128:
- return textLine(fmt.Sprint(v.Complex()))
- case reflect.String:
- return opts.formatString("", v.String())
- case reflect.UnsafePointer, reflect.Chan, reflect.Func:
- return textLine(formatPointer(value.PointerOf(v), true))
- case reflect.Struct:
- var list textList
- v := makeAddressable(v) // needed for retrieveUnexportedField
- maxLen := v.NumField()
- if opts.LimitVerbosity {
- maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
- opts.VerbosityLevel--
- }
- for i := 0; i < v.NumField(); i++ {
- vv := v.Field(i)
- if vv.IsZero() {
- continue // Elide fields with zero values
- }
- if len(list) == maxLen {
- list.AppendEllipsis(diffStats{})
- break
- }
- sf := t.Field(i)
- if !isExported(sf.Name) {
- vv = retrieveUnexportedField(v, sf, true)
- }
- s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs)
- list = append(list, textRecord{Key: sf.Name, Value: s})
- }
- return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
- case reflect.Slice:
- if v.IsNil() {
- return textNil
- }
-
- // Check whether this is a []byte of text data.
- if t.Elem() == byteType {
- b := v.Bytes()
- isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) }
- if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
- out = opts.formatString("", string(b))
- skipType = true
- return opts.FormatType(t, out)
- }
- }
-
- fallthrough
- case reflect.Array:
- maxLen := v.Len()
- if opts.LimitVerbosity {
- maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
- opts.VerbosityLevel--
- }
- var list textList
- for i := 0; i < v.Len(); i++ {
- if len(list) == maxLen {
- list.AppendEllipsis(diffStats{})
- break
- }
- s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs)
- list = append(list, textRecord{Value: s})
- }
-
- out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
- if t.Kind() == reflect.Slice && opts.PrintAddresses {
- header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap())
- out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out}
- }
- return out
- case reflect.Map:
- if v.IsNil() {
- return textNil
- }
-
- // Check pointer for cycles.
- ptrRef, visited := ptrs.Push(v)
- if visited {
- return makeLeafReference(ptrRef, opts.PrintAddresses)
- }
- defer ptrs.Pop()
-
- maxLen := v.Len()
- if opts.LimitVerbosity {
- maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
- opts.VerbosityLevel--
- }
- var list textList
- for _, k := range value.SortKeys(v.MapKeys()) {
- if len(list) == maxLen {
- list.AppendEllipsis(diffStats{})
- break
- }
- sk := formatMapKey(k, false, ptrs)
- sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs)
- list = append(list, textRecord{Key: sk, Value: sv})
- }
-
- out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
- out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
- return out
- case reflect.Ptr:
- if v.IsNil() {
- return textNil
- }
-
- // Check pointer for cycles.
- ptrRef, visited := ptrs.Push(v)
- if visited {
- out = makeLeafReference(ptrRef, opts.PrintAddresses)
- return &textWrap{Prefix: "&", Value: out}
- }
- defer ptrs.Pop()
-
- // Skip the name only if this is an unnamed pointer type.
- // Otherwise taking the address of a value does not reproduce
- // the named pointer type.
- if v.Type().Name() == "" {
- skipType = true // Let the underlying value print the type instead
- }
- out = opts.FormatValue(v.Elem(), t.Kind(), ptrs)
- out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
- out = &textWrap{Prefix: "&", Value: out}
- return out
- case reflect.Interface:
- if v.IsNil() {
- return textNil
- }
- // Interfaces accept different concrete types,
- // so configure the underlying value to explicitly print the type.
- return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs)
- default:
- panic(fmt.Sprintf("%v kind not handled", v.Kind()))
- }
-}
-
-func (opts formatOptions) formatString(prefix, s string) textNode {
- maxLen := len(s)
- maxLines := strings.Count(s, "\n") + 1
- if opts.LimitVerbosity {
- maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc...
- maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
- }
-
- // For multiline strings, use the triple-quote syntax,
- // but only use it when printing removed or inserted nodes since
- // we only want the extra verbosity for those cases.
- lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n")
- isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+')
- for i := 0; i < len(lines) && isTripleQuoted; i++ {
- lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
- isPrintable := func(r rune) bool {
- return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
- }
- line := lines[i]
- isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen
- }
- if isTripleQuoted {
- var list textList
- list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
- for i, line := range lines {
- if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 {
- comment := commentString(fmt.Sprintf("%d elided lines", numElided))
- list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment})
- break
- }
- list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true})
- }
- list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
- return &textWrap{Prefix: "(", Value: list, Suffix: ")"}
- }
-
- // Format the string as a single-line quoted string.
- if len(s) > maxLen+len(textEllipsis) {
- return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis))
- }
- return textLine(prefix + formatString(s))
-}
-
-// formatMapKey formats v as if it were a map key.
-// The result is guaranteed to be a single line.
-func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string {
- var opts formatOptions
- opts.DiffMode = diffIdentical
- opts.TypeMode = elideType
- opts.PrintAddresses = disambiguate
- opts.AvoidStringer = disambiguate
- opts.QualifiedNames = disambiguate
- opts.VerbosityLevel = maxVerbosityPreset
- opts.LimitVerbosity = true
- s := opts.FormatValue(v, reflect.Map, ptrs).String()
- return strings.TrimSpace(s)
-}
-
-// formatString prints s as a double-quoted or backtick-quoted string.
-func formatString(s string) string {
- // Use quoted string if it the same length as a raw string literal.
- // Otherwise, attempt to use the raw string form.
- qs := strconv.Quote(s)
- if len(qs) == 1+len(s)+1 {
- return qs
- }
-
- // Disallow newlines to ensure output is a single line.
- // Only allow printable runes for readability purposes.
- rawInvalid := func(r rune) bool {
- return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
- }
- if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 {
- return "`" + s + "`"
- }
- return qs
-}
-
-// formatHex prints u as a hexadecimal integer in Go notation.
-func formatHex(u uint64) string {
- var f string
- switch {
- case u <= 0xff:
- f = "0x%02x"
- case u <= 0xffff:
- f = "0x%04x"
- case u <= 0xffffff:
- f = "0x%06x"
- case u <= 0xffffffff:
- f = "0x%08x"
- case u <= 0xffffffffff:
- f = "0x%010x"
- case u <= 0xffffffffffff:
- f = "0x%012x"
- case u <= 0xffffffffffffff:
- f = "0x%014x"
- case u <= 0xffffffffffffffff:
- f = "0x%016x"
- }
- return fmt.Sprintf(f, u)
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go
deleted file mode 100644
index 23e444f6..00000000
--- a/vendor/github.com/google/go-cmp/cmp/report_slices.go
+++ /dev/null
@@ -1,614 +0,0 @@
-// Copyright 2019, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmp
-
-import (
- "bytes"
- "fmt"
- "math"
- "reflect"
- "strconv"
- "strings"
- "unicode"
- "unicode/utf8"
-
- "github.com/google/go-cmp/cmp/internal/diff"
-)
-
-// CanFormatDiffSlice reports whether we support custom formatting for nodes
-// that are slices of primitive kinds or strings.
-func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
- switch {
- case opts.DiffMode != diffUnknown:
- return false // Must be formatting in diff mode
- case v.NumDiff == 0:
- return false // No differences detected
- case !v.ValueX.IsValid() || !v.ValueY.IsValid():
- return false // Both values must be valid
- case v.NumIgnored > 0:
- return false // Some ignore option was used
- case v.NumTransformed > 0:
- return false // Some transform option was used
- case v.NumCompared > 1:
- return false // More than one comparison was used
- case v.NumCompared == 1 && v.Type.Name() != "":
- // The need for cmp to check applicability of options on every element
- // in a slice is a significant performance detriment for large []byte.
- // The workaround is to specify Comparer(bytes.Equal),
- // which enables cmp to compare []byte more efficiently.
- // If they differ, we still want to provide batched diffing.
- // The logic disallows named types since they tend to have their own
- // String method, with nicer formatting than what this provides.
- return false
- }
-
- // Check whether this is an interface with the same concrete types.
- t := v.Type
- vx, vy := v.ValueX, v.ValueY
- if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() {
- vx, vy = vx.Elem(), vy.Elem()
- t = vx.Type()
- }
-
- // Check whether we provide specialized diffing for this type.
- switch t.Kind() {
- case reflect.String:
- case reflect.Array, reflect.Slice:
- // Only slices of primitive types have specialized handling.
- switch t.Elem().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
- reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
- default:
- return false
- }
-
- // Both slice values have to be non-empty.
- if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) {
- return false
- }
-
- // If a sufficient number of elements already differ,
- // use specialized formatting even if length requirement is not met.
- if v.NumDiff > v.NumSame {
- return true
- }
- default:
- return false
- }
-
- // Use specialized string diffing for longer slices or strings.
- const minLength = 32
- return vx.Len() >= minLength && vy.Len() >= minLength
-}
-
-// FormatDiffSlice prints a diff for the slices (or strings) represented by v.
-// This provides custom-tailored logic to make printing of differences in
-// textual strings and slices of primitive kinds more readable.
-func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
- assert(opts.DiffMode == diffUnknown)
- t, vx, vy := v.Type, v.ValueX, v.ValueY
- if t.Kind() == reflect.Interface {
- vx, vy = vx.Elem(), vy.Elem()
- t = vx.Type()
- opts = opts.WithTypeMode(emitType)
- }
-
- // Auto-detect the type of the data.
- var sx, sy string
- var ssx, ssy []string
- var isString, isMostlyText, isPureLinedText, isBinary bool
- switch {
- case t.Kind() == reflect.String:
- sx, sy = vx.String(), vy.String()
- isString = true
- case t.Kind() == reflect.Slice && t.Elem() == byteType:
- sx, sy = string(vx.Bytes()), string(vy.Bytes())
- isString = true
- case t.Kind() == reflect.Array:
- // Arrays need to be addressable for slice operations to work.
- vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
- vx2.Set(vx)
- vy2.Set(vy)
- vx, vy = vx2, vy2
- }
- if isString {
- var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int
- for i, r := range sx + sy {
- numTotalRunes++
- if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError {
- numValidRunes++
- }
- if r == '\n' {
- if maxLineLen < i-lastLineIdx {
- maxLineLen = i - lastLineIdx
- }
- lastLineIdx = i + 1
- numLines++
- }
- }
- isPureText := numValidRunes == numTotalRunes
- isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes))
- isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024
- isBinary = !isMostlyText
-
- // Avoid diffing by lines if it produces a significantly more complex
- // edit script than diffing by bytes.
- if isPureLinedText {
- ssx = strings.Split(sx, "\n")
- ssy = strings.Split(sy, "\n")
- esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result {
- return diff.BoolResult(ssx[ix] == ssy[iy])
- })
- esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result {
- return diff.BoolResult(sx[ix] == sy[iy])
- })
- efficiencyLines := float64(esLines.Dist()) / float64(len(esLines))
- efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes))
- quotedLength := len(strconv.Quote(sx + sy))
- unquotedLength := len(sx) + len(sy)
- escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength)
- isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1
- }
- }
-
- // Format the string into printable records.
- var list textList
- var delim string
- switch {
- // If the text appears to be multi-lined text,
- // then perform differencing across individual lines.
- case isPureLinedText:
- list = opts.formatDiffSlice(
- reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
- func(v reflect.Value, d diffMode) textRecord {
- s := formatString(v.Index(0).String())
- return textRecord{Diff: d, Value: textLine(s)}
- },
- )
- delim = "\n"
-
- // If possible, use a custom triple-quote (""") syntax for printing
- // differences in a string literal. This format is more readable,
- // but has edge-cases where differences are visually indistinguishable.
- // This format is avoided under the following conditions:
- // - A line starts with `"""`
- // - A line starts with "..."
- // - A line contains non-printable characters
- // - Adjacent different lines differ only by whitespace
- //
- // For example:
- //
- // """
- // ... // 3 identical lines
- // foo
- // bar
- // - baz
- // + BAZ
- // """
- isTripleQuoted := true
- prevRemoveLines := map[string]bool{}
- prevInsertLines := map[string]bool{}
- var list2 textList
- list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
- for _, r := range list {
- if !r.Value.Equal(textEllipsis) {
- line, _ := strconv.Unquote(string(r.Value.(textLine)))
- line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
- normLine := strings.Map(func(r rune) rune {
- if unicode.IsSpace(r) {
- return -1 // drop whitespace to avoid visually indistinguishable output
- }
- return r
- }, line)
- isPrintable := func(r rune) bool {
- return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
- }
- isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == ""
- switch r.Diff {
- case diffRemoved:
- isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine]
- prevRemoveLines[normLine] = true
- case diffInserted:
- isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine]
- prevInsertLines[normLine] = true
- }
- if !isTripleQuoted {
- break
- }
- r.Value = textLine(line)
- r.ElideComma = true
- }
- if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group
- prevRemoveLines = map[string]bool{}
- prevInsertLines = map[string]bool{}
- }
- list2 = append(list2, r)
- }
- if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 {
- list2 = list2[:len(list2)-1] // elide single empty line at the end
- }
- list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
- if isTripleQuoted {
- var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"}
- switch t.Kind() {
- case reflect.String:
- if t != stringType {
- out = opts.FormatType(t, out)
- }
- case reflect.Slice:
- // Always emit type for slices since the triple-quote syntax
- // looks like a string (not a slice).
- opts = opts.WithTypeMode(emitType)
- out = opts.FormatType(t, out)
- }
- return out
- }
-
- // If the text appears to be single-lined text,
- // then perform differencing in approximately fixed-sized chunks.
- // The output is printed as quoted strings.
- case isMostlyText:
- list = opts.formatDiffSlice(
- reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte",
- func(v reflect.Value, d diffMode) textRecord {
- s := formatString(v.String())
- return textRecord{Diff: d, Value: textLine(s)}
- },
- )
-
- // If the text appears to be binary data,
- // then perform differencing in approximately fixed-sized chunks.
- // The output is inspired by hexdump.
- case isBinary:
- list = opts.formatDiffSlice(
- reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte",
- func(v reflect.Value, d diffMode) textRecord {
- var ss []string
- for i := 0; i < v.Len(); i++ {
- ss = append(ss, formatHex(v.Index(i).Uint()))
- }
- s := strings.Join(ss, ", ")
- comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String())))
- return textRecord{Diff: d, Value: textLine(s), Comment: comment}
- },
- )
-
- // For all other slices of primitive types,
- // then perform differencing in approximately fixed-sized chunks.
- // The size of each chunk depends on the width of the element kind.
- default:
- var chunkSize int
- if t.Elem().Kind() == reflect.Bool {
- chunkSize = 16
- } else {
- switch t.Elem().Bits() {
- case 8:
- chunkSize = 16
- case 16:
- chunkSize = 12
- case 32:
- chunkSize = 8
- default:
- chunkSize = 8
- }
- }
- list = opts.formatDiffSlice(
- vx, vy, chunkSize, t.Elem().Kind().String(),
- func(v reflect.Value, d diffMode) textRecord {
- var ss []string
- for i := 0; i < v.Len(); i++ {
- switch t.Elem().Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- ss = append(ss, fmt.Sprint(v.Index(i).Int()))
- case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- ss = append(ss, fmt.Sprint(v.Index(i).Uint()))
- case reflect.Uint8, reflect.Uintptr:
- ss = append(ss, formatHex(v.Index(i).Uint()))
- case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
- ss = append(ss, fmt.Sprint(v.Index(i).Interface()))
- }
- }
- s := strings.Join(ss, ", ")
- return textRecord{Diff: d, Value: textLine(s)}
- },
- )
- }
-
- // Wrap the output with appropriate type information.
- var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
- if !isMostlyText {
- // The "{...}" byte-sequence literal is not valid Go syntax for strings.
- // Emit the type for extra clarity (e.g. "string{...}").
- if t.Kind() == reflect.String {
- opts = opts.WithTypeMode(emitType)
- }
- return opts.FormatType(t, out)
- }
- switch t.Kind() {
- case reflect.String:
- out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
- if t != stringType {
- out = opts.FormatType(t, out)
- }
- case reflect.Slice:
- out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
- if t != bytesType {
- out = opts.FormatType(t, out)
- }
- }
- return out
-}
-
-// formatASCII formats s as an ASCII string.
-// This is useful for printing binary strings in a semi-legible way.
-func formatASCII(s string) string {
- b := bytes.Repeat([]byte{'.'}, len(s))
- for i := 0; i < len(s); i++ {
- if ' ' <= s[i] && s[i] <= '~' {
- b[i] = s[i]
- }
- }
- return string(b)
-}
-
-func (opts formatOptions) formatDiffSlice(
- vx, vy reflect.Value, chunkSize int, name string,
- makeRec func(reflect.Value, diffMode) textRecord,
-) (list textList) {
- eq := func(ix, iy int) bool {
- return vx.Index(ix).Interface() == vy.Index(iy).Interface()
- }
- es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
- return diff.BoolResult(eq(ix, iy))
- })
-
- appendChunks := func(v reflect.Value, d diffMode) int {
- n0 := v.Len()
- for v.Len() > 0 {
- n := chunkSize
- if n > v.Len() {
- n = v.Len()
- }
- list = append(list, makeRec(v.Slice(0, n), d))
- v = v.Slice(n, v.Len())
- }
- return n0 - v.Len()
- }
-
- var numDiffs int
- maxLen := -1
- if opts.LimitVerbosity {
- maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
- opts.VerbosityLevel--
- }
-
- groups := coalesceAdjacentEdits(name, es)
- groups = coalesceInterveningIdentical(groups, chunkSize/4)
- groups = cleanupSurroundingIdentical(groups, eq)
- maxGroup := diffStats{Name: name}
- for i, ds := range groups {
- if maxLen >= 0 && numDiffs >= maxLen {
- maxGroup = maxGroup.Append(ds)
- continue
- }
-
- // Print equal.
- if ds.NumDiff() == 0 {
- // Compute the number of leading and trailing equal bytes to print.
- var numLo, numHi int
- numEqual := ds.NumIgnored + ds.NumIdentical
- for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 {
- numLo++
- }
- for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
- numHi++
- }
- if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 {
- numHi = numEqual - numLo // Avoid pointless coalescing of single equal row
- }
-
- // Print the equal bytes.
- appendChunks(vx.Slice(0, numLo), diffIdentical)
- if numEqual > numLo+numHi {
- ds.NumIdentical -= numLo + numHi
- list.AppendEllipsis(ds)
- }
- appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical)
- vx = vx.Slice(numEqual, vx.Len())
- vy = vy.Slice(numEqual, vy.Len())
- continue
- }
-
- // Print unequal.
- len0 := len(list)
- nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved)
- vx = vx.Slice(nx, vx.Len())
- ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted)
- vy = vy.Slice(ny, vy.Len())
- numDiffs += len(list) - len0
- }
- if maxGroup.IsZero() {
- assert(vx.Len() == 0 && vy.Len() == 0)
- } else {
- list.AppendEllipsis(maxGroup)
- }
- return list
-}
-
-// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent
-// equal or unequal counts.
-//
-// Example:
-//
-// Input: "..XXY...Y"
-// Output: [
-// {NumIdentical: 2},
-// {NumRemoved: 2, NumInserted 1},
-// {NumIdentical: 3},
-// {NumInserted: 1},
-// ]
-func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
- var prevMode byte
- lastStats := func(mode byte) *diffStats {
- if prevMode != mode {
- groups = append(groups, diffStats{Name: name})
- prevMode = mode
- }
- return &groups[len(groups)-1]
- }
- for _, e := range es {
- switch e {
- case diff.Identity:
- lastStats('=').NumIdentical++
- case diff.UniqueX:
- lastStats('!').NumRemoved++
- case diff.UniqueY:
- lastStats('!').NumInserted++
- case diff.Modified:
- lastStats('!').NumModified++
- }
- }
- return groups
-}
-
-// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize)
-// equal groups into adjacent unequal groups that currently result in a
-// dual inserted/removed printout. This acts as a high-pass filter to smooth
-// out high-frequency changes within the windowSize.
-//
-// Example:
-//
-// WindowSize: 16,
-// Input: [
-// {NumIdentical: 61}, // group 0
-// {NumRemoved: 3, NumInserted: 1}, // group 1
-// {NumIdentical: 6}, // ├── coalesce
-// {NumInserted: 2}, // ├── coalesce
-// {NumIdentical: 1}, // ├── coalesce
-// {NumRemoved: 9}, // └── coalesce
-// {NumIdentical: 64}, // group 2
-// {NumRemoved: 3, NumInserted: 1}, // group 3
-// {NumIdentical: 6}, // ├── coalesce
-// {NumInserted: 2}, // ├── coalesce
-// {NumIdentical: 1}, // ├── coalesce
-// {NumRemoved: 7}, // ├── coalesce
-// {NumIdentical: 1}, // ├── coalesce
-// {NumRemoved: 2}, // └── coalesce
-// {NumIdentical: 63}, // group 4
-// ]
-// Output: [
-// {NumIdentical: 61},
-// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3},
-// {NumIdentical: 64},
-// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
-// {NumIdentical: 63},
-// ]
-func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
- groups, groupsOrig := groups[:0], groups
- for i, ds := range groupsOrig {
- if len(groups) >= 2 && ds.NumDiff() > 0 {
- prev := &groups[len(groups)-2] // Unequal group
- curr := &groups[len(groups)-1] // Equal group
- next := &groupsOrig[i] // Unequal group
- hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0
- hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0
- if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize {
- *prev = prev.Append(*curr).Append(*next)
- groups = groups[:len(groups)-1] // Truncate off equal group
- continue
- }
- }
- groups = append(groups, ds)
- }
- return groups
-}
-
-// cleanupSurroundingIdentical scans through all unequal groups, and
-// moves any leading sequence of equal elements to the preceding equal group and
-// moves and trailing sequence of equal elements to the succeeding equal group.
-//
-// This is necessary since coalesceInterveningIdentical may coalesce edit groups
-// together such that leading/trailing spans of equal elements becomes possible.
-// Note that this can occur even with an optimal diffing algorithm.
-//
-// Example:
-//
-// Input: [
-// {NumIdentical: 61},
-// {NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements
-// {NumIdentical: 67},
-// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements
-// {NumIdentical: 54},
-// ]
-// Output: [
-// {NumIdentical: 64}, // incremented by 3
-// {NumRemoved: 9},
-// {NumIdentical: 67},
-// {NumRemoved: 9},
-// {NumIdentical: 64}, // incremented by 10
-// ]
-func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
- var ix, iy int // indexes into sequence x and y
- for i, ds := range groups {
- // Handle equal group.
- if ds.NumDiff() == 0 {
- ix += ds.NumIdentical
- iy += ds.NumIdentical
- continue
- }
-
- // Handle unequal group.
- nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified
- ny := ds.NumIdentical + ds.NumInserted + ds.NumModified
- var numLeadingIdentical, numTrailingIdentical int
- for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ {
- numLeadingIdentical++
- }
- for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ {
- numTrailingIdentical++
- }
- if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 {
- if numLeadingIdentical > 0 {
- // Remove leading identical span from this group and
- // insert it into the preceding group.
- if i-1 >= 0 {
- groups[i-1].NumIdentical += numLeadingIdentical
- } else {
- // No preceding group exists, so prepend a new group,
- // but do so after we finish iterating over all groups.
- defer func() {
- groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...)
- }()
- }
- // Increment indexes since the preceding group would have handled this.
- ix += numLeadingIdentical
- iy += numLeadingIdentical
- }
- if numTrailingIdentical > 0 {
- // Remove trailing identical span from this group and
- // insert it into the succeeding group.
- if i+1 < len(groups) {
- groups[i+1].NumIdentical += numTrailingIdentical
- } else {
- // No succeeding group exists, so append a new group,
- // but do so after we finish iterating over all groups.
- defer func() {
- groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical})
- }()
- }
- // Do not increment indexes since the succeeding group will handle this.
- }
-
- // Update this group since some identical elements were removed.
- nx -= numIdentical
- ny -= numIdentical
- groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny}
- }
- ix += nx
- iy += ny
- }
- return groups
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go
deleted file mode 100644
index 388fcf57..00000000
--- a/vendor/github.com/google/go-cmp/cmp/report_text.go
+++ /dev/null
@@ -1,432 +0,0 @@
-// Copyright 2019, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmp
-
-import (
- "bytes"
- "fmt"
- "math/rand"
- "strings"
- "time"
- "unicode/utf8"
-
- "github.com/google/go-cmp/cmp/internal/flags"
-)
-
-var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
-
-const maxColumnLength = 80
-
-type indentMode int
-
-func (n indentMode) appendIndent(b []byte, d diffMode) []byte {
- // The output of Diff is documented as being unstable to provide future
- // flexibility in changing the output for more humanly readable reports.
- // This logic intentionally introduces instability to the exact output
- // so that users can detect accidental reliance on stability early on,
- // rather than much later when an actual change to the format occurs.
- if flags.Deterministic || randBool {
- // Use regular spaces (U+0020).
- switch d {
- case diffUnknown, diffIdentical:
- b = append(b, " "...)
- case diffRemoved:
- b = append(b, "- "...)
- case diffInserted:
- b = append(b, "+ "...)
- }
- } else {
- // Use non-breaking spaces (U+00a0).
- switch d {
- case diffUnknown, diffIdentical:
- b = append(b, " "...)
- case diffRemoved:
- b = append(b, "- "...)
- case diffInserted:
- b = append(b, "+ "...)
- }
- }
- return repeatCount(n).appendChar(b, '\t')
-}
-
-type repeatCount int
-
-func (n repeatCount) appendChar(b []byte, c byte) []byte {
- for ; n > 0; n-- {
- b = append(b, c)
- }
- return b
-}
-
-// textNode is a simplified tree-based representation of structured text.
-// Possible node types are textWrap, textList, or textLine.
-type textNode interface {
- // Len reports the length in bytes of a single-line version of the tree.
- // Nested textRecord.Diff and textRecord.Comment fields are ignored.
- Len() int
- // Equal reports whether the two trees are structurally identical.
- // Nested textRecord.Diff and textRecord.Comment fields are compared.
- Equal(textNode) bool
- // String returns the string representation of the text tree.
- // It is not guaranteed that len(x.String()) == x.Len(),
- // nor that x.String() == y.String() implies that x.Equal(y).
- String() string
-
- // formatCompactTo formats the contents of the tree as a single-line string
- // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment
- // fields are ignored.
- //
- // However, not all nodes in the tree should be collapsed as a single-line.
- // If a node can be collapsed as a single-line, it is replaced by a textLine
- // node. Since the top-level node cannot replace itself, this also returns
- // the current node itself.
- //
- // This does not mutate the receiver.
- formatCompactTo([]byte, diffMode) ([]byte, textNode)
- // formatExpandedTo formats the contents of the tree as a multi-line string
- // to the provided buffer. In order for column alignment to operate well,
- // formatCompactTo must be called before calling formatExpandedTo.
- formatExpandedTo([]byte, diffMode, indentMode) []byte
-}
-
-// textWrap is a wrapper that concatenates a prefix and/or a suffix
-// to the underlying node.
-type textWrap struct {
- Prefix string // e.g., "bytes.Buffer{"
- Value textNode // textWrap | textList | textLine
- Suffix string // e.g., "}"
- Metadata interface{} // arbitrary metadata; has no effect on formatting
-}
-
-func (s *textWrap) Len() int {
- return len(s.Prefix) + s.Value.Len() + len(s.Suffix)
-}
-func (s1 *textWrap) Equal(s2 textNode) bool {
- if s2, ok := s2.(*textWrap); ok {
- return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix
- }
- return false
-}
-func (s *textWrap) String() string {
- var d diffMode
- var n indentMode
- _, s2 := s.formatCompactTo(nil, d)
- b := n.appendIndent(nil, d) // Leading indent
- b = s2.formatExpandedTo(b, d, n) // Main body
- b = append(b, '\n') // Trailing newline
- return string(b)
-}
-func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
- n0 := len(b) // Original buffer length
- b = append(b, s.Prefix...)
- b, s.Value = s.Value.formatCompactTo(b, d)
- b = append(b, s.Suffix...)
- if _, ok := s.Value.(textLine); ok {
- return b, textLine(b[n0:])
- }
- return b, s
-}
-func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
- b = append(b, s.Prefix...)
- b = s.Value.formatExpandedTo(b, d, n)
- b = append(b, s.Suffix...)
- return b
-}
-
-// textList is a comma-separated list of textWrap or textLine nodes.
-// The list may be formatted as multi-lines or single-line at the discretion
-// of the textList.formatCompactTo method.
-type textList []textRecord
-type textRecord struct {
- Diff diffMode // e.g., 0 or '-' or '+'
- Key string // e.g., "MyField"
- Value textNode // textWrap | textLine
- ElideComma bool // avoid trailing comma
- Comment fmt.Stringer // e.g., "6 identical fields"
-}
-
-// AppendEllipsis appends a new ellipsis node to the list if none already
-// exists at the end. If cs is non-zero it coalesces the statistics with the
-// previous diffStats.
-func (s *textList) AppendEllipsis(ds diffStats) {
- hasStats := !ds.IsZero()
- if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) {
- if hasStats {
- *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds})
- } else {
- *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true})
- }
- return
- }
- if hasStats {
- (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds)
- }
-}
-
-func (s textList) Len() (n int) {
- for i, r := range s {
- n += len(r.Key)
- if r.Key != "" {
- n += len(": ")
- }
- n += r.Value.Len()
- if i < len(s)-1 {
- n += len(", ")
- }
- }
- return n
-}
-
-func (s1 textList) Equal(s2 textNode) bool {
- if s2, ok := s2.(textList); ok {
- if len(s1) != len(s2) {
- return false
- }
- for i := range s1 {
- r1, r2 := s1[i], s2[i]
- if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) {
- return false
- }
- }
- return true
- }
- return false
-}
-
-func (s textList) String() string {
- return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String()
-}
-
-func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
- s = append(textList(nil), s...) // Avoid mutating original
-
- // Determine whether we can collapse this list as a single line.
- n0 := len(b) // Original buffer length
- var multiLine bool
- for i, r := range s {
- if r.Diff == diffInserted || r.Diff == diffRemoved {
- multiLine = true
- }
- b = append(b, r.Key...)
- if r.Key != "" {
- b = append(b, ": "...)
- }
- b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff)
- if _, ok := s[i].Value.(textLine); !ok {
- multiLine = true
- }
- if r.Comment != nil {
- multiLine = true
- }
- if i < len(s)-1 {
- b = append(b, ", "...)
- }
- }
- // Force multi-lined output when printing a removed/inserted node that
- // is sufficiently long.
- if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength {
- multiLine = true
- }
- if !multiLine {
- return b, textLine(b[n0:])
- }
- return b, s
-}
-
-func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
- alignKeyLens := s.alignLens(
- func(r textRecord) bool {
- _, isLine := r.Value.(textLine)
- return r.Key == "" || !isLine
- },
- func(r textRecord) int { return utf8.RuneCountInString(r.Key) },
- )
- alignValueLens := s.alignLens(
- func(r textRecord) bool {
- _, isLine := r.Value.(textLine)
- return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil
- },
- func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) },
- )
-
- // Format lists of simple lists in a batched form.
- // If the list is sequence of only textLine values,
- // then batch multiple values on a single line.
- var isSimple bool
- for _, r := range s {
- _, isLine := r.Value.(textLine)
- isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil
- if !isSimple {
- break
- }
- }
- if isSimple {
- n++
- var batch []byte
- emitBatch := func() {
- if len(batch) > 0 {
- b = n.appendIndent(append(b, '\n'), d)
- b = append(b, bytes.TrimRight(batch, " ")...)
- batch = batch[:0]
- }
- }
- for _, r := range s {
- line := r.Value.(textLine)
- if len(batch)+len(line)+len(", ") > maxColumnLength {
- emitBatch()
- }
- batch = append(batch, line...)
- batch = append(batch, ", "...)
- }
- emitBatch()
- n--
- return n.appendIndent(append(b, '\n'), d)
- }
-
- // Format the list as a multi-lined output.
- n++
- for i, r := range s {
- b = n.appendIndent(append(b, '\n'), d|r.Diff)
- if r.Key != "" {
- b = append(b, r.Key+": "...)
- }
- b = alignKeyLens[i].appendChar(b, ' ')
-
- b = r.Value.formatExpandedTo(b, d|r.Diff, n)
- if !r.ElideComma {
- b = append(b, ',')
- }
- b = alignValueLens[i].appendChar(b, ' ')
-
- if r.Comment != nil {
- b = append(b, " // "+r.Comment.String()...)
- }
- }
- n--
-
- return n.appendIndent(append(b, '\n'), d)
-}
-
-func (s textList) alignLens(
- skipFunc func(textRecord) bool,
- lenFunc func(textRecord) int,
-) []repeatCount {
- var startIdx, endIdx, maxLen int
- lens := make([]repeatCount, len(s))
- for i, r := range s {
- if skipFunc(r) {
- for j := startIdx; j < endIdx && j < len(s); j++ {
- lens[j] = repeatCount(maxLen - lenFunc(s[j]))
- }
- startIdx, endIdx, maxLen = i+1, i+1, 0
- } else {
- if maxLen < lenFunc(r) {
- maxLen = lenFunc(r)
- }
- endIdx = i + 1
- }
- }
- for j := startIdx; j < endIdx && j < len(s); j++ {
- lens[j] = repeatCount(maxLen - lenFunc(s[j]))
- }
- return lens
-}
-
-// textLine is a single-line segment of text and is always a leaf node
-// in the textNode tree.
-type textLine []byte
-
-var (
- textNil = textLine("nil")
- textEllipsis = textLine("...")
-)
-
-func (s textLine) Len() int {
- return len(s)
-}
-func (s1 textLine) Equal(s2 textNode) bool {
- if s2, ok := s2.(textLine); ok {
- return bytes.Equal([]byte(s1), []byte(s2))
- }
- return false
-}
-func (s textLine) String() string {
- return string(s)
-}
-func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
- return append(b, s...), s
-}
-func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte {
- return append(b, s...)
-}
-
-type diffStats struct {
- Name string
- NumIgnored int
- NumIdentical int
- NumRemoved int
- NumInserted int
- NumModified int
-}
-
-func (s diffStats) IsZero() bool {
- s.Name = ""
- return s == diffStats{}
-}
-
-func (s diffStats) NumDiff() int {
- return s.NumRemoved + s.NumInserted + s.NumModified
-}
-
-func (s diffStats) Append(ds diffStats) diffStats {
- assert(s.Name == ds.Name)
- s.NumIgnored += ds.NumIgnored
- s.NumIdentical += ds.NumIdentical
- s.NumRemoved += ds.NumRemoved
- s.NumInserted += ds.NumInserted
- s.NumModified += ds.NumModified
- return s
-}
-
-// String prints a humanly-readable summary of coalesced records.
-//
-// Example:
-//
-// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
-func (s diffStats) String() string {
- var ss []string
- var sum int
- labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"}
- counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified}
- for i, n := range counts {
- if n > 0 {
- ss = append(ss, fmt.Sprintf("%d %v", n, labels[i]))
- }
- sum += n
- }
-
- // Pluralize the name (adjusting for some obscure English grammar rules).
- name := s.Name
- if sum > 1 {
- name += "s"
- if strings.HasSuffix(name, "ys") {
- name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
- }
- }
-
- // Format the list according to English grammar (with Oxford comma).
- switch n := len(ss); n {
- case 0:
- return ""
- case 1, 2:
- return strings.Join(ss, " and ") + " " + name
- default:
- return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
- }
-}
-
-type commentString string
-
-func (s commentString) String() string { return string(s) }
diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go
deleted file mode 100644
index 668d470f..00000000
--- a/vendor/github.com/google/go-cmp/cmp/report_value.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2019, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cmp
-
-import "reflect"
-
-// valueNode represents a single node within a report, which is a
-// structured representation of the value tree, containing information
-// regarding which nodes are equal or not.
-type valueNode struct {
- parent *valueNode
-
- Type reflect.Type
- ValueX reflect.Value
- ValueY reflect.Value
-
- // NumSame is the number of leaf nodes that are equal.
- // All descendants are equal only if NumDiff is 0.
- NumSame int
- // NumDiff is the number of leaf nodes that are not equal.
- NumDiff int
- // NumIgnored is the number of leaf nodes that are ignored.
- NumIgnored int
- // NumCompared is the number of leaf nodes that were compared
- // using an Equal method or Comparer function.
- NumCompared int
- // NumTransformed is the number of non-leaf nodes that were transformed.
- NumTransformed int
- // NumChildren is the number of transitive descendants of this node.
- // This counts from zero; thus, leaf nodes have no descendants.
- NumChildren int
- // MaxDepth is the maximum depth of the tree. This counts from zero;
- // thus, leaf nodes have a depth of zero.
- MaxDepth int
-
- // Records is a list of struct fields, slice elements, or map entries.
- Records []reportRecord // If populated, implies Value is not populated
-
- // Value is the result of a transformation, pointer indirect, of
- // type assertion.
- Value *valueNode // If populated, implies Records is not populated
-
- // TransformerName is the name of the transformer.
- TransformerName string // If non-empty, implies Value is populated
-}
-type reportRecord struct {
- Key reflect.Value // Invalid for slice element
- Value *valueNode
-}
-
-func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) {
- vx, vy := ps.Values()
- child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy}
- switch s := ps.(type) {
- case StructField:
- assert(parent.Value == nil)
- parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child})
- case SliceIndex:
- assert(parent.Value == nil)
- parent.Records = append(parent.Records, reportRecord{Value: child})
- case MapIndex:
- assert(parent.Value == nil)
- parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child})
- case Indirect:
- assert(parent.Value == nil && parent.Records == nil)
- parent.Value = child
- case TypeAssertion:
- assert(parent.Value == nil && parent.Records == nil)
- parent.Value = child
- case Transform:
- assert(parent.Value == nil && parent.Records == nil)
- parent.Value = child
- parent.TransformerName = s.Name()
- parent.NumTransformed++
- default:
- assert(parent == nil) // Must be the root step
- }
- return child
-}
-
-func (r *valueNode) Report(rs Result) {
- assert(r.MaxDepth == 0) // May only be called on leaf nodes
-
- if rs.ByIgnore() {
- r.NumIgnored++
- } else {
- if rs.Equal() {
- r.NumSame++
- } else {
- r.NumDiff++
- }
- }
- assert(r.NumSame+r.NumDiff+r.NumIgnored == 1)
-
- if rs.ByMethod() {
- r.NumCompared++
- }
- if rs.ByFunc() {
- r.NumCompared++
- }
- assert(r.NumCompared <= 1)
-}
-
-func (child *valueNode) PopStep() (parent *valueNode) {
- if child.parent == nil {
- return nil
- }
- parent = child.parent
- parent.NumSame += child.NumSame
- parent.NumDiff += child.NumDiff
- parent.NumIgnored += child.NumIgnored
- parent.NumCompared += child.NumCompared
- parent.NumTransformed += child.NumTransformed
- parent.NumChildren += child.NumChildren + 1
- if parent.MaxDepth < child.MaxDepth+1 {
- parent.MaxDepth = child.MaxDepth + 1
- }
- return parent
-}
diff --git a/vendor/github.com/google/s2a-go/.gitignore b/vendor/github.com/google/s2a-go/.gitignore
deleted file mode 100644
index 01764d1c..00000000
--- a/vendor/github.com/google/s2a-go/.gitignore
+++ /dev/null
@@ -1,6 +0,0 @@
-# Ignore binaries without extension
-//example/client/client
-//example/server/server
-//internal/v2/fakes2av2_server/fakes2av2_server
-
-.idea/
\ No newline at end of file
diff --git a/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md b/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md
deleted file mode 100644
index dc079b4d..00000000
--- a/vendor/github.com/google/s2a-go/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,93 +0,0 @@
-# Code of Conduct
-
-## Our Pledge
-
-In the interest of fostering an open and welcoming environment, we as
-contributors and maintainers pledge to making participation in our project and
-our community a harassment-free experience for everyone, regardless of age, body
-size, disability, ethnicity, gender identity and expression, level of
-experience, education, socio-economic status, nationality, personal appearance,
-race, religion, or sexual identity and orientation.
-
-## Our Standards
-
-Examples of behavior that contributes to creating a positive environment
-include:
-
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery and unwelcome sexual attention or
- advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic
- address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a
- professional setting
-
-## Our Responsibilities
-
-Project maintainers are responsible for clarifying the standards of acceptable
-behavior and are expected to take appropriate and fair corrective action in
-response to any instances of unacceptable behavior.
-
-Project maintainers have the right and responsibility to remove, edit, or reject
-comments, commits, code, wiki edits, issues, and other contributions that are
-not aligned to this Code of Conduct, or to ban temporarily or permanently any
-contributor for other behaviors that they deem inappropriate, threatening,
-offensive, or harmful.
-
-## Scope
-
-This Code of Conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community. Examples of
-representing a project or community include using an official project e-mail
-address, posting via an official social media account, or acting as an appointed
-representative at an online or offline event. Representation of a project may be
-further defined and clarified by project maintainers.
-
-This Code of Conduct also applies outside the project spaces when the Project
-Steward has a reasonable belief that an individual's behavior may have a
-negative impact on the project or its community.
-
-## Conflict Resolution
-
-We do not believe that all conflict is bad; healthy debate and disagreement
-often yield positive results. However, it is never okay to be disrespectful or
-to engage in behavior that violates the project’s code of conduct.
-
-If you see someone violating the code of conduct, you are encouraged to address
-the behavior directly with those involved. Many issues can be resolved quickly
-and easily, and this gives people more control over the outcome of their
-dispute. If you are unable to resolve the matter for any reason, or if the
-behavior is threatening or harassing, report it. We are dedicated to providing
-an environment where participants feel welcome and safe.
-
-Reports should be directed to *[PROJECT STEWARD NAME(s) AND EMAIL(s)]*, the
-Project Steward(s) for *[PROJECT NAME]*. It is the Project Steward’s duty to
-receive and address reported violations of the code of conduct. They will then
-work with a committee consisting of representatives from the Open Source
-Programs Office and the Google Open Source Strategy team. If for any reason you
-are uncomfortable reaching out to the Project Steward, please email
-opensource@google.com.
-
-We will investigate every complaint, but you may not receive a direct response.
-We will use our discretion in determining when and how to follow up on reported
-incidents, which may range from not taking action to permanent expulsion from
-the project and project-sponsored spaces. We will notify the accused of the
-report and provide them an opportunity to discuss it before any action is taken.
-The identity of the reporter will be omitted from the details of the report
-supplied to the accused. In potentially harmful situations, such as ongoing
-harassment or threats to anyone's safety, we may take action without notice.
-
-## Attribution
-
-This Code of Conduct is adapted from the Contributor Covenant, version 1.4,
-available at
-https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
diff --git a/vendor/github.com/google/s2a-go/CONTRIBUTING.md b/vendor/github.com/google/s2a-go/CONTRIBUTING.md
deleted file mode 100644
index 22b241cb..00000000
--- a/vendor/github.com/google/s2a-go/CONTRIBUTING.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# How to Contribute
-
-We'd love to accept your patches and contributions to this project. There are
-just a few small guidelines you need to follow.
-
-## Contributor License Agreement
-
-Contributions to this project must be accompanied by a Contributor License
-Agreement (CLA). You (or your employer) retain the copyright to your
-contribution; this simply gives us permission to use and redistribute your
-contributions as part of the project. Head over to
- to see your current agreements on file or
-to sign a new one.
-
-You generally only need to submit a CLA once, so if you've already submitted one
-(even if it was for a different project), you probably don't need to do it
-again.
-
-## Code reviews
-
-All submissions, including submissions by project members, require review. We
-use GitHub pull requests for this purpose. Consult
-[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
-information on using pull requests.
-
-## Community Guidelines
-
-This project follows
-[Google's Open Source Community Guidelines](https://opensource.google/conduct/).
diff --git a/vendor/github.com/google/s2a-go/LICENSE.md b/vendor/github.com/google/s2a-go/LICENSE.md
deleted file mode 100644
index d6456956..00000000
--- a/vendor/github.com/google/s2a-go/LICENSE.md
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/google/s2a-go/README.md b/vendor/github.com/google/s2a-go/README.md
deleted file mode 100644
index fe0f5c1d..00000000
--- a/vendor/github.com/google/s2a-go/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# Secure Session Agent Client Libraries
-
-The Secure Session Agent is a service that enables a workload to offload select
-operations from the mTLS handshake and protects a workload's private key
-material from exfiltration. Specifically, the workload asks the Secure Session
-Agent for the TLS configuration to use during the handshake, to perform private
-key operations, and to validate the peer certificate chain. The Secure Session
-Agent's client libraries enable applications to communicate with the Secure
-Session Agent during the TLS handshake, and to encrypt traffic to the peer
-after the TLS handshake is complete.
-
-This repository contains the source code for the Secure Session Agent's Go
-client libraries, which allow gRPC and HTTP Go applications to use the Secure Session
-Agent.
diff --git a/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go b/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go
deleted file mode 100644
index 034d1b91..00000000
--- a/vendor/github.com/google/s2a-go/fallback/s2a_fallback.go
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- *
- * Copyright 2023 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package fallback provides default implementations of fallback options when S2A fails.
-package fallback
-
-import (
- "context"
- "crypto/tls"
- "fmt"
- "net"
-
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
-)
-
-const (
- alpnProtoStrH2 = "h2"
- alpnProtoStrHTTP = "http/1.1"
- defaultHTTPSPort = "443"
-)
-
-// FallbackTLSConfigGRPC is a tls.Config used by the DefaultFallbackClientHandshakeFunc function.
-// It supports GRPC use case, thus the alpn is set to 'h2'.
-var FallbackTLSConfigGRPC = tls.Config{
- MinVersion: tls.VersionTLS13,
- ClientSessionCache: nil,
- NextProtos: []string{alpnProtoStrH2},
-}
-
-// FallbackTLSConfigHTTP is a tls.Config used by the DefaultFallbackDialerAndAddress func.
-// It supports the HTTP use case and the alpn is set to both 'http/1.1' and 'h2'.
-var FallbackTLSConfigHTTP = tls.Config{
- MinVersion: tls.VersionTLS13,
- ClientSessionCache: nil,
- NextProtos: []string{alpnProtoStrH2, alpnProtoStrHTTP},
-}
-
-// ClientHandshake establishes a TLS connection and returns it, plus its auth info.
-// Inputs:
-//
-// targetServer: the server attempted with S2A.
-// conn: the tcp connection to the server at address targetServer that was passed into S2A's ClientHandshake func.
-// If fallback is successful, the `conn` should be closed.
-// err: the error encountered when performing the client-side TLS handshake with S2A.
-type ClientHandshake func(ctx context.Context, targetServer string, conn net.Conn, err error) (net.Conn, credentials.AuthInfo, error)
-
-// DefaultFallbackClientHandshakeFunc returns a ClientHandshake function,
-// which establishes a TLS connection to the provided fallbackAddr, returns the new connection and its auth info.
-// Example use:
-//
-// transportCreds, _ = s2a.NewClientCreds(&s2a.ClientOptions{
-// S2AAddress: s2aAddress,
-// FallbackOpts: &s2a.FallbackOptions{ // optional
-// FallbackClientHandshakeFunc: fallback.DefaultFallbackClientHandshakeFunc(fallbackAddr),
-// },
-// })
-//
-// The fallback server's certificate must be verifiable using OS root store.
-// The fallbackAddr is expected to be a network address, e.g. example.com:port. If port is not specified,
-// it uses default port 443.
-// In the returned function's TLS config, ClientSessionCache is explicitly set to nil to disable TLS resumption,
-// and min TLS version is set to 1.3.
-func DefaultFallbackClientHandshakeFunc(fallbackAddr string) (ClientHandshake, error) {
- var fallbackDialer = tls.Dialer{Config: &FallbackTLSConfigGRPC}
- return defaultFallbackClientHandshakeFuncInternal(fallbackAddr, fallbackDialer.DialContext)
-}
-
-func defaultFallbackClientHandshakeFuncInternal(fallbackAddr string, dialContextFunc func(context.Context, string, string) (net.Conn, error)) (ClientHandshake, error) {
- fallbackServerAddr, err := processFallbackAddr(fallbackAddr)
- if err != nil {
- if grpclog.V(1) {
- grpclog.Infof("error processing fallback address [%s]: %v", fallbackAddr, err)
- }
- return nil, err
- }
- return func(ctx context.Context, targetServer string, conn net.Conn, s2aErr error) (net.Conn, credentials.AuthInfo, error) {
- fbConn, fbErr := dialContextFunc(ctx, "tcp", fallbackServerAddr)
- if fbErr != nil {
- grpclog.Infof("dialing to fallback server %s failed: %v", fallbackServerAddr, fbErr)
- return nil, nil, fmt.Errorf("dialing to fallback server %s failed: %v; S2A client handshake with %s error: %w", fallbackServerAddr, fbErr, targetServer, s2aErr)
- }
-
- tc, success := fbConn.(*tls.Conn)
- if !success {
- grpclog.Infof("the connection with fallback server is expected to be tls but isn't")
- return nil, nil, fmt.Errorf("the connection with fallback server is expected to be tls but isn't; S2A client handshake with %s error: %w", targetServer, s2aErr)
- }
-
- tlsInfo := credentials.TLSInfo{
- State: tc.ConnectionState(),
- CommonAuthInfo: credentials.CommonAuthInfo{
- SecurityLevel: credentials.PrivacyAndIntegrity,
- },
- }
- if grpclog.V(1) {
- grpclog.Infof("ConnectionState.NegotiatedProtocol: %v", tc.ConnectionState().NegotiatedProtocol)
- grpclog.Infof("ConnectionState.HandshakeComplete: %v", tc.ConnectionState().HandshakeComplete)
- grpclog.Infof("ConnectionState.ServerName: %v", tc.ConnectionState().ServerName)
- }
- conn.Close()
- return fbConn, tlsInfo, nil
- }, nil
-}
-
-// DefaultFallbackDialerAndAddress returns a TLS dialer and the network address to dial.
-// Example use:
-//
-// fallbackDialer, fallbackServerAddr := fallback.DefaultFallbackDialerAndAddress(fallbackAddr)
-// dialTLSContext := s2a.NewS2aDialTLSContextFunc(&s2a.ClientOptions{
-// S2AAddress: s2aAddress, // required
-// FallbackOpts: &s2a.FallbackOptions{
-// FallbackDialer: &s2a.FallbackDialer{
-// Dialer: fallbackDialer,
-// ServerAddr: fallbackServerAddr,
-// },
-// },
-// })
-//
-// The fallback server's certificate should be verifiable using OS root store.
-// The fallbackAddr is expected to be a network address, e.g. example.com:port. If port is not specified,
-// it uses default port 443.
-// In the returned function's TLS config, ClientSessionCache is explicitly set to nil to disable TLS resumption,
-// and min TLS version is set to 1.3.
-func DefaultFallbackDialerAndAddress(fallbackAddr string) (*tls.Dialer, string, error) {
- fallbackServerAddr, err := processFallbackAddr(fallbackAddr)
- if err != nil {
- if grpclog.V(1) {
- grpclog.Infof("error processing fallback address [%s]: %v", fallbackAddr, err)
- }
- return nil, "", err
- }
- return &tls.Dialer{Config: &FallbackTLSConfigHTTP}, fallbackServerAddr, nil
-}
-
-func processFallbackAddr(fallbackAddr string) (string, error) {
- var fallbackServerAddr string
- var err error
-
- if fallbackAddr == "" {
- return "", fmt.Errorf("empty fallback address")
- }
- _, _, err = net.SplitHostPort(fallbackAddr)
- if err != nil {
- // fallbackAddr does not have port suffix
- fallbackServerAddr = net.JoinHostPort(fallbackAddr, defaultHTTPSPort)
- } else {
- // FallbackServerAddr already has port suffix
- fallbackServerAddr = fallbackAddr
- }
- return fallbackServerAddr, nil
-}
diff --git a/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go b/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go
deleted file mode 100644
index aa3967f9..00000000
--- a/vendor/github.com/google/s2a-go/internal/authinfo/authinfo.go
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- *
- * Copyright 2021 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package authinfo provides authentication and authorization information that
-// results from the TLS handshake.
-package authinfo
-
-import (
- "errors"
-
- commonpb "github.com/google/s2a-go/internal/proto/common_go_proto"
- contextpb "github.com/google/s2a-go/internal/proto/s2a_context_go_proto"
- grpcpb "github.com/google/s2a-go/internal/proto/s2a_go_proto"
- "google.golang.org/grpc/credentials"
-)
-
-var _ credentials.AuthInfo = (*S2AAuthInfo)(nil)
-
-const s2aAuthType = "s2a"
-
-// S2AAuthInfo exposes authentication and authorization information from the
-// S2A session result to the gRPC stack.
-type S2AAuthInfo struct {
- s2aContext *contextpb.S2AContext
- commonAuthInfo credentials.CommonAuthInfo
-}
-
-// NewS2AAuthInfo returns a new S2AAuthInfo object from the S2A session result.
-func NewS2AAuthInfo(result *grpcpb.SessionResult) (credentials.AuthInfo, error) {
- return newS2AAuthInfo(result)
-}
-
-func newS2AAuthInfo(result *grpcpb.SessionResult) (*S2AAuthInfo, error) {
- if result == nil {
- return nil, errors.New("NewS2aAuthInfo given nil session result")
- }
- return &S2AAuthInfo{
- s2aContext: &contextpb.S2AContext{
- ApplicationProtocol: result.GetApplicationProtocol(),
- TlsVersion: result.GetState().GetTlsVersion(),
- Ciphersuite: result.GetState().GetTlsCiphersuite(),
- PeerIdentity: result.GetPeerIdentity(),
- LocalIdentity: result.GetLocalIdentity(),
- PeerCertFingerprint: result.GetPeerCertFingerprint(),
- LocalCertFingerprint: result.GetLocalCertFingerprint(),
- IsHandshakeResumed: result.GetState().GetIsHandshakeResumed(),
- },
- commonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity},
- }, nil
-}
-
-// AuthType returns the authentication type.
-func (s *S2AAuthInfo) AuthType() string {
- return s2aAuthType
-}
-
-// ApplicationProtocol returns the application protocol, e.g. "grpc".
-func (s *S2AAuthInfo) ApplicationProtocol() string {
- return s.s2aContext.GetApplicationProtocol()
-}
-
-// TLSVersion returns the TLS version negotiated during the handshake.
-func (s *S2AAuthInfo) TLSVersion() commonpb.TLSVersion {
- return s.s2aContext.GetTlsVersion()
-}
-
-// Ciphersuite returns the ciphersuite negotiated during the handshake.
-func (s *S2AAuthInfo) Ciphersuite() commonpb.Ciphersuite {
- return s.s2aContext.GetCiphersuite()
-}
-
-// PeerIdentity returns the authenticated identity of the peer.
-func (s *S2AAuthInfo) PeerIdentity() *commonpb.Identity {
- return s.s2aContext.GetPeerIdentity()
-}
-
-// LocalIdentity returns the local identity of the application used during
-// session setup.
-func (s *S2AAuthInfo) LocalIdentity() *commonpb.Identity {
- return s.s2aContext.GetLocalIdentity()
-}
-
-// PeerCertFingerprint returns the SHA256 hash of the peer certificate used in
-// the S2A handshake.
-func (s *S2AAuthInfo) PeerCertFingerprint() []byte {
- return s.s2aContext.GetPeerCertFingerprint()
-}
-
-// LocalCertFingerprint returns the SHA256 hash of the local certificate used
-// in the S2A handshake.
-func (s *S2AAuthInfo) LocalCertFingerprint() []byte {
- return s.s2aContext.GetLocalCertFingerprint()
-}
-
-// IsHandshakeResumed returns true if a cached session was used to resume
-// the handshake.
-func (s *S2AAuthInfo) IsHandshakeResumed() bool {
- return s.s2aContext.GetIsHandshakeResumed()
-}
-
-// SecurityLevel returns the security level of the connection.
-func (s *S2AAuthInfo) SecurityLevel() credentials.SecurityLevel {
- return s.commonAuthInfo.SecurityLevel
-}
diff --git a/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go b/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go
deleted file mode 100644
index 8297c9a9..00000000
--- a/vendor/github.com/google/s2a-go/internal/handshaker/handshaker.go
+++ /dev/null
@@ -1,438 +0,0 @@
-/*
- *
- * Copyright 2021 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package handshaker communicates with the S2A handshaker service.
-package handshaker
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
- "net"
- "sync"
-
- "github.com/google/s2a-go/internal/authinfo"
- commonpb "github.com/google/s2a-go/internal/proto/common_go_proto"
- s2apb "github.com/google/s2a-go/internal/proto/s2a_go_proto"
- "github.com/google/s2a-go/internal/record"
- "github.com/google/s2a-go/internal/tokenmanager"
- grpc "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
-)
-
-var (
- // appProtocol contains the application protocol accepted by the handshaker.
- appProtocol = "grpc"
- // frameLimit is the maximum size of a frame in bytes.
- frameLimit = 1024 * 64
- // peerNotRespondingError is the error thrown when the peer doesn't respond.
- errPeerNotResponding = errors.New("peer is not responding and re-connection should be attempted")
-)
-
-// Handshaker defines a handshaker interface.
-type Handshaker interface {
- // ClientHandshake starts and completes a TLS handshake from the client side,
- // and returns a secure connection along with additional auth information.
- ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error)
- // ServerHandshake starts and completes a TLS handshake from the server side,
- // and returns a secure connection along with additional auth information.
- ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error)
- // Close terminates the Handshaker. It should be called when the handshake
- // is complete.
- Close() error
-}
-
-// ClientHandshakerOptions contains the options needed to configure the S2A
-// handshaker service on the client-side.
-type ClientHandshakerOptions struct {
- // MinTLSVersion specifies the min TLS version supported by the client.
- MinTLSVersion commonpb.TLSVersion
- // MaxTLSVersion specifies the max TLS version supported by the client.
- MaxTLSVersion commonpb.TLSVersion
- // TLSCiphersuites is the ordered list of ciphersuites supported by the
- // client.
- TLSCiphersuites []commonpb.Ciphersuite
- // TargetIdentities contains a list of allowed server identities. One of the
- // target identities should match the peer identity in the handshake
- // result; otherwise, the handshake fails.
- TargetIdentities []*commonpb.Identity
- // LocalIdentity is the local identity of the client application. If none is
- // provided, then the S2A will choose the default identity.
- LocalIdentity *commonpb.Identity
- // TargetName is the allowed server name, which may be used for server
- // authorization check by the S2A if it is provided.
- TargetName string
- // EnsureProcessSessionTickets allows users to wait and ensure that all
- // available session tickets are sent to S2A before a process completes.
- EnsureProcessSessionTickets *sync.WaitGroup
-}
-
-// ServerHandshakerOptions contains the options needed to configure the S2A
-// handshaker service on the server-side.
-type ServerHandshakerOptions struct {
- // MinTLSVersion specifies the min TLS version supported by the server.
- MinTLSVersion commonpb.TLSVersion
- // MaxTLSVersion specifies the max TLS version supported by the server.
- MaxTLSVersion commonpb.TLSVersion
- // TLSCiphersuites is the ordered list of ciphersuites supported by the
- // server.
- TLSCiphersuites []commonpb.Ciphersuite
- // LocalIdentities is the list of local identities that may be assumed by
- // the server. If no local identity is specified, then the S2A chooses a
- // default local identity.
- LocalIdentities []*commonpb.Identity
-}
-
-// s2aHandshaker performs a TLS handshake using the S2A handshaker service.
-type s2aHandshaker struct {
- // stream is used to communicate with the S2A handshaker service.
- stream s2apb.S2AService_SetUpSessionClient
- // conn is the connection to the peer.
- conn net.Conn
- // clientOpts should be non-nil iff the handshaker is client-side.
- clientOpts *ClientHandshakerOptions
- // serverOpts should be non-nil iff the handshaker is server-side.
- serverOpts *ServerHandshakerOptions
- // isClient determines if the handshaker is client or server side.
- isClient bool
- // hsAddr stores the address of the S2A handshaker service.
- hsAddr string
- // tokenManager manages access tokens for authenticating to S2A.
- tokenManager tokenmanager.AccessTokenManager
- // localIdentities is the set of local identities for whom the
- // tokenManager should fetch a token when preparing a request to be
- // sent to S2A.
- localIdentities []*commonpb.Identity
-}
-
-// NewClientHandshaker creates an s2aHandshaker instance that performs a
-// client-side TLS handshake using the S2A handshaker service.
-func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, hsAddr string, opts *ClientHandshakerOptions) (Handshaker, error) {
- stream, err := s2apb.NewS2AServiceClient(conn).SetUpSession(ctx, grpc.WaitForReady(true))
- if err != nil {
- return nil, err
- }
- tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager()
- if err != nil {
- grpclog.Infof("failed to create single token access token manager: %v", err)
- }
- return newClientHandshaker(stream, c, hsAddr, opts, tokenManager), nil
-}
-
-func newClientHandshaker(stream s2apb.S2AService_SetUpSessionClient, c net.Conn, hsAddr string, opts *ClientHandshakerOptions, tokenManager tokenmanager.AccessTokenManager) *s2aHandshaker {
- var localIdentities []*commonpb.Identity
- if opts != nil {
- localIdentities = []*commonpb.Identity{opts.LocalIdentity}
- }
- return &s2aHandshaker{
- stream: stream,
- conn: c,
- clientOpts: opts,
- isClient: true,
- hsAddr: hsAddr,
- tokenManager: tokenManager,
- localIdentities: localIdentities,
- }
-}
-
-// NewServerHandshaker creates an s2aHandshaker instance that performs a
-// server-side TLS handshake using the S2A handshaker service.
-func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, hsAddr string, opts *ServerHandshakerOptions) (Handshaker, error) {
- stream, err := s2apb.NewS2AServiceClient(conn).SetUpSession(ctx, grpc.WaitForReady(true))
- if err != nil {
- return nil, err
- }
- tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager()
- if err != nil {
- grpclog.Infof("failed to create single token access token manager: %v", err)
- }
- return newServerHandshaker(stream, c, hsAddr, opts, tokenManager), nil
-}
-
-func newServerHandshaker(stream s2apb.S2AService_SetUpSessionClient, c net.Conn, hsAddr string, opts *ServerHandshakerOptions, tokenManager tokenmanager.AccessTokenManager) *s2aHandshaker {
- var localIdentities []*commonpb.Identity
- if opts != nil {
- localIdentities = opts.LocalIdentities
- }
- return &s2aHandshaker{
- stream: stream,
- conn: c,
- serverOpts: opts,
- isClient: false,
- hsAddr: hsAddr,
- tokenManager: tokenManager,
- localIdentities: localIdentities,
- }
-}
-
-// ClientHandshake performs a client-side TLS handshake using the S2A handshaker
-// service. When complete, returns a TLS connection.
-func (h *s2aHandshaker) ClientHandshake(_ context.Context) (net.Conn, credentials.AuthInfo, error) {
- if !h.isClient {
- return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client-side handshake")
- }
- // Extract the hostname from the target name. The target name is assumed to be an authority.
- hostname, _, err := net.SplitHostPort(h.clientOpts.TargetName)
- if err != nil {
- // If the target name had no host port or could not be parsed, use it as is.
- hostname = h.clientOpts.TargetName
- }
-
- // Prepare a client start message to send to the S2A handshaker service.
- req := &s2apb.SessionReq{
- ReqOneof: &s2apb.SessionReq_ClientStart{
- ClientStart: &s2apb.ClientSessionStartReq{
- ApplicationProtocols: []string{appProtocol},
- MinTlsVersion: h.clientOpts.MinTLSVersion,
- MaxTlsVersion: h.clientOpts.MaxTLSVersion,
- TlsCiphersuites: h.clientOpts.TLSCiphersuites,
- TargetIdentities: h.clientOpts.TargetIdentities,
- LocalIdentity: h.clientOpts.LocalIdentity,
- TargetName: hostname,
- },
- },
- AuthMechanisms: h.getAuthMechanisms(),
- }
- conn, result, err := h.setUpSession(req)
- if err != nil {
- return nil, nil, err
- }
- authInfo, err := authinfo.NewS2AAuthInfo(result)
- if err != nil {
- return nil, nil, err
- }
- return conn, authInfo, nil
-}
-
-// ServerHandshake performs a server-side TLS handshake using the S2A handshaker
-// service. When complete, returns a TLS connection.
-func (h *s2aHandshaker) ServerHandshake(_ context.Context) (net.Conn, credentials.AuthInfo, error) {
- if h.isClient {
- return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server-side handshake")
- }
- p := make([]byte, frameLimit)
- n, err := h.conn.Read(p)
- if err != nil {
- return nil, nil, err
- }
- // Prepare a server start message to send to the S2A handshaker service.
- req := &s2apb.SessionReq{
- ReqOneof: &s2apb.SessionReq_ServerStart{
- ServerStart: &s2apb.ServerSessionStartReq{
- ApplicationProtocols: []string{appProtocol},
- MinTlsVersion: h.serverOpts.MinTLSVersion,
- MaxTlsVersion: h.serverOpts.MaxTLSVersion,
- TlsCiphersuites: h.serverOpts.TLSCiphersuites,
- LocalIdentities: h.serverOpts.LocalIdentities,
- InBytes: p[:n],
- },
- },
- AuthMechanisms: h.getAuthMechanisms(),
- }
- conn, result, err := h.setUpSession(req)
- if err != nil {
- return nil, nil, err
- }
- authInfo, err := authinfo.NewS2AAuthInfo(result)
- if err != nil {
- return nil, nil, err
- }
- return conn, authInfo, nil
-}
-
-// setUpSession proxies messages between the peer and the S2A handshaker
-// service.
-func (h *s2aHandshaker) setUpSession(req *s2apb.SessionReq) (net.Conn, *s2apb.SessionResult, error) {
- resp, err := h.accessHandshakerService(req)
- if err != nil {
- return nil, nil, err
- }
- // Check if the returned status is an error.
- if resp.GetStatus() != nil {
- if got, want := resp.GetStatus().Code, uint32(codes.OK); got != want {
- return nil, nil, fmt.Errorf("%v", resp.GetStatus().Details)
- }
- }
- // Calculate the extra unread bytes from the Session. Attempting to consume
- // more than the bytes sent will throw an error.
- var extra []byte
- if req.GetServerStart() != nil {
- if resp.GetBytesConsumed() > uint32(len(req.GetServerStart().GetInBytes())) {
- return nil, nil, errors.New("handshaker service consumed bytes value is out-of-bounds")
- }
- extra = req.GetServerStart().GetInBytes()[resp.GetBytesConsumed():]
- }
- result, extra, err := h.processUntilDone(resp, extra)
- if err != nil {
- return nil, nil, err
- }
- if result.GetLocalIdentity() == nil {
- return nil, nil, errors.New("local identity must be populated in session result")
- }
-
- // Create a new TLS record protocol using the Session Result.
- newConn, err := record.NewConn(&record.ConnParameters{
- NetConn: h.conn,
- Ciphersuite: result.GetState().GetTlsCiphersuite(),
- TLSVersion: result.GetState().GetTlsVersion(),
- InTrafficSecret: result.GetState().GetInKey(),
- OutTrafficSecret: result.GetState().GetOutKey(),
- UnusedBuf: extra,
- InSequence: result.GetState().GetInSequence(),
- OutSequence: result.GetState().GetOutSequence(),
- HSAddr: h.hsAddr,
- ConnectionID: result.GetState().GetConnectionId(),
- LocalIdentity: result.GetLocalIdentity(),
- EnsureProcessSessionTickets: h.ensureProcessSessionTickets(),
- })
- if err != nil {
- return nil, nil, err
- }
- return newConn, result, nil
-}
-
-func (h *s2aHandshaker) ensureProcessSessionTickets() *sync.WaitGroup {
- if h.clientOpts == nil {
- return nil
- }
- return h.clientOpts.EnsureProcessSessionTickets
-}
-
-// accessHandshakerService sends the session request to the S2A handshaker
-// service and returns the session response.
-func (h *s2aHandshaker) accessHandshakerService(req *s2apb.SessionReq) (*s2apb.SessionResp, error) {
- if err := h.stream.Send(req); err != nil {
- return nil, err
- }
- resp, err := h.stream.Recv()
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-// processUntilDone continues proxying messages between the peer and the S2A
-// handshaker service until the handshaker service returns the SessionResult at
-// the end of the handshake or an error occurs.
-func (h *s2aHandshaker) processUntilDone(resp *s2apb.SessionResp, unusedBytes []byte) (*s2apb.SessionResult, []byte, error) {
- for {
- if len(resp.OutFrames) > 0 {
- if _, err := h.conn.Write(resp.OutFrames); err != nil {
- return nil, nil, err
- }
- }
- if resp.Result != nil {
- return resp.Result, unusedBytes, nil
- }
- buf := make([]byte, frameLimit)
- n, err := h.conn.Read(buf)
- if err != nil && err != io.EOF {
- return nil, nil, err
- }
- // If there is nothing to send to the handshaker service and nothing is
- // received from the peer, then we are stuck. This covers the case when
- // the peer is not responding. Note that handshaker service connection
- // issues are caught in accessHandshakerService before we even get
- // here.
- if len(resp.OutFrames) == 0 && n == 0 {
- return nil, nil, errPeerNotResponding
- }
- // Append extra bytes from the previous interaction with the handshaker
- // service with the current buffer read from conn.
- p := append(unusedBytes, buf[:n]...)
- // From here on, p and unusedBytes point to the same slice.
- resp, err = h.accessHandshakerService(&s2apb.SessionReq{
- ReqOneof: &s2apb.SessionReq_Next{
- Next: &s2apb.SessionNextReq{
- InBytes: p,
- },
- },
- AuthMechanisms: h.getAuthMechanisms(),
- })
- if err != nil {
- return nil, nil, err
- }
-
- // Cache the local identity returned by S2A, if it is populated. This
- // overwrites any existing local identities. This is done because, once the
- // S2A has selected a local identity, then only that local identity should
- // be asserted in future requests until the end of the current handshake.
- if resp.GetLocalIdentity() != nil {
- h.localIdentities = []*commonpb.Identity{resp.GetLocalIdentity()}
- }
-
- // Set unusedBytes based on the handshaker service response.
- if resp.GetBytesConsumed() > uint32(len(p)) {
- return nil, nil, errors.New("handshaker service consumed bytes value is out-of-bounds")
- }
- unusedBytes = p[resp.GetBytesConsumed():]
- }
-}
-
-// Close shuts down the handshaker and the stream to the S2A handshaker service
-// when the handshake is complete. It should be called when the caller obtains
-// the secure connection at the end of the handshake.
-func (h *s2aHandshaker) Close() error {
- return h.stream.CloseSend()
-}
-
-func (h *s2aHandshaker) getAuthMechanisms() []*s2apb.AuthenticationMechanism {
- if h.tokenManager == nil {
- return nil
- }
- // First handle the special case when no local identities have been provided
- // by the application. In this case, an AuthenticationMechanism with no local
- // identity will be sent.
- if len(h.localIdentities) == 0 {
- token, err := h.tokenManager.DefaultToken()
- if err != nil {
- grpclog.Infof("unable to get token for empty local identity: %v", err)
- return nil
- }
- return []*s2apb.AuthenticationMechanism{
- {
- MechanismOneof: &s2apb.AuthenticationMechanism_Token{
- Token: token,
- },
- },
- }
- }
-
- // Next, handle the case where the application (or the S2A) has provided
- // one or more local identities.
- var authMechanisms []*s2apb.AuthenticationMechanism
- for _, localIdentity := range h.localIdentities {
- token, err := h.tokenManager.Token(localIdentity)
- if err != nil {
- grpclog.Infof("unable to get token for local identity %v: %v", localIdentity, err)
- continue
- }
-
- authMechanism := &s2apb.AuthenticationMechanism{
- Identity: localIdentity,
- MechanismOneof: &s2apb.AuthenticationMechanism_Token{
- Token: token,
- },
- }
- authMechanisms = append(authMechanisms, authMechanism)
- }
- return authMechanisms
-}
diff --git a/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go b/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go
deleted file mode 100644
index ed449653..00000000
--- a/vendor/github.com/google/s2a-go/internal/handshaker/service/service.go
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- *
- * Copyright 2021 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package service is a utility for calling the S2A handshaker service.
-package service
-
-import (
- "context"
- "sync"
-
- grpc "google.golang.org/grpc"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/credentials/insecure"
-)
-
-var (
- // mu guards hsConnMap and hsDialer.
- mu sync.Mutex
- // hsConnMap represents a mapping from an S2A handshaker service address
- // to a corresponding connection to an S2A handshaker service instance.
- hsConnMap = make(map[string]*grpc.ClientConn)
- // hsDialer will be reassigned in tests.
- hsDialer = grpc.DialContext
-)
-
-// Dial dials the S2A handshaker service. If a connection has already been
-// established, this function returns it. Otherwise, a new connection is
-// created.
-func Dial(ctx context.Context, handshakerServiceAddress string, transportCreds credentials.TransportCredentials) (*grpc.ClientConn, error) {
- mu.Lock()
- defer mu.Unlock()
-
- hsConn, ok := hsConnMap[handshakerServiceAddress]
- if !ok {
- // Create a new connection to the S2A handshaker service. Note that
- // this connection stays open until the application is closed.
- var grpcOpts []grpc.DialOption
- if transportCreds != nil {
- grpcOpts = append(grpcOpts, grpc.WithTransportCredentials(transportCreds))
- } else {
- grpcOpts = append(grpcOpts, grpc.WithTransportCredentials(insecure.NewCredentials()))
- }
- var err error
- hsConn, err = hsDialer(ctx, handshakerServiceAddress, grpcOpts...)
- if err != nil {
- return nil, err
- }
- hsConnMap[handshakerServiceAddress] = hsConn
- }
- return hsConn, nil
-}
diff --git a/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go b/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go
deleted file mode 100644
index 16278a1d..00000000
--- a/vendor/github.com/google/s2a-go/internal/proto/common_go_proto/common.pb.go
+++ /dev/null
@@ -1,389 +0,0 @@
-// Copyright 2021 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.30.0
-// protoc v3.21.12
-// source: internal/proto/common/common.proto
-
-package common_go_proto
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// The ciphersuites supported by S2A. The name determines the confidentiality,
-// and authentication ciphers as well as the hash algorithm used for PRF in
-// TLS 1.2 or HKDF in TLS 1.3. Thus, the components of the name are:
-// - AEAD -- for encryption and authentication, e.g., AES_128_GCM.
-// - Hash algorithm -- used in PRF or HKDF, e.g., SHA256.
-type Ciphersuite int32
-
-const (
- Ciphersuite_AES_128_GCM_SHA256 Ciphersuite = 0
- Ciphersuite_AES_256_GCM_SHA384 Ciphersuite = 1
- Ciphersuite_CHACHA20_POLY1305_SHA256 Ciphersuite = 2
-)
-
-// Enum value maps for Ciphersuite.
-var (
- Ciphersuite_name = map[int32]string{
- 0: "AES_128_GCM_SHA256",
- 1: "AES_256_GCM_SHA384",
- 2: "CHACHA20_POLY1305_SHA256",
- }
- Ciphersuite_value = map[string]int32{
- "AES_128_GCM_SHA256": 0,
- "AES_256_GCM_SHA384": 1,
- "CHACHA20_POLY1305_SHA256": 2,
- }
-)
-
-func (x Ciphersuite) Enum() *Ciphersuite {
- p := new(Ciphersuite)
- *p = x
- return p
-}
-
-func (x Ciphersuite) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (Ciphersuite) Descriptor() protoreflect.EnumDescriptor {
- return file_internal_proto_common_common_proto_enumTypes[0].Descriptor()
-}
-
-func (Ciphersuite) Type() protoreflect.EnumType {
- return &file_internal_proto_common_common_proto_enumTypes[0]
-}
-
-func (x Ciphersuite) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use Ciphersuite.Descriptor instead.
-func (Ciphersuite) EnumDescriptor() ([]byte, []int) {
- return file_internal_proto_common_common_proto_rawDescGZIP(), []int{0}
-}
-
-// The TLS versions supported by S2A's handshaker module.
-type TLSVersion int32
-
-const (
- TLSVersion_TLS1_2 TLSVersion = 0
- TLSVersion_TLS1_3 TLSVersion = 1
-)
-
-// Enum value maps for TLSVersion.
-var (
- TLSVersion_name = map[int32]string{
- 0: "TLS1_2",
- 1: "TLS1_3",
- }
- TLSVersion_value = map[string]int32{
- "TLS1_2": 0,
- "TLS1_3": 1,
- }
-)
-
-func (x TLSVersion) Enum() *TLSVersion {
- p := new(TLSVersion)
- *p = x
- return p
-}
-
-func (x TLSVersion) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (TLSVersion) Descriptor() protoreflect.EnumDescriptor {
- return file_internal_proto_common_common_proto_enumTypes[1].Descriptor()
-}
-
-func (TLSVersion) Type() protoreflect.EnumType {
- return &file_internal_proto_common_common_proto_enumTypes[1]
-}
-
-func (x TLSVersion) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use TLSVersion.Descriptor instead.
-func (TLSVersion) EnumDescriptor() ([]byte, []int) {
- return file_internal_proto_common_common_proto_rawDescGZIP(), []int{1}
-}
-
-type Identity struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to IdentityOneof:
- //
- // *Identity_SpiffeId
- // *Identity_Hostname
- // *Identity_Uid
- // *Identity_MdbUsername
- // *Identity_GaiaId
- IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"`
- // Additional identity-specific attributes.
- Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
-}
-
-func (x *Identity) Reset() {
- *x = Identity{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_common_common_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Identity) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Identity) ProtoMessage() {}
-
-func (x *Identity) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_common_common_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Identity.ProtoReflect.Descriptor instead.
-func (*Identity) Descriptor() ([]byte, []int) {
- return file_internal_proto_common_common_proto_rawDescGZIP(), []int{0}
-}
-
-func (m *Identity) GetIdentityOneof() isIdentity_IdentityOneof {
- if m != nil {
- return m.IdentityOneof
- }
- return nil
-}
-
-func (x *Identity) GetSpiffeId() string {
- if x, ok := x.GetIdentityOneof().(*Identity_SpiffeId); ok {
- return x.SpiffeId
- }
- return ""
-}
-
-func (x *Identity) GetHostname() string {
- if x, ok := x.GetIdentityOneof().(*Identity_Hostname); ok {
- return x.Hostname
- }
- return ""
-}
-
-func (x *Identity) GetUid() string {
- if x, ok := x.GetIdentityOneof().(*Identity_Uid); ok {
- return x.Uid
- }
- return ""
-}
-
-func (x *Identity) GetMdbUsername() string {
- if x, ok := x.GetIdentityOneof().(*Identity_MdbUsername); ok {
- return x.MdbUsername
- }
- return ""
-}
-
-func (x *Identity) GetGaiaId() string {
- if x, ok := x.GetIdentityOneof().(*Identity_GaiaId); ok {
- return x.GaiaId
- }
- return ""
-}
-
-func (x *Identity) GetAttributes() map[string]string {
- if x != nil {
- return x.Attributes
- }
- return nil
-}
-
-type isIdentity_IdentityOneof interface {
- isIdentity_IdentityOneof()
-}
-
-type Identity_SpiffeId struct {
- // The SPIFFE ID of a connection endpoint.
- SpiffeId string `protobuf:"bytes,1,opt,name=spiffe_id,json=spiffeId,proto3,oneof"`
-}
-
-type Identity_Hostname struct {
- // The hostname of a connection endpoint.
- Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3,oneof"`
-}
-
-type Identity_Uid struct {
- // The UID of a connection endpoint.
- Uid string `protobuf:"bytes,4,opt,name=uid,proto3,oneof"`
-}
-
-type Identity_MdbUsername struct {
- // The MDB username of a connection endpoint.
- MdbUsername string `protobuf:"bytes,5,opt,name=mdb_username,json=mdbUsername,proto3,oneof"`
-}
-
-type Identity_GaiaId struct {
- // The Gaia ID of a connection endpoint.
- GaiaId string `protobuf:"bytes,6,opt,name=gaia_id,json=gaiaId,proto3,oneof"`
-}
-
-func (*Identity_SpiffeId) isIdentity_IdentityOneof() {}
-
-func (*Identity_Hostname) isIdentity_IdentityOneof() {}
-
-func (*Identity_Uid) isIdentity_IdentityOneof() {}
-
-func (*Identity_MdbUsername) isIdentity_IdentityOneof() {}
-
-func (*Identity_GaiaId) isIdentity_IdentityOneof() {}
-
-var File_internal_proto_common_common_proto protoreflect.FileDescriptor
-
-var file_internal_proto_common_common_proto_rawDesc = []byte{
- 0x0a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
- 0xb1, 0x02, 0x0a, 0x08, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x09,
- 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48,
- 0x00, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x08, 0x68,
- 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52,
- 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x75, 0x69, 0x64,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x23, 0x0a,
- 0x0c, 0x6d, 0x64, 0x62, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x6d, 0x64, 0x62, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61,
- 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x07, 0x67, 0x61, 0x69, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20,
- 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x67, 0x61, 0x69, 0x61, 0x49, 0x64, 0x12, 0x43, 0x0a,
- 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64,
- 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65,
- 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
- 0x65, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
- 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x6f, 0x6e,
- 0x65, 0x6f, 0x66, 0x2a, 0x5b, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69,
- 0x74, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43,
- 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x45,
- 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34,
- 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x41, 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50,
- 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x02,
- 0x2a, 0x24, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0a,
- 0x0a, 0x06, 0x54, 0x4c, 0x53, 0x31, 0x5f, 0x32, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x54, 0x4c,
- 0x53, 0x31, 0x5f, 0x33, 0x10, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f,
- 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63,
- 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_internal_proto_common_common_proto_rawDescOnce sync.Once
- file_internal_proto_common_common_proto_rawDescData = file_internal_proto_common_common_proto_rawDesc
-)
-
-func file_internal_proto_common_common_proto_rawDescGZIP() []byte {
- file_internal_proto_common_common_proto_rawDescOnce.Do(func() {
- file_internal_proto_common_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_common_common_proto_rawDescData)
- })
- return file_internal_proto_common_common_proto_rawDescData
-}
-
-var file_internal_proto_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
-var file_internal_proto_common_common_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
-var file_internal_proto_common_common_proto_goTypes = []interface{}{
- (Ciphersuite)(0), // 0: s2a.proto.Ciphersuite
- (TLSVersion)(0), // 1: s2a.proto.TLSVersion
- (*Identity)(nil), // 2: s2a.proto.Identity
- nil, // 3: s2a.proto.Identity.AttributesEntry
-}
-var file_internal_proto_common_common_proto_depIdxs = []int32{
- 3, // 0: s2a.proto.Identity.attributes:type_name -> s2a.proto.Identity.AttributesEntry
- 1, // [1:1] is the sub-list for method output_type
- 1, // [1:1] is the sub-list for method input_type
- 1, // [1:1] is the sub-list for extension type_name
- 1, // [1:1] is the sub-list for extension extendee
- 0, // [0:1] is the sub-list for field type_name
-}
-
-func init() { file_internal_proto_common_common_proto_init() }
-func file_internal_proto_common_common_proto_init() {
- if File_internal_proto_common_common_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_internal_proto_common_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Identity); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_internal_proto_common_common_proto_msgTypes[0].OneofWrappers = []interface{}{
- (*Identity_SpiffeId)(nil),
- (*Identity_Hostname)(nil),
- (*Identity_Uid)(nil),
- (*Identity_MdbUsername)(nil),
- (*Identity_GaiaId)(nil),
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_internal_proto_common_common_proto_rawDesc,
- NumEnums: 2,
- NumMessages: 2,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_internal_proto_common_common_proto_goTypes,
- DependencyIndexes: file_internal_proto_common_common_proto_depIdxs,
- EnumInfos: file_internal_proto_common_common_proto_enumTypes,
- MessageInfos: file_internal_proto_common_common_proto_msgTypes,
- }.Build()
- File_internal_proto_common_common_proto = out.File
- file_internal_proto_common_common_proto_rawDesc = nil
- file_internal_proto_common_common_proto_goTypes = nil
- file_internal_proto_common_common_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go
deleted file mode 100644
index f4f763ae..00000000
--- a/vendor/github.com/google/s2a-go/internal/proto/s2a_context_go_proto/s2a_context.pb.go
+++ /dev/null
@@ -1,267 +0,0 @@
-// Copyright 2021 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.30.0
-// protoc v3.21.12
-// source: internal/proto/s2a_context/s2a_context.proto
-
-package s2a_context_go_proto
-
-import (
- common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type S2AContext struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The application protocol negotiated for this connection, e.g., 'grpc'.
- ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"`
- // The TLS version number that the S2A's handshaker module used to set up the
- // session.
- TlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=tls_version,json=tlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"tls_version,omitempty"`
- // The TLS ciphersuite negotiated by the S2A's handshaker module.
- Ciphersuite common_go_proto.Ciphersuite `protobuf:"varint,3,opt,name=ciphersuite,proto3,enum=s2a.proto.Ciphersuite" json:"ciphersuite,omitempty"`
- // The authenticated identity of the peer.
- PeerIdentity *common_go_proto.Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"`
- // The local identity used during session setup. This could be:
- // - The local identity that the client specifies in ClientSessionStartReq.
- // - One of the local identities that the server specifies in
- // ServerSessionStartReq.
- // - If neither client or server specifies local identities, the S2A picks the
- // default one. In this case, this field will contain that identity.
- LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"`
- // The SHA256 hash of the peer certificate used in the handshake.
- PeerCertFingerprint []byte `protobuf:"bytes,6,opt,name=peer_cert_fingerprint,json=peerCertFingerprint,proto3" json:"peer_cert_fingerprint,omitempty"`
- // The SHA256 hash of the local certificate used in the handshake.
- LocalCertFingerprint []byte `protobuf:"bytes,7,opt,name=local_cert_fingerprint,json=localCertFingerprint,proto3" json:"local_cert_fingerprint,omitempty"`
- // Set to true if a cached session was reused to resume the handshake.
- IsHandshakeResumed bool `protobuf:"varint,8,opt,name=is_handshake_resumed,json=isHandshakeResumed,proto3" json:"is_handshake_resumed,omitempty"`
-}
-
-func (x *S2AContext) Reset() {
- *x = S2AContext{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *S2AContext) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*S2AContext) ProtoMessage() {}
-
-func (x *S2AContext) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use S2AContext.ProtoReflect.Descriptor instead.
-func (*S2AContext) Descriptor() ([]byte, []int) {
- return file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *S2AContext) GetApplicationProtocol() string {
- if x != nil {
- return x.ApplicationProtocol
- }
- return ""
-}
-
-func (x *S2AContext) GetTlsVersion() common_go_proto.TLSVersion {
- if x != nil {
- return x.TlsVersion
- }
- return common_go_proto.TLSVersion(0)
-}
-
-func (x *S2AContext) GetCiphersuite() common_go_proto.Ciphersuite {
- if x != nil {
- return x.Ciphersuite
- }
- return common_go_proto.Ciphersuite(0)
-}
-
-func (x *S2AContext) GetPeerIdentity() *common_go_proto.Identity {
- if x != nil {
- return x.PeerIdentity
- }
- return nil
-}
-
-func (x *S2AContext) GetLocalIdentity() *common_go_proto.Identity {
- if x != nil {
- return x.LocalIdentity
- }
- return nil
-}
-
-func (x *S2AContext) GetPeerCertFingerprint() []byte {
- if x != nil {
- return x.PeerCertFingerprint
- }
- return nil
-}
-
-func (x *S2AContext) GetLocalCertFingerprint() []byte {
- if x != nil {
- return x.LocalCertFingerprint
- }
- return nil
-}
-
-func (x *S2AContext) GetIsHandshakeResumed() bool {
- if x != nil {
- return x.IsHandshakeResumed
- }
- return false
-}
-
-var File_internal_proto_s2a_context_s2a_context_proto protoreflect.FileDescriptor
-
-var file_internal_proto_s2a_context_s2a_context_proto_rawDesc = []byte{
- 0x0a, 0x2c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61,
- 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09,
- 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72,
- 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
- 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc3, 0x03,
- 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x31, 0x0a, 0x14,
- 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12,
- 0x36, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x74, 0x6c, 0x73,
- 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x0a, 0x0b, 0x63, 0x69, 0x70, 0x68, 0x65,
- 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73,
- 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73,
- 0x75, 0x69, 0x74, 0x65, 0x52, 0x0b, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74,
- 0x65, 0x12, 0x38, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69,
- 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70,
- 0x65, 0x65, 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x0e, 0x6c,
- 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
- 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49,
- 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x65, 0x65, 0x72, 0x5f,
- 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74,
- 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74,
- 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x6c,
- 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72,
- 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x6f, 0x63,
- 0x61, 0x6c, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e,
- 0x74, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b,
- 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x12, 0x69, 0x73, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x75,
- 0x6d, 0x65, 0x64, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
- 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74,
- 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x32, 0x61, 0x5f,
- 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_internal_proto_s2a_context_s2a_context_proto_rawDescOnce sync.Once
- file_internal_proto_s2a_context_s2a_context_proto_rawDescData = file_internal_proto_s2a_context_s2a_context_proto_rawDesc
-)
-
-func file_internal_proto_s2a_context_s2a_context_proto_rawDescGZIP() []byte {
- file_internal_proto_s2a_context_s2a_context_proto_rawDescOnce.Do(func() {
- file_internal_proto_s2a_context_s2a_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_s2a_context_s2a_context_proto_rawDescData)
- })
- return file_internal_proto_s2a_context_s2a_context_proto_rawDescData
-}
-
-var file_internal_proto_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_internal_proto_s2a_context_s2a_context_proto_goTypes = []interface{}{
- (*S2AContext)(nil), // 0: s2a.proto.S2AContext
- (common_go_proto.TLSVersion)(0), // 1: s2a.proto.TLSVersion
- (common_go_proto.Ciphersuite)(0), // 2: s2a.proto.Ciphersuite
- (*common_go_proto.Identity)(nil), // 3: s2a.proto.Identity
-}
-var file_internal_proto_s2a_context_s2a_context_proto_depIdxs = []int32{
- 1, // 0: s2a.proto.S2AContext.tls_version:type_name -> s2a.proto.TLSVersion
- 2, // 1: s2a.proto.S2AContext.ciphersuite:type_name -> s2a.proto.Ciphersuite
- 3, // 2: s2a.proto.S2AContext.peer_identity:type_name -> s2a.proto.Identity
- 3, // 3: s2a.proto.S2AContext.local_identity:type_name -> s2a.proto.Identity
- 4, // [4:4] is the sub-list for method output_type
- 4, // [4:4] is the sub-list for method input_type
- 4, // [4:4] is the sub-list for extension type_name
- 4, // [4:4] is the sub-list for extension extendee
- 0, // [0:4] is the sub-list for field type_name
-}
-
-func init() { file_internal_proto_s2a_context_s2a_context_proto_init() }
-func file_internal_proto_s2a_context_s2a_context_proto_init() {
- if File_internal_proto_s2a_context_s2a_context_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_internal_proto_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*S2AContext); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_internal_proto_s2a_context_s2a_context_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 1,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_internal_proto_s2a_context_s2a_context_proto_goTypes,
- DependencyIndexes: file_internal_proto_s2a_context_s2a_context_proto_depIdxs,
- MessageInfos: file_internal_proto_s2a_context_s2a_context_proto_msgTypes,
- }.Build()
- File_internal_proto_s2a_context_s2a_context_proto = out.File
- file_internal_proto_s2a_context_s2a_context_proto_rawDesc = nil
- file_internal_proto_s2a_context_s2a_context_proto_goTypes = nil
- file_internal_proto_s2a_context_s2a_context_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go
deleted file mode 100644
index 0a86ebee..00000000
--- a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a.pb.go
+++ /dev/null
@@ -1,1377 +0,0 @@
-// Copyright 2021 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.30.0
-// protoc v3.21.12
-// source: internal/proto/s2a/s2a.proto
-
-package s2a_go_proto
-
-import (
- common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type AuthenticationMechanism struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // (Optional) Application may specify an identity associated to an
- // authentication mechanism. Otherwise, S2A assumes that the authentication
- // mechanism is associated with the default identity. If the default identity
- // cannot be determined, session setup fails.
- Identity *common_go_proto.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"`
- // Types that are assignable to MechanismOneof:
- //
- // *AuthenticationMechanism_Token
- MechanismOneof isAuthenticationMechanism_MechanismOneof `protobuf_oneof:"mechanism_oneof"`
-}
-
-func (x *AuthenticationMechanism) Reset() {
- *x = AuthenticationMechanism{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_s2a_s2a_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AuthenticationMechanism) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AuthenticationMechanism) ProtoMessage() {}
-
-func (x *AuthenticationMechanism) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_s2a_s2a_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AuthenticationMechanism.ProtoReflect.Descriptor instead.
-func (*AuthenticationMechanism) Descriptor() ([]byte, []int) {
- return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *AuthenticationMechanism) GetIdentity() *common_go_proto.Identity {
- if x != nil {
- return x.Identity
- }
- return nil
-}
-
-func (m *AuthenticationMechanism) GetMechanismOneof() isAuthenticationMechanism_MechanismOneof {
- if m != nil {
- return m.MechanismOneof
- }
- return nil
-}
-
-func (x *AuthenticationMechanism) GetToken() string {
- if x, ok := x.GetMechanismOneof().(*AuthenticationMechanism_Token); ok {
- return x.Token
- }
- return ""
-}
-
-type isAuthenticationMechanism_MechanismOneof interface {
- isAuthenticationMechanism_MechanismOneof()
-}
-
-type AuthenticationMechanism_Token struct {
- // A token that the application uses to authenticate itself to the S2A.
- Token string `protobuf:"bytes,2,opt,name=token,proto3,oneof"`
-}
-
-func (*AuthenticationMechanism_Token) isAuthenticationMechanism_MechanismOneof() {}
-
-type ClientSessionStartReq struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The application protocols supported by the client, e.g., "grpc".
- ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"`
- // (Optional) The minimum TLS version number that the S2A's handshaker module
- // will use to set up the session. If this field is not provided, S2A will use
- // the minimum version it supports.
- MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"min_tls_version,omitempty"`
- // (Optional) The maximum TLS version number that the S2A's handshaker module
- // will use to set up the session. If this field is not provided, S2A will use
- // the maximum version it supports.
- MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"max_tls_version,omitempty"`
- // The TLS ciphersuites that the client is willing to support.
- TlsCiphersuites []common_go_proto.Ciphersuite `protobuf:"varint,4,rep,packed,name=tls_ciphersuites,json=tlsCiphersuites,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuites,omitempty"`
- // (Optional) Describes which server identities are acceptable by the client.
- // If target identities are provided and none of them matches the peer
- // identity of the server, session setup fails.
- TargetIdentities []*common_go_proto.Identity `protobuf:"bytes,5,rep,name=target_identities,json=targetIdentities,proto3" json:"target_identities,omitempty"`
- // (Optional) Application may specify a local identity. Otherwise, S2A chooses
- // the default local identity. If the default identity cannot be determined,
- // session setup fails.
- LocalIdentity *common_go_proto.Identity `protobuf:"bytes,6,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"`
- // The target name that is used by S2A to configure SNI in the TLS handshake.
- // It is also used to perform server authorization check if avaiable. This
- // check is intended to verify that the peer authenticated identity is
- // authorized to run a service with the target name.
- // This field MUST only contain the host portion of the server address. It
- // MUST not contain the scheme or the port number. For example, if the server
- // address is dns://www.example.com:443, the value of this field should be
- // set to www.example.com.
- TargetName string `protobuf:"bytes,7,opt,name=target_name,json=targetName,proto3" json:"target_name,omitempty"`
-}
-
-func (x *ClientSessionStartReq) Reset() {
- *x = ClientSessionStartReq{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_s2a_s2a_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ClientSessionStartReq) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ClientSessionStartReq) ProtoMessage() {}
-
-func (x *ClientSessionStartReq) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_s2a_s2a_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ClientSessionStartReq.ProtoReflect.Descriptor instead.
-func (*ClientSessionStartReq) Descriptor() ([]byte, []int) {
- return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *ClientSessionStartReq) GetApplicationProtocols() []string {
- if x != nil {
- return x.ApplicationProtocols
- }
- return nil
-}
-
-func (x *ClientSessionStartReq) GetMinTlsVersion() common_go_proto.TLSVersion {
- if x != nil {
- return x.MinTlsVersion
- }
- return common_go_proto.TLSVersion(0)
-}
-
-func (x *ClientSessionStartReq) GetMaxTlsVersion() common_go_proto.TLSVersion {
- if x != nil {
- return x.MaxTlsVersion
- }
- return common_go_proto.TLSVersion(0)
-}
-
-func (x *ClientSessionStartReq) GetTlsCiphersuites() []common_go_proto.Ciphersuite {
- if x != nil {
- return x.TlsCiphersuites
- }
- return nil
-}
-
-func (x *ClientSessionStartReq) GetTargetIdentities() []*common_go_proto.Identity {
- if x != nil {
- return x.TargetIdentities
- }
- return nil
-}
-
-func (x *ClientSessionStartReq) GetLocalIdentity() *common_go_proto.Identity {
- if x != nil {
- return x.LocalIdentity
- }
- return nil
-}
-
-func (x *ClientSessionStartReq) GetTargetName() string {
- if x != nil {
- return x.TargetName
- }
- return ""
-}
-
-type ServerSessionStartReq struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The application protocols supported by the server, e.g., "grpc".
- ApplicationProtocols []string `protobuf:"bytes,1,rep,name=application_protocols,json=applicationProtocols,proto3" json:"application_protocols,omitempty"`
- // (Optional) The minimum TLS version number that the S2A's handshaker module
- // will use to set up the session. If this field is not provided, S2A will use
- // the minimum version it supports.
- MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"min_tls_version,omitempty"`
- // (Optional) The maximum TLS version number that the S2A's handshaker module
- // will use to set up the session. If this field is not provided, S2A will use
- // the maximum version it supports.
- MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"max_tls_version,omitempty"`
- // The TLS ciphersuites that the server is willing to support.
- TlsCiphersuites []common_go_proto.Ciphersuite `protobuf:"varint,4,rep,packed,name=tls_ciphersuites,json=tlsCiphersuites,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuites,omitempty"`
- // (Optional) A list of local identities supported by the server, if
- // specified. Otherwise, S2A chooses the default local identity. If the
- // default identity cannot be determined, session setup fails.
- LocalIdentities []*common_go_proto.Identity `protobuf:"bytes,5,rep,name=local_identities,json=localIdentities,proto3" json:"local_identities,omitempty"`
- // The byte representation of the first handshake message received from the
- // client peer. It is possible that this first message is split into multiple
- // chunks. In this case, the first chunk is sent using this field and the
- // following chunks are sent using the in_bytes field of SessionNextReq
- // Specifically, if the client peer is using S2A, this field contains the
- // bytes in the out_frames field of SessionResp message that the client peer
- // received from its S2A after initiating the handshake.
- InBytes []byte `protobuf:"bytes,6,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"`
-}
-
-func (x *ServerSessionStartReq) Reset() {
- *x = ServerSessionStartReq{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_s2a_s2a_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ServerSessionStartReq) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ServerSessionStartReq) ProtoMessage() {}
-
-func (x *ServerSessionStartReq) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_s2a_s2a_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ServerSessionStartReq.ProtoReflect.Descriptor instead.
-func (*ServerSessionStartReq) Descriptor() ([]byte, []int) {
- return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *ServerSessionStartReq) GetApplicationProtocols() []string {
- if x != nil {
- return x.ApplicationProtocols
- }
- return nil
-}
-
-func (x *ServerSessionStartReq) GetMinTlsVersion() common_go_proto.TLSVersion {
- if x != nil {
- return x.MinTlsVersion
- }
- return common_go_proto.TLSVersion(0)
-}
-
-func (x *ServerSessionStartReq) GetMaxTlsVersion() common_go_proto.TLSVersion {
- if x != nil {
- return x.MaxTlsVersion
- }
- return common_go_proto.TLSVersion(0)
-}
-
-func (x *ServerSessionStartReq) GetTlsCiphersuites() []common_go_proto.Ciphersuite {
- if x != nil {
- return x.TlsCiphersuites
- }
- return nil
-}
-
-func (x *ServerSessionStartReq) GetLocalIdentities() []*common_go_proto.Identity {
- if x != nil {
- return x.LocalIdentities
- }
- return nil
-}
-
-func (x *ServerSessionStartReq) GetInBytes() []byte {
- if x != nil {
- return x.InBytes
- }
- return nil
-}
-
-type SessionNextReq struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The byte representation of session setup, i.e., handshake messages.
- // Specifically:
- // - All handshake messages sent from the server to the client.
- // - All, except for the first, handshake messages sent from the client to
- // the server. Note that the first message is communicated to S2A using the
- // in_bytes field of ServerSessionStartReq.
- //
- // If the peer is using S2A, this field contains the bytes in the out_frames
- // field of SessionResp message that the peer received from its S2A.
- InBytes []byte `protobuf:"bytes,1,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"`
-}
-
-func (x *SessionNextReq) Reset() {
- *x = SessionNextReq{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_s2a_s2a_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SessionNextReq) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SessionNextReq) ProtoMessage() {}
-
-func (x *SessionNextReq) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_s2a_s2a_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SessionNextReq.ProtoReflect.Descriptor instead.
-func (*SessionNextReq) Descriptor() ([]byte, []int) {
- return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *SessionNextReq) GetInBytes() []byte {
- if x != nil {
- return x.InBytes
- }
- return nil
-}
-
-type ResumptionTicketReq struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The byte representation of a NewSessionTicket message received from the
- // server.
- InBytes [][]byte `protobuf:"bytes,1,rep,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"`
- // A connection identifier that was created and sent by S2A at the end of a
- // handshake.
- ConnectionId uint64 `protobuf:"varint,2,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"`
- // The local identity that was used by S2A during session setup and included
- // in |SessionResult|.
- LocalIdentity *common_go_proto.Identity `protobuf:"bytes,3,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"`
-}
-
-func (x *ResumptionTicketReq) Reset() {
- *x = ResumptionTicketReq{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_s2a_s2a_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ResumptionTicketReq) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ResumptionTicketReq) ProtoMessage() {}
-
-func (x *ResumptionTicketReq) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_s2a_s2a_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ResumptionTicketReq.ProtoReflect.Descriptor instead.
-func (*ResumptionTicketReq) Descriptor() ([]byte, []int) {
- return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *ResumptionTicketReq) GetInBytes() [][]byte {
- if x != nil {
- return x.InBytes
- }
- return nil
-}
-
-func (x *ResumptionTicketReq) GetConnectionId() uint64 {
- if x != nil {
- return x.ConnectionId
- }
- return 0
-}
-
-func (x *ResumptionTicketReq) GetLocalIdentity() *common_go_proto.Identity {
- if x != nil {
- return x.LocalIdentity
- }
- return nil
-}
-
-type SessionReq struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to ReqOneof:
- //
- // *SessionReq_ClientStart
- // *SessionReq_ServerStart
- // *SessionReq_Next
- // *SessionReq_ResumptionTicket
- ReqOneof isSessionReq_ReqOneof `protobuf_oneof:"req_oneof"`
- // (Optional) The authentication mechanisms that the client wishes to use to
- // authenticate to the S2A, ordered by preference. The S2A will always use the
- // first authentication mechanism that appears in the list and is supported by
- // the S2A.
- AuthMechanisms []*AuthenticationMechanism `protobuf:"bytes,5,rep,name=auth_mechanisms,json=authMechanisms,proto3" json:"auth_mechanisms,omitempty"`
-}
-
-func (x *SessionReq) Reset() {
- *x = SessionReq{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_s2a_s2a_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SessionReq) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SessionReq) ProtoMessage() {}
-
-func (x *SessionReq) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_s2a_s2a_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SessionReq.ProtoReflect.Descriptor instead.
-func (*SessionReq) Descriptor() ([]byte, []int) {
- return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{5}
-}
-
-func (m *SessionReq) GetReqOneof() isSessionReq_ReqOneof {
- if m != nil {
- return m.ReqOneof
- }
- return nil
-}
-
-func (x *SessionReq) GetClientStart() *ClientSessionStartReq {
- if x, ok := x.GetReqOneof().(*SessionReq_ClientStart); ok {
- return x.ClientStart
- }
- return nil
-}
-
-func (x *SessionReq) GetServerStart() *ServerSessionStartReq {
- if x, ok := x.GetReqOneof().(*SessionReq_ServerStart); ok {
- return x.ServerStart
- }
- return nil
-}
-
-func (x *SessionReq) GetNext() *SessionNextReq {
- if x, ok := x.GetReqOneof().(*SessionReq_Next); ok {
- return x.Next
- }
- return nil
-}
-
-func (x *SessionReq) GetResumptionTicket() *ResumptionTicketReq {
- if x, ok := x.GetReqOneof().(*SessionReq_ResumptionTicket); ok {
- return x.ResumptionTicket
- }
- return nil
-}
-
-func (x *SessionReq) GetAuthMechanisms() []*AuthenticationMechanism {
- if x != nil {
- return x.AuthMechanisms
- }
- return nil
-}
-
-type isSessionReq_ReqOneof interface {
- isSessionReq_ReqOneof()
-}
-
-type SessionReq_ClientStart struct {
- // The client session setup request message.
- ClientStart *ClientSessionStartReq `protobuf:"bytes,1,opt,name=client_start,json=clientStart,proto3,oneof"`
-}
-
-type SessionReq_ServerStart struct {
- // The server session setup request message.
- ServerStart *ServerSessionStartReq `protobuf:"bytes,2,opt,name=server_start,json=serverStart,proto3,oneof"`
-}
-
-type SessionReq_Next struct {
- // The next session setup message request message.
- Next *SessionNextReq `protobuf:"bytes,3,opt,name=next,proto3,oneof"`
-}
-
-type SessionReq_ResumptionTicket struct {
- // The resumption ticket that is received from the server. This message is
- // only accepted by S2A if it is running as a client and if it is received
- // after session setup is complete. If S2A is running as a server and it
- // receives this message, the session is terminated.
- ResumptionTicket *ResumptionTicketReq `protobuf:"bytes,4,opt,name=resumption_ticket,json=resumptionTicket,proto3,oneof"`
-}
-
-func (*SessionReq_ClientStart) isSessionReq_ReqOneof() {}
-
-func (*SessionReq_ServerStart) isSessionReq_ReqOneof() {}
-
-func (*SessionReq_Next) isSessionReq_ReqOneof() {}
-
-func (*SessionReq_ResumptionTicket) isSessionReq_ReqOneof() {}
-
-type SessionState struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The TLS version number that the S2A's handshaker module used to set up the
- // session.
- TlsVersion common_go_proto.TLSVersion `protobuf:"varint,1,opt,name=tls_version,json=tlsVersion,proto3,enum=s2a.proto.TLSVersion" json:"tls_version,omitempty"`
- // The TLS ciphersuite negotiated by the S2A's handshaker module.
- TlsCiphersuite common_go_proto.Ciphersuite `protobuf:"varint,2,opt,name=tls_ciphersuite,json=tlsCiphersuite,proto3,enum=s2a.proto.Ciphersuite" json:"tls_ciphersuite,omitempty"`
- // The sequence number of the next, incoming, TLS record.
- InSequence uint64 `protobuf:"varint,3,opt,name=in_sequence,json=inSequence,proto3" json:"in_sequence,omitempty"`
- // The sequence number of the next, outgoing, TLS record.
- OutSequence uint64 `protobuf:"varint,4,opt,name=out_sequence,json=outSequence,proto3" json:"out_sequence,omitempty"`
- // The key for the inbound direction.
- InKey []byte `protobuf:"bytes,5,opt,name=in_key,json=inKey,proto3" json:"in_key,omitempty"`
- // The key for the outbound direction.
- OutKey []byte `protobuf:"bytes,6,opt,name=out_key,json=outKey,proto3" json:"out_key,omitempty"`
- // The constant part of the record nonce for the outbound direction.
- InFixedNonce []byte `protobuf:"bytes,7,opt,name=in_fixed_nonce,json=inFixedNonce,proto3" json:"in_fixed_nonce,omitempty"`
- // The constant part of the record nonce for the inbound direction.
- OutFixedNonce []byte `protobuf:"bytes,8,opt,name=out_fixed_nonce,json=outFixedNonce,proto3" json:"out_fixed_nonce,omitempty"`
- // A connection identifier that can be provided to S2A to perform operations
- // related to this connection. This identifier will be stored by the record
- // protocol, and included in the |ResumptionTicketReq| message that is later
- // sent back to S2A. This field is set only for client-side connections.
- ConnectionId uint64 `protobuf:"varint,9,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"`
- // Set to true if a cached session was reused to do an abbreviated handshake.
- IsHandshakeResumed bool `protobuf:"varint,10,opt,name=is_handshake_resumed,json=isHandshakeResumed,proto3" json:"is_handshake_resumed,omitempty"`
-}
-
-func (x *SessionState) Reset() {
- *x = SessionState{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_s2a_s2a_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SessionState) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SessionState) ProtoMessage() {}
-
-func (x *SessionState) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_s2a_s2a_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SessionState.ProtoReflect.Descriptor instead.
-func (*SessionState) Descriptor() ([]byte, []int) {
- return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *SessionState) GetTlsVersion() common_go_proto.TLSVersion {
- if x != nil {
- return x.TlsVersion
- }
- return common_go_proto.TLSVersion(0)
-}
-
-func (x *SessionState) GetTlsCiphersuite() common_go_proto.Ciphersuite {
- if x != nil {
- return x.TlsCiphersuite
- }
- return common_go_proto.Ciphersuite(0)
-}
-
-func (x *SessionState) GetInSequence() uint64 {
- if x != nil {
- return x.InSequence
- }
- return 0
-}
-
-func (x *SessionState) GetOutSequence() uint64 {
- if x != nil {
- return x.OutSequence
- }
- return 0
-}
-
-func (x *SessionState) GetInKey() []byte {
- if x != nil {
- return x.InKey
- }
- return nil
-}
-
-func (x *SessionState) GetOutKey() []byte {
- if x != nil {
- return x.OutKey
- }
- return nil
-}
-
-func (x *SessionState) GetInFixedNonce() []byte {
- if x != nil {
- return x.InFixedNonce
- }
- return nil
-}
-
-func (x *SessionState) GetOutFixedNonce() []byte {
- if x != nil {
- return x.OutFixedNonce
- }
- return nil
-}
-
-func (x *SessionState) GetConnectionId() uint64 {
- if x != nil {
- return x.ConnectionId
- }
- return 0
-}
-
-func (x *SessionState) GetIsHandshakeResumed() bool {
- if x != nil {
- return x.IsHandshakeResumed
- }
- return false
-}
-
-type SessionResult struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The application protocol negotiated for this session.
- ApplicationProtocol string `protobuf:"bytes,1,opt,name=application_protocol,json=applicationProtocol,proto3" json:"application_protocol,omitempty"`
- // The session state at the end. This state contains all cryptographic
- // material required to initialize the record protocol object.
- State *SessionState `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"`
- // The authenticated identity of the peer.
- PeerIdentity *common_go_proto.Identity `protobuf:"bytes,4,opt,name=peer_identity,json=peerIdentity,proto3" json:"peer_identity,omitempty"`
- // The local identity used during session setup. This could be:
- // - The local identity that the client specifies in ClientSessionStartReq.
- // - One of the local identities that the server specifies in
- // ServerSessionStartReq.
- // - If neither client or server specifies local identities, the S2A picks the
- // default one. In this case, this field will contain that identity.
- LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"`
- // The SHA256 hash of the local certificate used in the handshake.
- LocalCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_cert_fingerprint,json=localCertFingerprint,proto3" json:"local_cert_fingerprint,omitempty"`
- // The SHA256 hash of the peer certificate used in the handshake.
- PeerCertFingerprint []byte `protobuf:"bytes,7,opt,name=peer_cert_fingerprint,json=peerCertFingerprint,proto3" json:"peer_cert_fingerprint,omitempty"`
-}
-
-func (x *SessionResult) Reset() {
- *x = SessionResult{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_s2a_s2a_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SessionResult) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SessionResult) ProtoMessage() {}
-
-func (x *SessionResult) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_s2a_s2a_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SessionResult.ProtoReflect.Descriptor instead.
-func (*SessionResult) Descriptor() ([]byte, []int) {
- return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *SessionResult) GetApplicationProtocol() string {
- if x != nil {
- return x.ApplicationProtocol
- }
- return ""
-}
-
-func (x *SessionResult) GetState() *SessionState {
- if x != nil {
- return x.State
- }
- return nil
-}
-
-func (x *SessionResult) GetPeerIdentity() *common_go_proto.Identity {
- if x != nil {
- return x.PeerIdentity
- }
- return nil
-}
-
-func (x *SessionResult) GetLocalIdentity() *common_go_proto.Identity {
- if x != nil {
- return x.LocalIdentity
- }
- return nil
-}
-
-func (x *SessionResult) GetLocalCertFingerprint() []byte {
- if x != nil {
- return x.LocalCertFingerprint
- }
- return nil
-}
-
-func (x *SessionResult) GetPeerCertFingerprint() []byte {
- if x != nil {
- return x.PeerCertFingerprint
- }
- return nil
-}
-
-type SessionStatus struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The status code that is specific to the application and the implementation
- // of S2A, e.g., gRPC status code.
- Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
- // The status details.
- Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"`
-}
-
-func (x *SessionStatus) Reset() {
- *x = SessionStatus{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_s2a_s2a_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SessionStatus) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SessionStatus) ProtoMessage() {}
-
-func (x *SessionStatus) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_s2a_s2a_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SessionStatus.ProtoReflect.Descriptor instead.
-func (*SessionStatus) Descriptor() ([]byte, []int) {
- return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *SessionStatus) GetCode() uint32 {
- if x != nil {
- return x.Code
- }
- return 0
-}
-
-func (x *SessionStatus) GetDetails() string {
- if x != nil {
- return x.Details
- }
- return ""
-}
-
-type SessionResp struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The local identity used during session setup. This could be:
- // - The local identity that the client specifies in ClientSessionStartReq.
- // - One of the local identities that the server specifies in
- // ServerSessionStartReq.
- // - If neither client or server specifies local identities, the S2A picks the
- // default one. In this case, this field will contain that identity.
- //
- // If the SessionResult is populated, then this must coincide with the local
- // identity specified in the SessionResult; otherwise, the handshake must
- // fail.
- LocalIdentity *common_go_proto.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"`
- // The byte representation of the frames that should be sent to the peer. May
- // be empty if nothing needs to be sent to the peer or if in_bytes in the
- // SessionReq is incomplete. All bytes in a non-empty out_frames must be sent
- // to the peer even if the session setup status is not OK as these frames may
- // contain appropriate alerts.
- OutFrames []byte `protobuf:"bytes,2,opt,name=out_frames,json=outFrames,proto3" json:"out_frames,omitempty"`
- // Number of bytes in the in_bytes field that are consumed by S2A. It is
- // possible that part of in_bytes is unrelated to the session setup process.
- BytesConsumed uint32 `protobuf:"varint,3,opt,name=bytes_consumed,json=bytesConsumed,proto3" json:"bytes_consumed,omitempty"`
- // This is set if the session is successfully set up. out_frames may
- // still be set to frames that needs to be forwarded to the peer.
- Result *SessionResult `protobuf:"bytes,4,opt,name=result,proto3" json:"result,omitempty"`
- // Status of session setup at the current stage.
- Status *SessionStatus `protobuf:"bytes,5,opt,name=status,proto3" json:"status,omitempty"`
-}
-
-func (x *SessionResp) Reset() {
- *x = SessionResp{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_s2a_s2a_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SessionResp) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SessionResp) ProtoMessage() {}
-
-func (x *SessionResp) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_s2a_s2a_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SessionResp.ProtoReflect.Descriptor instead.
-func (*SessionResp) Descriptor() ([]byte, []int) {
- return file_internal_proto_s2a_s2a_proto_rawDescGZIP(), []int{9}
-}
-
-func (x *SessionResp) GetLocalIdentity() *common_go_proto.Identity {
- if x != nil {
- return x.LocalIdentity
- }
- return nil
-}
-
-func (x *SessionResp) GetOutFrames() []byte {
- if x != nil {
- return x.OutFrames
- }
- return nil
-}
-
-func (x *SessionResp) GetBytesConsumed() uint32 {
- if x != nil {
- return x.BytesConsumed
- }
- return 0
-}
-
-func (x *SessionResp) GetResult() *SessionResult {
- if x != nil {
- return x.Result
- }
- return nil
-}
-
-func (x *SessionResp) GetStatus() *SessionStatus {
- if x != nil {
- return x.Status
- }
- return nil
-}
-
-var File_internal_proto_s2a_s2a_proto protoreflect.FileDescriptor
-
-var file_internal_proto_s2a_s2a_proto_rawDesc = []byte{
- 0x0a, 0x1c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09,
- 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72,
- 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
- 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x75, 0x0a,
- 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d,
- 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e,
- 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52,
- 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b,
- 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65,
- 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f,
- 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xac, 0x03, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53,
- 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x12, 0x33,
- 0x0a, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61,
- 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63,
- 0x6f, 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76,
- 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73,
- 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73,
- 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32,
- 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x12, 0x41, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73,
- 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32,
- 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75,
- 0x69, 0x74, 0x65, 0x52, 0x0f, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75,
- 0x69, 0x74, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x11, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x69,
- 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e,
- 0x74, 0x69, 0x74, 0x79, 0x52, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x49, 0x64, 0x65, 0x6e,
- 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f,
- 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13,
- 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74,
- 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69,
- 0x74, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6e, 0x61, 0x6d,
- 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4e,
- 0x61, 0x6d, 0x65, 0x22, 0xe8, 0x02, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x65,
- 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x12, 0x33, 0x0a,
- 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70,
- 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f,
- 0x6c, 0x73, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32,
- 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x12, 0x3d, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72,
- 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x32, 0x61,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
- 0x12, 0x41, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75,
- 0x69, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69,
- 0x74, 0x65, 0x52, 0x0f, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69,
- 0x74, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65,
- 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e,
- 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69,
- 0x74, 0x79, 0x52, 0x0f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74,
- 0x69, 0x65, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x2b,
- 0x0a, 0x0e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x65, 0x78, 0x74, 0x52, 0x65, 0x71,
- 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x13,
- 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74,
- 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18,
- 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x23,
- 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x49, 0x64, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65,
- 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32,
- 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79,
- 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x22,
- 0xf4, 0x02, 0x0a, 0x0a, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45,
- 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74,
- 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
- 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x45, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f,
- 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x32,
- 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x65,
- 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52,
- 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x2f, 0x0a, 0x04,
- 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x65,
- 0x78, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x12, 0x4d, 0x0a,
- 0x11, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x63, 0x6b,
- 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54,
- 0x69, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x10, 0x72, 0x65, 0x73, 0x75,
- 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x4b, 0x0a, 0x0f,
- 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x18,
- 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x4d,
- 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71,
- 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xa0, 0x03, 0x0a, 0x0c, 0x53, 0x65, 0x73, 0x73, 0x69,
- 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x74, 0x6c, 0x73, 0x5f, 0x76,
- 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73,
- 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73,
- 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x74, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
- 0x3f, 0x0a, 0x0f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69,
- 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65,
- 0x52, 0x0e, 0x74, 0x6c, 0x73, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74, 0x65,
- 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x6e, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x69, 0x6e, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63,
- 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63,
- 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x71, 0x75,
- 0x65, 0x6e, 0x63, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x69, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05,
- 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x69, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x17, 0x0a, 0x07, 0x6f,
- 0x75, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x75,
- 0x74, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x0e, 0x69, 0x6e, 0x5f, 0x66, 0x69, 0x78, 0x65, 0x64,
- 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x69, 0x6e,
- 0x46, 0x69, 0x78, 0x65, 0x64, 0x4e, 0x6f, 0x6e, 0x63, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x6f, 0x75,
- 0x74, 0x5f, 0x66, 0x69, 0x78, 0x65, 0x64, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x18, 0x08, 0x20,
- 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x6f, 0x75, 0x74, 0x46, 0x69, 0x78, 0x65, 0x64, 0x4e, 0x6f, 0x6e,
- 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x69, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x6e, 0x65,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x68, 0x61,
- 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18,
- 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61,
- 0x6b, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x22, 0xd1, 0x02, 0x0a, 0x0d, 0x53, 0x65,
- 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61,
- 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x63, 0x6f, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x2d,
- 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e,
- 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f,
- 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x38, 0x0a,
- 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49,
- 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c,
- 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e,
- 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74,
- 0x69, 0x74, 0x79, 0x12, 0x34, 0x0a, 0x16, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x65, 0x72,
- 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20,
- 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43, 0x65, 0x72, 0x74, 0x46, 0x69,
- 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x65, 0x65,
- 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69,
- 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65,
- 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x22, 0x3d, 0x0a,
- 0x0d, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12,
- 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f,
- 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x22, 0xf3, 0x01, 0x0a,
- 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x3a, 0x0a, 0x0e,
- 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c,
- 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f,
- 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75,
- 0x74, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73,
- 0x5f, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52,
- 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x30,
- 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18,
- 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69,
- 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
- 0x12, 0x30, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73,
- 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74,
- 0x75, 0x73, 0x32, 0x51, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
- 0x12, 0x43, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e,
- 0x12, 0x15, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73,
- 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x16, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x22,
- 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69,
- 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x32,
- 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x33,
-}
-
-var (
- file_internal_proto_s2a_s2a_proto_rawDescOnce sync.Once
- file_internal_proto_s2a_s2a_proto_rawDescData = file_internal_proto_s2a_s2a_proto_rawDesc
-)
-
-func file_internal_proto_s2a_s2a_proto_rawDescGZIP() []byte {
- file_internal_proto_s2a_s2a_proto_rawDescOnce.Do(func() {
- file_internal_proto_s2a_s2a_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_s2a_s2a_proto_rawDescData)
- })
- return file_internal_proto_s2a_s2a_proto_rawDescData
-}
-
-var file_internal_proto_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 10)
-var file_internal_proto_s2a_s2a_proto_goTypes = []interface{}{
- (*AuthenticationMechanism)(nil), // 0: s2a.proto.AuthenticationMechanism
- (*ClientSessionStartReq)(nil), // 1: s2a.proto.ClientSessionStartReq
- (*ServerSessionStartReq)(nil), // 2: s2a.proto.ServerSessionStartReq
- (*SessionNextReq)(nil), // 3: s2a.proto.SessionNextReq
- (*ResumptionTicketReq)(nil), // 4: s2a.proto.ResumptionTicketReq
- (*SessionReq)(nil), // 5: s2a.proto.SessionReq
- (*SessionState)(nil), // 6: s2a.proto.SessionState
- (*SessionResult)(nil), // 7: s2a.proto.SessionResult
- (*SessionStatus)(nil), // 8: s2a.proto.SessionStatus
- (*SessionResp)(nil), // 9: s2a.proto.SessionResp
- (*common_go_proto.Identity)(nil), // 10: s2a.proto.Identity
- (common_go_proto.TLSVersion)(0), // 11: s2a.proto.TLSVersion
- (common_go_proto.Ciphersuite)(0), // 12: s2a.proto.Ciphersuite
-}
-var file_internal_proto_s2a_s2a_proto_depIdxs = []int32{
- 10, // 0: s2a.proto.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity
- 11, // 1: s2a.proto.ClientSessionStartReq.min_tls_version:type_name -> s2a.proto.TLSVersion
- 11, // 2: s2a.proto.ClientSessionStartReq.max_tls_version:type_name -> s2a.proto.TLSVersion
- 12, // 3: s2a.proto.ClientSessionStartReq.tls_ciphersuites:type_name -> s2a.proto.Ciphersuite
- 10, // 4: s2a.proto.ClientSessionStartReq.target_identities:type_name -> s2a.proto.Identity
- 10, // 5: s2a.proto.ClientSessionStartReq.local_identity:type_name -> s2a.proto.Identity
- 11, // 6: s2a.proto.ServerSessionStartReq.min_tls_version:type_name -> s2a.proto.TLSVersion
- 11, // 7: s2a.proto.ServerSessionStartReq.max_tls_version:type_name -> s2a.proto.TLSVersion
- 12, // 8: s2a.proto.ServerSessionStartReq.tls_ciphersuites:type_name -> s2a.proto.Ciphersuite
- 10, // 9: s2a.proto.ServerSessionStartReq.local_identities:type_name -> s2a.proto.Identity
- 10, // 10: s2a.proto.ResumptionTicketReq.local_identity:type_name -> s2a.proto.Identity
- 1, // 11: s2a.proto.SessionReq.client_start:type_name -> s2a.proto.ClientSessionStartReq
- 2, // 12: s2a.proto.SessionReq.server_start:type_name -> s2a.proto.ServerSessionStartReq
- 3, // 13: s2a.proto.SessionReq.next:type_name -> s2a.proto.SessionNextReq
- 4, // 14: s2a.proto.SessionReq.resumption_ticket:type_name -> s2a.proto.ResumptionTicketReq
- 0, // 15: s2a.proto.SessionReq.auth_mechanisms:type_name -> s2a.proto.AuthenticationMechanism
- 11, // 16: s2a.proto.SessionState.tls_version:type_name -> s2a.proto.TLSVersion
- 12, // 17: s2a.proto.SessionState.tls_ciphersuite:type_name -> s2a.proto.Ciphersuite
- 6, // 18: s2a.proto.SessionResult.state:type_name -> s2a.proto.SessionState
- 10, // 19: s2a.proto.SessionResult.peer_identity:type_name -> s2a.proto.Identity
- 10, // 20: s2a.proto.SessionResult.local_identity:type_name -> s2a.proto.Identity
- 10, // 21: s2a.proto.SessionResp.local_identity:type_name -> s2a.proto.Identity
- 7, // 22: s2a.proto.SessionResp.result:type_name -> s2a.proto.SessionResult
- 8, // 23: s2a.proto.SessionResp.status:type_name -> s2a.proto.SessionStatus
- 5, // 24: s2a.proto.S2AService.SetUpSession:input_type -> s2a.proto.SessionReq
- 9, // 25: s2a.proto.S2AService.SetUpSession:output_type -> s2a.proto.SessionResp
- 25, // [25:26] is the sub-list for method output_type
- 24, // [24:25] is the sub-list for method input_type
- 24, // [24:24] is the sub-list for extension type_name
- 24, // [24:24] is the sub-list for extension extendee
- 0, // [0:24] is the sub-list for field type_name
-}
-
-func init() { file_internal_proto_s2a_s2a_proto_init() }
-func file_internal_proto_s2a_s2a_proto_init() {
- if File_internal_proto_s2a_s2a_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_internal_proto_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AuthenticationMechanism); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ClientSessionStartReq); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ServerSessionStartReq); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SessionNextReq); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ResumptionTicketReq); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SessionReq); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SessionState); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SessionResult); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SessionStatus); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SessionResp); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_internal_proto_s2a_s2a_proto_msgTypes[0].OneofWrappers = []interface{}{
- (*AuthenticationMechanism_Token)(nil),
- }
- file_internal_proto_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{
- (*SessionReq_ClientStart)(nil),
- (*SessionReq_ServerStart)(nil),
- (*SessionReq_Next)(nil),
- (*SessionReq_ResumptionTicket)(nil),
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_internal_proto_s2a_s2a_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 10,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_internal_proto_s2a_s2a_proto_goTypes,
- DependencyIndexes: file_internal_proto_s2a_s2a_proto_depIdxs,
- MessageInfos: file_internal_proto_s2a_s2a_proto_msgTypes,
- }.Build()
- File_internal_proto_s2a_s2a_proto = out.File
- file_internal_proto_s2a_s2a_proto_rawDesc = nil
- file_internal_proto_s2a_s2a_proto_goTypes = nil
- file_internal_proto_s2a_s2a_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go b/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go
deleted file mode 100644
index 0fa582fc..00000000
--- a/vendor/github.com/google/s2a-go/internal/proto/s2a_go_proto/s2a_grpc.pb.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2021 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
-// versions:
-// - protoc-gen-go-grpc v1.3.0
-// - protoc v3.21.12
-// source: internal/proto/s2a/s2a.proto
-
-package s2a_go_proto
-
-import (
- context "context"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-)
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.32.0 or later.
-const _ = grpc.SupportPackageIsVersion7
-
-const (
- S2AService_SetUpSession_FullMethodName = "/s2a.proto.S2AService/SetUpSession"
-)
-
-// S2AServiceClient is the client API for S2AService service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
-type S2AServiceClient interface {
- // S2A service accepts a stream of session setup requests and returns a stream
- // of session setup responses. The client of this service is expected to send
- // exactly one client_start or server_start message followed by at least one
- // next message. Applications running TLS clients can send requests with
- // resumption_ticket messages only after the session is successfully set up.
- //
- // Every time S2A client sends a request, this service sends a response.
- // However, clients do not have to wait for service response before sending
- // the next request.
- SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error)
-}
-
-type s2AServiceClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient {
- return &s2AServiceClient{cc}
-}
-
-func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) {
- stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...)
- if err != nil {
- return nil, err
- }
- x := &s2AServiceSetUpSessionClient{stream}
- return x, nil
-}
-
-type S2AService_SetUpSessionClient interface {
- Send(*SessionReq) error
- Recv() (*SessionResp, error)
- grpc.ClientStream
-}
-
-type s2AServiceSetUpSessionClient struct {
- grpc.ClientStream
-}
-
-func (x *s2AServiceSetUpSessionClient) Send(m *SessionReq) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *s2AServiceSetUpSessionClient) Recv() (*SessionResp, error) {
- m := new(SessionResp)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// S2AServiceServer is the server API for S2AService service.
-// All implementations must embed UnimplementedS2AServiceServer
-// for forward compatibility
-type S2AServiceServer interface {
- // S2A service accepts a stream of session setup requests and returns a stream
- // of session setup responses. The client of this service is expected to send
- // exactly one client_start or server_start message followed by at least one
- // next message. Applications running TLS clients can send requests with
- // resumption_ticket messages only after the session is successfully set up.
- //
- // Every time S2A client sends a request, this service sends a response.
- // However, clients do not have to wait for service response before sending
- // the next request.
- SetUpSession(S2AService_SetUpSessionServer) error
- mustEmbedUnimplementedS2AServiceServer()
-}
-
-// UnimplementedS2AServiceServer must be embedded to have forward compatible implementations.
-type UnimplementedS2AServiceServer struct {
-}
-
-func (UnimplementedS2AServiceServer) SetUpSession(S2AService_SetUpSessionServer) error {
- return status.Errorf(codes.Unimplemented, "method SetUpSession not implemented")
-}
-func (UnimplementedS2AServiceServer) mustEmbedUnimplementedS2AServiceServer() {}
-
-// UnsafeS2AServiceServer may be embedded to opt out of forward compatibility for this service.
-// Use of this interface is not recommended, as added methods to S2AServiceServer will
-// result in compilation errors.
-type UnsafeS2AServiceServer interface {
- mustEmbedUnimplementedS2AServiceServer()
-}
-
-func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) {
- s.RegisterService(&S2AService_ServiceDesc, srv)
-}
-
-func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream})
-}
-
-type S2AService_SetUpSessionServer interface {
- Send(*SessionResp) error
- Recv() (*SessionReq, error)
- grpc.ServerStream
-}
-
-type s2AServiceSetUpSessionServer struct {
- grpc.ServerStream
-}
-
-func (x *s2AServiceSetUpSessionServer) Send(m *SessionResp) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *s2AServiceSetUpSessionServer) Recv() (*SessionReq, error) {
- m := new(SessionReq)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// S2AService_ServiceDesc is the grpc.ServiceDesc for S2AService service.
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy)
-var S2AService_ServiceDesc = grpc.ServiceDesc{
- ServiceName: "s2a.proto.S2AService",
- HandlerType: (*S2AServiceServer)(nil),
- Methods: []grpc.MethodDesc{},
- Streams: []grpc.StreamDesc{
- {
- StreamName: "SetUpSession",
- Handler: _S2AService_SetUpSession_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- },
- Metadata: "internal/proto/s2a/s2a.proto",
-}
diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go
deleted file mode 100644
index c84bed97..00000000
--- a/vendor/github.com/google/s2a-go/internal/proto/v2/common_go_proto/common.pb.go
+++ /dev/null
@@ -1,367 +0,0 @@
-// Copyright 2022 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.30.0
-// protoc v3.21.12
-// source: internal/proto/v2/common/common.proto
-
-package common_go_proto
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// The TLS 1.0-1.2 ciphersuites that the application can negotiate when using
-// S2A.
-type Ciphersuite int32
-
-const (
- Ciphersuite_CIPHERSUITE_UNSPECIFIED Ciphersuite = 0
- Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 Ciphersuite = 1
- Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 Ciphersuite = 2
- Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 Ciphersuite = 3
- Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256 Ciphersuite = 4
- Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384 Ciphersuite = 5
- Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 Ciphersuite = 6
-)
-
-// Enum value maps for Ciphersuite.
-var (
- Ciphersuite_name = map[int32]string{
- 0: "CIPHERSUITE_UNSPECIFIED",
- 1: "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
- 2: "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
- 3: "CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
- 4: "CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
- 5: "CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
- 6: "CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256",
- }
- Ciphersuite_value = map[string]int32{
- "CIPHERSUITE_UNSPECIFIED": 0,
- "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": 1,
- "CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": 2,
- "CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": 3,
- "CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256": 4,
- "CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384": 5,
- "CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": 6,
- }
-)
-
-func (x Ciphersuite) Enum() *Ciphersuite {
- p := new(Ciphersuite)
- *p = x
- return p
-}
-
-func (x Ciphersuite) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (Ciphersuite) Descriptor() protoreflect.EnumDescriptor {
- return file_internal_proto_v2_common_common_proto_enumTypes[0].Descriptor()
-}
-
-func (Ciphersuite) Type() protoreflect.EnumType {
- return &file_internal_proto_v2_common_common_proto_enumTypes[0]
-}
-
-func (x Ciphersuite) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use Ciphersuite.Descriptor instead.
-func (Ciphersuite) EnumDescriptor() ([]byte, []int) {
- return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{0}
-}
-
-// The TLS versions supported by S2A's handshaker module.
-type TLSVersion int32
-
-const (
- TLSVersion_TLS_VERSION_UNSPECIFIED TLSVersion = 0
- TLSVersion_TLS_VERSION_1_0 TLSVersion = 1
- TLSVersion_TLS_VERSION_1_1 TLSVersion = 2
- TLSVersion_TLS_VERSION_1_2 TLSVersion = 3
- TLSVersion_TLS_VERSION_1_3 TLSVersion = 4
-)
-
-// Enum value maps for TLSVersion.
-var (
- TLSVersion_name = map[int32]string{
- 0: "TLS_VERSION_UNSPECIFIED",
- 1: "TLS_VERSION_1_0",
- 2: "TLS_VERSION_1_1",
- 3: "TLS_VERSION_1_2",
- 4: "TLS_VERSION_1_3",
- }
- TLSVersion_value = map[string]int32{
- "TLS_VERSION_UNSPECIFIED": 0,
- "TLS_VERSION_1_0": 1,
- "TLS_VERSION_1_1": 2,
- "TLS_VERSION_1_2": 3,
- "TLS_VERSION_1_3": 4,
- }
-)
-
-func (x TLSVersion) Enum() *TLSVersion {
- p := new(TLSVersion)
- *p = x
- return p
-}
-
-func (x TLSVersion) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (TLSVersion) Descriptor() protoreflect.EnumDescriptor {
- return file_internal_proto_v2_common_common_proto_enumTypes[1].Descriptor()
-}
-
-func (TLSVersion) Type() protoreflect.EnumType {
- return &file_internal_proto_v2_common_common_proto_enumTypes[1]
-}
-
-func (x TLSVersion) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use TLSVersion.Descriptor instead.
-func (TLSVersion) EnumDescriptor() ([]byte, []int) {
- return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{1}
-}
-
-// The side in the TLS connection.
-type ConnectionSide int32
-
-const (
- ConnectionSide_CONNECTION_SIDE_UNSPECIFIED ConnectionSide = 0
- ConnectionSide_CONNECTION_SIDE_CLIENT ConnectionSide = 1
- ConnectionSide_CONNECTION_SIDE_SERVER ConnectionSide = 2
-)
-
-// Enum value maps for ConnectionSide.
-var (
- ConnectionSide_name = map[int32]string{
- 0: "CONNECTION_SIDE_UNSPECIFIED",
- 1: "CONNECTION_SIDE_CLIENT",
- 2: "CONNECTION_SIDE_SERVER",
- }
- ConnectionSide_value = map[string]int32{
- "CONNECTION_SIDE_UNSPECIFIED": 0,
- "CONNECTION_SIDE_CLIENT": 1,
- "CONNECTION_SIDE_SERVER": 2,
- }
-)
-
-func (x ConnectionSide) Enum() *ConnectionSide {
- p := new(ConnectionSide)
- *p = x
- return p
-}
-
-func (x ConnectionSide) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (ConnectionSide) Descriptor() protoreflect.EnumDescriptor {
- return file_internal_proto_v2_common_common_proto_enumTypes[2].Descriptor()
-}
-
-func (ConnectionSide) Type() protoreflect.EnumType {
- return &file_internal_proto_v2_common_common_proto_enumTypes[2]
-}
-
-func (x ConnectionSide) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use ConnectionSide.Descriptor instead.
-func (ConnectionSide) EnumDescriptor() ([]byte, []int) {
- return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{2}
-}
-
-// The ALPN protocols that the application can negotiate during a TLS handshake.
-type AlpnProtocol int32
-
-const (
- AlpnProtocol_ALPN_PROTOCOL_UNSPECIFIED AlpnProtocol = 0
- AlpnProtocol_ALPN_PROTOCOL_GRPC AlpnProtocol = 1
- AlpnProtocol_ALPN_PROTOCOL_HTTP2 AlpnProtocol = 2
- AlpnProtocol_ALPN_PROTOCOL_HTTP1_1 AlpnProtocol = 3
-)
-
-// Enum value maps for AlpnProtocol.
-var (
- AlpnProtocol_name = map[int32]string{
- 0: "ALPN_PROTOCOL_UNSPECIFIED",
- 1: "ALPN_PROTOCOL_GRPC",
- 2: "ALPN_PROTOCOL_HTTP2",
- 3: "ALPN_PROTOCOL_HTTP1_1",
- }
- AlpnProtocol_value = map[string]int32{
- "ALPN_PROTOCOL_UNSPECIFIED": 0,
- "ALPN_PROTOCOL_GRPC": 1,
- "ALPN_PROTOCOL_HTTP2": 2,
- "ALPN_PROTOCOL_HTTP1_1": 3,
- }
-)
-
-func (x AlpnProtocol) Enum() *AlpnProtocol {
- p := new(AlpnProtocol)
- *p = x
- return p
-}
-
-func (x AlpnProtocol) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (AlpnProtocol) Descriptor() protoreflect.EnumDescriptor {
- return file_internal_proto_v2_common_common_proto_enumTypes[3].Descriptor()
-}
-
-func (AlpnProtocol) Type() protoreflect.EnumType {
- return &file_internal_proto_v2_common_common_proto_enumTypes[3]
-}
-
-func (x AlpnProtocol) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use AlpnProtocol.Descriptor instead.
-func (AlpnProtocol) EnumDescriptor() ([]byte, []int) {
- return file_internal_proto_v2_common_common_proto_rawDescGZIP(), []int{3}
-}
-
-var File_internal_proto_v2_common_common_proto protoreflect.FileDescriptor
-
-var file_internal_proto_v2_common_common_proto_rawDesc = []byte{
- 0x0a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
- 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2a, 0xee, 0x02, 0x0a, 0x0b, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72,
- 0x73, 0x75, 0x69, 0x74, 0x65, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53,
- 0x55, 0x49, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
- 0x10, 0x00, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54,
- 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49,
- 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53,
- 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x01, 0x12, 0x33, 0x0a, 0x2f, 0x43, 0x49, 0x50, 0x48, 0x45,
- 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x45, 0x43, 0x44,
- 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36, 0x5f,
- 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x39, 0x0a, 0x35,
- 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48,
- 0x45, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41,
- 0x43, 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53,
- 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x03, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49, 0x50, 0x48, 0x45,
- 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41,
- 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x31, 0x32, 0x38, 0x5f, 0x47, 0x43,
- 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x04, 0x12, 0x31, 0x0a, 0x2d, 0x43, 0x49,
- 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44, 0x48, 0x45, 0x5f,
- 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x41, 0x45, 0x53, 0x5f, 0x32, 0x35, 0x36,
- 0x5f, 0x47, 0x43, 0x4d, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x37, 0x0a,
- 0x33, 0x43, 0x49, 0x50, 0x48, 0x45, 0x52, 0x53, 0x55, 0x49, 0x54, 0x45, 0x5f, 0x45, 0x43, 0x44,
- 0x48, 0x45, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x43, 0x48, 0x41, 0x43,
- 0x48, 0x41, 0x32, 0x30, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x31, 0x33, 0x30, 0x35, 0x5f, 0x53, 0x48,
- 0x41, 0x32, 0x35, 0x36, 0x10, 0x06, 0x2a, 0x7d, 0x0a, 0x0a, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72,
- 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53,
- 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
- 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e,
- 0x5f, 0x31, 0x5f, 0x30, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45,
- 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x31, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x54,
- 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x32, 0x10, 0x03,
- 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x4c, 0x53, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x5f,
- 0x31, 0x5f, 0x33, 0x10, 0x04, 0x2a, 0x69, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74,
- 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 0x4e, 0x45,
- 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45,
- 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e,
- 0x45, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45,
- 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x49,
- 0x4f, 0x4e, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02,
- 0x2a, 0x79, 0x0a, 0x0c, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c,
- 0x12, 0x1d, 0x0a, 0x19, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f,
- 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12,
- 0x16, 0x0a, 0x12, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c,
- 0x5f, 0x47, 0x52, 0x50, 0x43, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x50, 0x4e, 0x5f,
- 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x32, 0x10, 0x02,
- 0x12, 0x19, 0x0a, 0x15, 0x41, 0x4c, 0x50, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f,
- 0x4c, 0x5f, 0x48, 0x54, 0x54, 0x50, 0x31, 0x5f, 0x31, 0x10, 0x03, 0x42, 0x39, 0x5a, 0x37, 0x67,
- 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x67, 0x6f,
- 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_internal_proto_v2_common_common_proto_rawDescOnce sync.Once
- file_internal_proto_v2_common_common_proto_rawDescData = file_internal_proto_v2_common_common_proto_rawDesc
-)
-
-func file_internal_proto_v2_common_common_proto_rawDescGZIP() []byte {
- file_internal_proto_v2_common_common_proto_rawDescOnce.Do(func() {
- file_internal_proto_v2_common_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_common_common_proto_rawDescData)
- })
- return file_internal_proto_v2_common_common_proto_rawDescData
-}
-
-var file_internal_proto_v2_common_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4)
-var file_internal_proto_v2_common_common_proto_goTypes = []interface{}{
- (Ciphersuite)(0), // 0: s2a.proto.v2.Ciphersuite
- (TLSVersion)(0), // 1: s2a.proto.v2.TLSVersion
- (ConnectionSide)(0), // 2: s2a.proto.v2.ConnectionSide
- (AlpnProtocol)(0), // 3: s2a.proto.v2.AlpnProtocol
-}
-var file_internal_proto_v2_common_common_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_internal_proto_v2_common_common_proto_init() }
-func file_internal_proto_v2_common_common_proto_init() {
- if File_internal_proto_v2_common_common_proto != nil {
- return
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_internal_proto_v2_common_common_proto_rawDesc,
- NumEnums: 4,
- NumMessages: 0,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_internal_proto_v2_common_common_proto_goTypes,
- DependencyIndexes: file_internal_proto_v2_common_common_proto_depIdxs,
- EnumInfos: file_internal_proto_v2_common_common_proto_enumTypes,
- }.Build()
- File_internal_proto_v2_common_common_proto = out.File
- file_internal_proto_v2_common_common_proto_rawDesc = nil
- file_internal_proto_v2_common_common_proto_goTypes = nil
- file_internal_proto_v2_common_common_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go
deleted file mode 100644
index b7fd871c..00000000
--- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto/s2a_context.pb.go
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright 2022 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.30.0
-// protoc v3.21.12
-// source: internal/proto/v2/s2a_context/s2a_context.proto
-
-package s2a_context_go_proto
-
-import (
- common_go_proto "github.com/google/s2a-go/internal/proto/common_go_proto"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type S2AContext struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The SPIFFE ID from the peer leaf certificate, if present.
- //
- // This field is only populated if the leaf certificate is a valid SPIFFE
- // SVID; in particular, there is a unique URI SAN and this URI SAN is a valid
- // SPIFFE ID.
- LeafCertSpiffeId string `protobuf:"bytes,1,opt,name=leaf_cert_spiffe_id,json=leafCertSpiffeId,proto3" json:"leaf_cert_spiffe_id,omitempty"`
- // The URIs that are present in the SubjectAltName extension of the peer leaf
- // certificate.
- //
- // Note that the extracted URIs are not validated and may not be properly
- // formatted.
- LeafCertUris []string `protobuf:"bytes,2,rep,name=leaf_cert_uris,json=leafCertUris,proto3" json:"leaf_cert_uris,omitempty"`
- // The DNSNames that are present in the SubjectAltName extension of the peer
- // leaf certificate.
- LeafCertDnsnames []string `protobuf:"bytes,3,rep,name=leaf_cert_dnsnames,json=leafCertDnsnames,proto3" json:"leaf_cert_dnsnames,omitempty"`
- // The (ordered) list of fingerprints in the certificate chain used to verify
- // the given leaf certificate. The order MUST be from leaf certificate
- // fingerprint to root certificate fingerprint.
- //
- // A fingerprint is the base-64 encoding of the SHA256 hash of the
- // DER-encoding of a certificate. The list MAY be populated even if the peer
- // certificate chain was NOT validated successfully.
- PeerCertificateChainFingerprints []string `protobuf:"bytes,4,rep,name=peer_certificate_chain_fingerprints,json=peerCertificateChainFingerprints,proto3" json:"peer_certificate_chain_fingerprints,omitempty"`
- // The local identity used during session setup.
- LocalIdentity *common_go_proto.Identity `protobuf:"bytes,5,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"`
- // The SHA256 hash of the DER-encoding of the local leaf certificate used in
- // the handshake.
- LocalLeafCertFingerprint []byte `protobuf:"bytes,6,opt,name=local_leaf_cert_fingerprint,json=localLeafCertFingerprint,proto3" json:"local_leaf_cert_fingerprint,omitempty"`
-}
-
-func (x *S2AContext) Reset() {
- *x = S2AContext{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *S2AContext) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*S2AContext) ProtoMessage() {}
-
-func (x *S2AContext) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use S2AContext.ProtoReflect.Descriptor instead.
-func (*S2AContext) Descriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *S2AContext) GetLeafCertSpiffeId() string {
- if x != nil {
- return x.LeafCertSpiffeId
- }
- return ""
-}
-
-func (x *S2AContext) GetLeafCertUris() []string {
- if x != nil {
- return x.LeafCertUris
- }
- return nil
-}
-
-func (x *S2AContext) GetLeafCertDnsnames() []string {
- if x != nil {
- return x.LeafCertDnsnames
- }
- return nil
-}
-
-func (x *S2AContext) GetPeerCertificateChainFingerprints() []string {
- if x != nil {
- return x.PeerCertificateChainFingerprints
- }
- return nil
-}
-
-func (x *S2AContext) GetLocalIdentity() *common_go_proto.Identity {
- if x != nil {
- return x.LocalIdentity
- }
- return nil
-}
-
-func (x *S2AContext) GetLocalLeafCertFingerprint() []byte {
- if x != nil {
- return x.LocalLeafCertFingerprint
- }
- return nil
-}
-
-var File_internal_proto_v2_s2a_context_s2a_context_proto protoreflect.FileDescriptor
-
-var file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = []byte{
- 0x0a, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f,
- 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a,
- 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
- 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x22, 0xd9, 0x02, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65,
- 0x78, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f,
- 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49,
- 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x75,
- 0x72, 0x69, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x66, 0x43,
- 0x65, 0x72, 0x74, 0x55, 0x72, 0x69, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x61, 0x66, 0x5f,
- 0x63, 0x65, 0x72, 0x74, 0x5f, 0x64, 0x6e, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20,
- 0x03, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x65, 0x61, 0x66, 0x43, 0x65, 0x72, 0x74, 0x44, 0x6e, 0x73,
- 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x23, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65,
- 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f,
- 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03,
- 0x28, 0x09, 0x52, 0x20, 0x70, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63,
- 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72,
- 0x69, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x69, 0x64,
- 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73,
- 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74,
- 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79,
- 0x12, 0x3d, 0x0a, 0x1b, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6c, 0x65, 0x61, 0x66, 0x5f, 0x63,
- 0x65, 0x72, 0x74, 0x5f, 0x66, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4c, 0x65, 0x61, 0x66,
- 0x43, 0x65, 0x72, 0x74, 0x46, 0x69, 0x6e, 0x67, 0x65, 0x72, 0x70, 0x72, 0x69, 0x6e, 0x74, 0x42,
- 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61,
- 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63,
- 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescOnce sync.Once
- file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData = file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc
-)
-
-func file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescGZIP() []byte {
- file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescOnce.Do(func() {
- file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData)
- })
- return file_internal_proto_v2_s2a_context_s2a_context_proto_rawDescData
-}
-
-var file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = []interface{}{
- (*S2AContext)(nil), // 0: s2a.proto.v2.S2AContext
- (*common_go_proto.Identity)(nil), // 1: s2a.proto.Identity
-}
-var file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = []int32{
- 1, // 0: s2a.proto.v2.S2AContext.local_identity:type_name -> s2a.proto.Identity
- 1, // [1:1] is the sub-list for method output_type
- 1, // [1:1] is the sub-list for method input_type
- 1, // [1:1] is the sub-list for extension type_name
- 1, // [1:1] is the sub-list for extension extendee
- 0, // [0:1] is the sub-list for field type_name
-}
-
-func init() { file_internal_proto_v2_s2a_context_s2a_context_proto_init() }
-func file_internal_proto_v2_s2a_context_s2a_context_proto_init() {
- if File_internal_proto_v2_s2a_context_s2a_context_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*S2AContext); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 1,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes,
- DependencyIndexes: file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs,
- MessageInfos: file_internal_proto_v2_s2a_context_s2a_context_proto_msgTypes,
- }.Build()
- File_internal_proto_v2_s2a_context_s2a_context_proto = out.File
- file_internal_proto_v2_s2a_context_s2a_context_proto_rawDesc = nil
- file_internal_proto_v2_s2a_context_s2a_context_proto_goTypes = nil
- file_internal_proto_v2_s2a_context_s2a_context_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go
deleted file mode 100644
index e843450c..00000000
--- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a.pb.go
+++ /dev/null
@@ -1,2494 +0,0 @@
-// Copyright 2022 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.30.0
-// protoc v3.21.12
-// source: internal/proto/v2/s2a/s2a.proto
-
-package s2a_go_proto
-
-import (
- common_go_proto1 "github.com/google/s2a-go/internal/proto/common_go_proto"
- common_go_proto "github.com/google/s2a-go/internal/proto/v2/common_go_proto"
- s2a_context_go_proto "github.com/google/s2a-go/internal/proto/v2/s2a_context_go_proto"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type SignatureAlgorithm int32
-
-const (
- SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED SignatureAlgorithm = 0
- // RSA Public-Key Cryptography Standards #1.
- SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA256 SignatureAlgorithm = 1
- SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA384 SignatureAlgorithm = 2
- SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA512 SignatureAlgorithm = 3
- // ECDSA.
- SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256 SignatureAlgorithm = 4
- SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384 SignatureAlgorithm = 5
- SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512 SignatureAlgorithm = 6
- // RSA Probabilistic Signature Scheme.
- SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256 SignatureAlgorithm = 7
- SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384 SignatureAlgorithm = 8
- SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512 SignatureAlgorithm = 9
- // ED25519.
- SignatureAlgorithm_S2A_SSL_SIGN_ED25519 SignatureAlgorithm = 10
-)
-
-// Enum value maps for SignatureAlgorithm.
-var (
- SignatureAlgorithm_name = map[int32]string{
- 0: "S2A_SSL_SIGN_UNSPECIFIED",
- 1: "S2A_SSL_SIGN_RSA_PKCS1_SHA256",
- 2: "S2A_SSL_SIGN_RSA_PKCS1_SHA384",
- 3: "S2A_SSL_SIGN_RSA_PKCS1_SHA512",
- 4: "S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256",
- 5: "S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384",
- 6: "S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512",
- 7: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256",
- 8: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384",
- 9: "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512",
- 10: "S2A_SSL_SIGN_ED25519",
- }
- SignatureAlgorithm_value = map[string]int32{
- "S2A_SSL_SIGN_UNSPECIFIED": 0,
- "S2A_SSL_SIGN_RSA_PKCS1_SHA256": 1,
- "S2A_SSL_SIGN_RSA_PKCS1_SHA384": 2,
- "S2A_SSL_SIGN_RSA_PKCS1_SHA512": 3,
- "S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256": 4,
- "S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384": 5,
- "S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512": 6,
- "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256": 7,
- "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384": 8,
- "S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512": 9,
- "S2A_SSL_SIGN_ED25519": 10,
- }
-)
-
-func (x SignatureAlgorithm) Enum() *SignatureAlgorithm {
- p := new(SignatureAlgorithm)
- *p = x
- return p
-}
-
-func (x SignatureAlgorithm) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (SignatureAlgorithm) Descriptor() protoreflect.EnumDescriptor {
- return file_internal_proto_v2_s2a_s2a_proto_enumTypes[0].Descriptor()
-}
-
-func (SignatureAlgorithm) Type() protoreflect.EnumType {
- return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[0]
-}
-
-func (x SignatureAlgorithm) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use SignatureAlgorithm.Descriptor instead.
-func (SignatureAlgorithm) EnumDescriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{0}
-}
-
-type GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate int32
-
-const (
- GetTlsConfigurationResp_ServerTlsConfiguration_UNSPECIFIED GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 0
- GetTlsConfigurationResp_ServerTlsConfiguration_DONT_REQUEST_CLIENT_CERTIFICATE GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 1
- GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 2
- GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 3
- GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 4
- GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate = 5
-)
-
-// Enum value maps for GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate.
-var (
- GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate_name = map[int32]string{
- 0: "UNSPECIFIED",
- 1: "DONT_REQUEST_CLIENT_CERTIFICATE",
- 2: "REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY",
- 3: "REQUEST_CLIENT_CERTIFICATE_AND_VERIFY",
- 4: "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY",
- 5: "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY",
- }
- GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate_value = map[string]int32{
- "UNSPECIFIED": 0,
- "DONT_REQUEST_CLIENT_CERTIFICATE": 1,
- "REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY": 2,
- "REQUEST_CLIENT_CERTIFICATE_AND_VERIFY": 3,
- "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY": 4,
- "REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY": 5,
- }
-)
-
-func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Enum() *GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate {
- p := new(GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate)
- *p = x
- return p
-}
-
-func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Descriptor() protoreflect.EnumDescriptor {
- return file_internal_proto_v2_s2a_s2a_proto_enumTypes[1].Descriptor()
-}
-
-func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Type() protoreflect.EnumType {
- return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[1]
-}
-
-func (x GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate.Descriptor instead.
-func (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate) EnumDescriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 1, 0}
-}
-
-type OffloadPrivateKeyOperationReq_PrivateKeyOperation int32
-
-const (
- OffloadPrivateKeyOperationReq_UNSPECIFIED OffloadPrivateKeyOperationReq_PrivateKeyOperation = 0
- // When performing a TLS 1.2 or 1.3 handshake, the (partial) transcript of
- // the TLS handshake must be signed to prove possession of the private key.
- //
- // See https://www.rfc-editor.org/rfc/rfc8446.html#section-4.4.3.
- OffloadPrivateKeyOperationReq_SIGN OffloadPrivateKeyOperationReq_PrivateKeyOperation = 1
- // When performing a TLS 1.2 handshake using an RSA algorithm, the key
- // exchange algorithm involves the client generating a premaster secret,
- // encrypting it using the server's public key, and sending this encrypted
- // blob to the server in a ClientKeyExchange message.
- //
- // See https://www.rfc-editor.org/rfc/rfc4346#section-7.4.7.1.
- OffloadPrivateKeyOperationReq_DECRYPT OffloadPrivateKeyOperationReq_PrivateKeyOperation = 2
-)
-
-// Enum value maps for OffloadPrivateKeyOperationReq_PrivateKeyOperation.
-var (
- OffloadPrivateKeyOperationReq_PrivateKeyOperation_name = map[int32]string{
- 0: "UNSPECIFIED",
- 1: "SIGN",
- 2: "DECRYPT",
- }
- OffloadPrivateKeyOperationReq_PrivateKeyOperation_value = map[string]int32{
- "UNSPECIFIED": 0,
- "SIGN": 1,
- "DECRYPT": 2,
- }
-)
-
-func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) Enum() *OffloadPrivateKeyOperationReq_PrivateKeyOperation {
- p := new(OffloadPrivateKeyOperationReq_PrivateKeyOperation)
- *p = x
- return p
-}
-
-func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) Descriptor() protoreflect.EnumDescriptor {
- return file_internal_proto_v2_s2a_s2a_proto_enumTypes[2].Descriptor()
-}
-
-func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) Type() protoreflect.EnumType {
- return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[2]
-}
-
-func (x OffloadPrivateKeyOperationReq_PrivateKeyOperation) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use OffloadPrivateKeyOperationReq_PrivateKeyOperation.Descriptor instead.
-func (OffloadPrivateKeyOperationReq_PrivateKeyOperation) EnumDescriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{5, 0}
-}
-
-type OffloadResumptionKeyOperationReq_ResumptionKeyOperation int32
-
-const (
- OffloadResumptionKeyOperationReq_UNSPECIFIED OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 0
- OffloadResumptionKeyOperationReq_ENCRYPT OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 1
- OffloadResumptionKeyOperationReq_DECRYPT OffloadResumptionKeyOperationReq_ResumptionKeyOperation = 2
-)
-
-// Enum value maps for OffloadResumptionKeyOperationReq_ResumptionKeyOperation.
-var (
- OffloadResumptionKeyOperationReq_ResumptionKeyOperation_name = map[int32]string{
- 0: "UNSPECIFIED",
- 1: "ENCRYPT",
- 2: "DECRYPT",
- }
- OffloadResumptionKeyOperationReq_ResumptionKeyOperation_value = map[string]int32{
- "UNSPECIFIED": 0,
- "ENCRYPT": 1,
- "DECRYPT": 2,
- }
-)
-
-func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Enum() *OffloadResumptionKeyOperationReq_ResumptionKeyOperation {
- p := new(OffloadResumptionKeyOperationReq_ResumptionKeyOperation)
- *p = x
- return p
-}
-
-func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Descriptor() protoreflect.EnumDescriptor {
- return file_internal_proto_v2_s2a_s2a_proto_enumTypes[3].Descriptor()
-}
-
-func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Type() protoreflect.EnumType {
- return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[3]
-}
-
-func (x OffloadResumptionKeyOperationReq_ResumptionKeyOperation) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use OffloadResumptionKeyOperationReq_ResumptionKeyOperation.Descriptor instead.
-func (OffloadResumptionKeyOperationReq_ResumptionKeyOperation) EnumDescriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{7, 0}
-}
-
-type ValidatePeerCertificateChainReq_VerificationMode int32
-
-const (
- // The default verification mode supported by S2A.
- ValidatePeerCertificateChainReq_UNSPECIFIED ValidatePeerCertificateChainReq_VerificationMode = 0
- // The SPIFFE verification mode selects the set of trusted certificates to
- // use for path building based on the SPIFFE trust domain in the peer's leaf
- // certificate.
- ValidatePeerCertificateChainReq_SPIFFE ValidatePeerCertificateChainReq_VerificationMode = 1
- // The connect-to-Google verification mode uses the trust bundle for
- // connecting to Google, e.g. *.mtls.googleapis.com endpoints.
- ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE ValidatePeerCertificateChainReq_VerificationMode = 2
-)
-
-// Enum value maps for ValidatePeerCertificateChainReq_VerificationMode.
-var (
- ValidatePeerCertificateChainReq_VerificationMode_name = map[int32]string{
- 0: "UNSPECIFIED",
- 1: "SPIFFE",
- 2: "CONNECT_TO_GOOGLE",
- }
- ValidatePeerCertificateChainReq_VerificationMode_value = map[string]int32{
- "UNSPECIFIED": 0,
- "SPIFFE": 1,
- "CONNECT_TO_GOOGLE": 2,
- }
-)
-
-func (x ValidatePeerCertificateChainReq_VerificationMode) Enum() *ValidatePeerCertificateChainReq_VerificationMode {
- p := new(ValidatePeerCertificateChainReq_VerificationMode)
- *p = x
- return p
-}
-
-func (x ValidatePeerCertificateChainReq_VerificationMode) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (ValidatePeerCertificateChainReq_VerificationMode) Descriptor() protoreflect.EnumDescriptor {
- return file_internal_proto_v2_s2a_s2a_proto_enumTypes[4].Descriptor()
-}
-
-func (ValidatePeerCertificateChainReq_VerificationMode) Type() protoreflect.EnumType {
- return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[4]
-}
-
-func (x ValidatePeerCertificateChainReq_VerificationMode) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use ValidatePeerCertificateChainReq_VerificationMode.Descriptor instead.
-func (ValidatePeerCertificateChainReq_VerificationMode) EnumDescriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 0}
-}
-
-type ValidatePeerCertificateChainResp_ValidationResult int32
-
-const (
- ValidatePeerCertificateChainResp_UNSPECIFIED ValidatePeerCertificateChainResp_ValidationResult = 0
- ValidatePeerCertificateChainResp_SUCCESS ValidatePeerCertificateChainResp_ValidationResult = 1
- ValidatePeerCertificateChainResp_FAILURE ValidatePeerCertificateChainResp_ValidationResult = 2
-)
-
-// Enum value maps for ValidatePeerCertificateChainResp_ValidationResult.
-var (
- ValidatePeerCertificateChainResp_ValidationResult_name = map[int32]string{
- 0: "UNSPECIFIED",
- 1: "SUCCESS",
- 2: "FAILURE",
- }
- ValidatePeerCertificateChainResp_ValidationResult_value = map[string]int32{
- "UNSPECIFIED": 0,
- "SUCCESS": 1,
- "FAILURE": 2,
- }
-)
-
-func (x ValidatePeerCertificateChainResp_ValidationResult) Enum() *ValidatePeerCertificateChainResp_ValidationResult {
- p := new(ValidatePeerCertificateChainResp_ValidationResult)
- *p = x
- return p
-}
-
-func (x ValidatePeerCertificateChainResp_ValidationResult) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (ValidatePeerCertificateChainResp_ValidationResult) Descriptor() protoreflect.EnumDescriptor {
- return file_internal_proto_v2_s2a_s2a_proto_enumTypes[5].Descriptor()
-}
-
-func (ValidatePeerCertificateChainResp_ValidationResult) Type() protoreflect.EnumType {
- return &file_internal_proto_v2_s2a_s2a_proto_enumTypes[5]
-}
-
-func (x ValidatePeerCertificateChainResp_ValidationResult) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use ValidatePeerCertificateChainResp_ValidationResult.Descriptor instead.
-func (ValidatePeerCertificateChainResp_ValidationResult) EnumDescriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{10, 0}
-}
-
-type AlpnPolicy struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // If true, the application MUST perform ALPN negotiation.
- EnableAlpnNegotiation bool `protobuf:"varint,1,opt,name=enable_alpn_negotiation,json=enableAlpnNegotiation,proto3" json:"enable_alpn_negotiation,omitempty"`
- // The ordered list of ALPN protocols that specify how the application SHOULD
- // negotiate ALPN during the TLS handshake.
- //
- // The application MAY ignore any ALPN protocols in this list that are not
- // supported by the application.
- AlpnProtocols []common_go_proto.AlpnProtocol `protobuf:"varint,2,rep,packed,name=alpn_protocols,json=alpnProtocols,proto3,enum=s2a.proto.v2.AlpnProtocol" json:"alpn_protocols,omitempty"`
-}
-
-func (x *AlpnPolicy) Reset() {
- *x = AlpnPolicy{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AlpnPolicy) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AlpnPolicy) ProtoMessage() {}
-
-func (x *AlpnPolicy) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AlpnPolicy.ProtoReflect.Descriptor instead.
-func (*AlpnPolicy) Descriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *AlpnPolicy) GetEnableAlpnNegotiation() bool {
- if x != nil {
- return x.EnableAlpnNegotiation
- }
- return false
-}
-
-func (x *AlpnPolicy) GetAlpnProtocols() []common_go_proto.AlpnProtocol {
- if x != nil {
- return x.AlpnProtocols
- }
- return nil
-}
-
-type AuthenticationMechanism struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Applications may specify an identity associated to an authentication
- // mechanism. Otherwise, S2A assumes that the authentication mechanism is
- // associated with the default identity. If the default identity cannot be
- // determined, the request is rejected.
- Identity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=identity,proto3" json:"identity,omitempty"`
- // Types that are assignable to MechanismOneof:
- //
- // *AuthenticationMechanism_Token
- MechanismOneof isAuthenticationMechanism_MechanismOneof `protobuf_oneof:"mechanism_oneof"`
-}
-
-func (x *AuthenticationMechanism) Reset() {
- *x = AuthenticationMechanism{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AuthenticationMechanism) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AuthenticationMechanism) ProtoMessage() {}
-
-func (x *AuthenticationMechanism) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AuthenticationMechanism.ProtoReflect.Descriptor instead.
-func (*AuthenticationMechanism) Descriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *AuthenticationMechanism) GetIdentity() *common_go_proto1.Identity {
- if x != nil {
- return x.Identity
- }
- return nil
-}
-
-func (m *AuthenticationMechanism) GetMechanismOneof() isAuthenticationMechanism_MechanismOneof {
- if m != nil {
- return m.MechanismOneof
- }
- return nil
-}
-
-func (x *AuthenticationMechanism) GetToken() string {
- if x, ok := x.GetMechanismOneof().(*AuthenticationMechanism_Token); ok {
- return x.Token
- }
- return ""
-}
-
-type isAuthenticationMechanism_MechanismOneof interface {
- isAuthenticationMechanism_MechanismOneof()
-}
-
-type AuthenticationMechanism_Token struct {
- // A token that the application uses to authenticate itself to S2A.
- Token string `protobuf:"bytes,2,opt,name=token,proto3,oneof"`
-}
-
-func (*AuthenticationMechanism_Token) isAuthenticationMechanism_MechanismOneof() {}
-
-type Status struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The status code that is specific to the application and the implementation
- // of S2A, e.g., gRPC status code.
- Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
- // The status details.
- Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"`
-}
-
-func (x *Status) Reset() {
- *x = Status{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Status) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Status) ProtoMessage() {}
-
-func (x *Status) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Status.ProtoReflect.Descriptor instead.
-func (*Status) Descriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *Status) GetCode() uint32 {
- if x != nil {
- return x.Code
- }
- return 0
-}
-
-func (x *Status) GetDetails() string {
- if x != nil {
- return x.Details
- }
- return ""
-}
-
-type GetTlsConfigurationReq struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The role of the application in the TLS connection.
- ConnectionSide common_go_proto.ConnectionSide `protobuf:"varint,1,opt,name=connection_side,json=connectionSide,proto3,enum=s2a.proto.v2.ConnectionSide" json:"connection_side,omitempty"`
- // The server name indication (SNI) extension, which MAY be populated when a
- // server is offloading to S2A. The SNI is used to determine the server
- // identity if the local identity in the request is empty.
- Sni string `protobuf:"bytes,2,opt,name=sni,proto3" json:"sni,omitempty"`
-}
-
-func (x *GetTlsConfigurationReq) Reset() {
- *x = GetTlsConfigurationReq{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetTlsConfigurationReq) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetTlsConfigurationReq) ProtoMessage() {}
-
-func (x *GetTlsConfigurationReq) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetTlsConfigurationReq.ProtoReflect.Descriptor instead.
-func (*GetTlsConfigurationReq) Descriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *GetTlsConfigurationReq) GetConnectionSide() common_go_proto.ConnectionSide {
- if x != nil {
- return x.ConnectionSide
- }
- return common_go_proto.ConnectionSide(0)
-}
-
-func (x *GetTlsConfigurationReq) GetSni() string {
- if x != nil {
- return x.Sni
- }
- return ""
-}
-
-type GetTlsConfigurationResp struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to TlsConfiguration:
- //
- // *GetTlsConfigurationResp_ClientTlsConfiguration_
- // *GetTlsConfigurationResp_ServerTlsConfiguration_
- TlsConfiguration isGetTlsConfigurationResp_TlsConfiguration `protobuf_oneof:"tls_configuration"`
-}
-
-func (x *GetTlsConfigurationResp) Reset() {
- *x = GetTlsConfigurationResp{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetTlsConfigurationResp) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetTlsConfigurationResp) ProtoMessage() {}
-
-func (x *GetTlsConfigurationResp) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetTlsConfigurationResp.ProtoReflect.Descriptor instead.
-func (*GetTlsConfigurationResp) Descriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4}
-}
-
-func (m *GetTlsConfigurationResp) GetTlsConfiguration() isGetTlsConfigurationResp_TlsConfiguration {
- if m != nil {
- return m.TlsConfiguration
- }
- return nil
-}
-
-func (x *GetTlsConfigurationResp) GetClientTlsConfiguration() *GetTlsConfigurationResp_ClientTlsConfiguration {
- if x, ok := x.GetTlsConfiguration().(*GetTlsConfigurationResp_ClientTlsConfiguration_); ok {
- return x.ClientTlsConfiguration
- }
- return nil
-}
-
-func (x *GetTlsConfigurationResp) GetServerTlsConfiguration() *GetTlsConfigurationResp_ServerTlsConfiguration {
- if x, ok := x.GetTlsConfiguration().(*GetTlsConfigurationResp_ServerTlsConfiguration_); ok {
- return x.ServerTlsConfiguration
- }
- return nil
-}
-
-type isGetTlsConfigurationResp_TlsConfiguration interface {
- isGetTlsConfigurationResp_TlsConfiguration()
-}
-
-type GetTlsConfigurationResp_ClientTlsConfiguration_ struct {
- ClientTlsConfiguration *GetTlsConfigurationResp_ClientTlsConfiguration `protobuf:"bytes,1,opt,name=client_tls_configuration,json=clientTlsConfiguration,proto3,oneof"`
-}
-
-type GetTlsConfigurationResp_ServerTlsConfiguration_ struct {
- ServerTlsConfiguration *GetTlsConfigurationResp_ServerTlsConfiguration `protobuf:"bytes,2,opt,name=server_tls_configuration,json=serverTlsConfiguration,proto3,oneof"`
-}
-
-func (*GetTlsConfigurationResp_ClientTlsConfiguration_) isGetTlsConfigurationResp_TlsConfiguration() {
-}
-
-func (*GetTlsConfigurationResp_ServerTlsConfiguration_) isGetTlsConfigurationResp_TlsConfiguration() {
-}
-
-type OffloadPrivateKeyOperationReq struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The operation the private key is used for.
- Operation OffloadPrivateKeyOperationReq_PrivateKeyOperation `protobuf:"varint,1,opt,name=operation,proto3,enum=s2a.proto.v2.OffloadPrivateKeyOperationReq_PrivateKeyOperation" json:"operation,omitempty"`
- // The signature algorithm to be used for signing operations.
- SignatureAlgorithm SignatureAlgorithm `protobuf:"varint,2,opt,name=signature_algorithm,json=signatureAlgorithm,proto3,enum=s2a.proto.v2.SignatureAlgorithm" json:"signature_algorithm,omitempty"`
- // The input bytes to be signed or decrypted.
- //
- // Types that are assignable to InBytes:
- //
- // *OffloadPrivateKeyOperationReq_RawBytes
- // *OffloadPrivateKeyOperationReq_Sha256Digest
- // *OffloadPrivateKeyOperationReq_Sha384Digest
- // *OffloadPrivateKeyOperationReq_Sha512Digest
- InBytes isOffloadPrivateKeyOperationReq_InBytes `protobuf_oneof:"in_bytes"`
-}
-
-func (x *OffloadPrivateKeyOperationReq) Reset() {
- *x = OffloadPrivateKeyOperationReq{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *OffloadPrivateKeyOperationReq) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*OffloadPrivateKeyOperationReq) ProtoMessage() {}
-
-func (x *OffloadPrivateKeyOperationReq) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use OffloadPrivateKeyOperationReq.ProtoReflect.Descriptor instead.
-func (*OffloadPrivateKeyOperationReq) Descriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *OffloadPrivateKeyOperationReq) GetOperation() OffloadPrivateKeyOperationReq_PrivateKeyOperation {
- if x != nil {
- return x.Operation
- }
- return OffloadPrivateKeyOperationReq_UNSPECIFIED
-}
-
-func (x *OffloadPrivateKeyOperationReq) GetSignatureAlgorithm() SignatureAlgorithm {
- if x != nil {
- return x.SignatureAlgorithm
- }
- return SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED
-}
-
-func (m *OffloadPrivateKeyOperationReq) GetInBytes() isOffloadPrivateKeyOperationReq_InBytes {
- if m != nil {
- return m.InBytes
- }
- return nil
-}
-
-func (x *OffloadPrivateKeyOperationReq) GetRawBytes() []byte {
- if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_RawBytes); ok {
- return x.RawBytes
- }
- return nil
-}
-
-func (x *OffloadPrivateKeyOperationReq) GetSha256Digest() []byte {
- if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha256Digest); ok {
- return x.Sha256Digest
- }
- return nil
-}
-
-func (x *OffloadPrivateKeyOperationReq) GetSha384Digest() []byte {
- if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha384Digest); ok {
- return x.Sha384Digest
- }
- return nil
-}
-
-func (x *OffloadPrivateKeyOperationReq) GetSha512Digest() []byte {
- if x, ok := x.GetInBytes().(*OffloadPrivateKeyOperationReq_Sha512Digest); ok {
- return x.Sha512Digest
- }
- return nil
-}
-
-type isOffloadPrivateKeyOperationReq_InBytes interface {
- isOffloadPrivateKeyOperationReq_InBytes()
-}
-
-type OffloadPrivateKeyOperationReq_RawBytes struct {
- // Raw bytes to be hashed and signed, or decrypted.
- RawBytes []byte `protobuf:"bytes,4,opt,name=raw_bytes,json=rawBytes,proto3,oneof"`
-}
-
-type OffloadPrivateKeyOperationReq_Sha256Digest struct {
- // A SHA256 hash to be signed. Must be 32 bytes.
- Sha256Digest []byte `protobuf:"bytes,5,opt,name=sha256_digest,json=sha256Digest,proto3,oneof"`
-}
-
-type OffloadPrivateKeyOperationReq_Sha384Digest struct {
- // A SHA384 hash to be signed. Must be 48 bytes.
- Sha384Digest []byte `protobuf:"bytes,6,opt,name=sha384_digest,json=sha384Digest,proto3,oneof"`
-}
-
-type OffloadPrivateKeyOperationReq_Sha512Digest struct {
- // A SHA512 hash to be signed. Must be 64 bytes.
- Sha512Digest []byte `protobuf:"bytes,7,opt,name=sha512_digest,json=sha512Digest,proto3,oneof"`
-}
-
-func (*OffloadPrivateKeyOperationReq_RawBytes) isOffloadPrivateKeyOperationReq_InBytes() {}
-
-func (*OffloadPrivateKeyOperationReq_Sha256Digest) isOffloadPrivateKeyOperationReq_InBytes() {}
-
-func (*OffloadPrivateKeyOperationReq_Sha384Digest) isOffloadPrivateKeyOperationReq_InBytes() {}
-
-func (*OffloadPrivateKeyOperationReq_Sha512Digest) isOffloadPrivateKeyOperationReq_InBytes() {}
-
-type OffloadPrivateKeyOperationResp struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The signed or decrypted output bytes.
- OutBytes []byte `protobuf:"bytes,1,opt,name=out_bytes,json=outBytes,proto3" json:"out_bytes,omitempty"`
-}
-
-func (x *OffloadPrivateKeyOperationResp) Reset() {
- *x = OffloadPrivateKeyOperationResp{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *OffloadPrivateKeyOperationResp) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*OffloadPrivateKeyOperationResp) ProtoMessage() {}
-
-func (x *OffloadPrivateKeyOperationResp) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use OffloadPrivateKeyOperationResp.ProtoReflect.Descriptor instead.
-func (*OffloadPrivateKeyOperationResp) Descriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *OffloadPrivateKeyOperationResp) GetOutBytes() []byte {
- if x != nil {
- return x.OutBytes
- }
- return nil
-}
-
-type OffloadResumptionKeyOperationReq struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The operation the resumption key is used for.
- Operation OffloadResumptionKeyOperationReq_ResumptionKeyOperation `protobuf:"varint,1,opt,name=operation,proto3,enum=s2a.proto.v2.OffloadResumptionKeyOperationReq_ResumptionKeyOperation" json:"operation,omitempty"`
- // The bytes to be encrypted or decrypted.
- InBytes []byte `protobuf:"bytes,2,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"`
-}
-
-func (x *OffloadResumptionKeyOperationReq) Reset() {
- *x = OffloadResumptionKeyOperationReq{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *OffloadResumptionKeyOperationReq) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*OffloadResumptionKeyOperationReq) ProtoMessage() {}
-
-func (x *OffloadResumptionKeyOperationReq) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use OffloadResumptionKeyOperationReq.ProtoReflect.Descriptor instead.
-func (*OffloadResumptionKeyOperationReq) Descriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *OffloadResumptionKeyOperationReq) GetOperation() OffloadResumptionKeyOperationReq_ResumptionKeyOperation {
- if x != nil {
- return x.Operation
- }
- return OffloadResumptionKeyOperationReq_UNSPECIFIED
-}
-
-func (x *OffloadResumptionKeyOperationReq) GetInBytes() []byte {
- if x != nil {
- return x.InBytes
- }
- return nil
-}
-
-type OffloadResumptionKeyOperationResp struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The encrypted or decrypted bytes.
- OutBytes []byte `protobuf:"bytes,1,opt,name=out_bytes,json=outBytes,proto3" json:"out_bytes,omitempty"`
-}
-
-func (x *OffloadResumptionKeyOperationResp) Reset() {
- *x = OffloadResumptionKeyOperationResp{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *OffloadResumptionKeyOperationResp) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*OffloadResumptionKeyOperationResp) ProtoMessage() {}
-
-func (x *OffloadResumptionKeyOperationResp) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use OffloadResumptionKeyOperationResp.ProtoReflect.Descriptor instead.
-func (*OffloadResumptionKeyOperationResp) Descriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *OffloadResumptionKeyOperationResp) GetOutBytes() []byte {
- if x != nil {
- return x.OutBytes
- }
- return nil
-}
-
-type ValidatePeerCertificateChainReq struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The verification mode that S2A MUST use to validate the peer certificate
- // chain.
- Mode ValidatePeerCertificateChainReq_VerificationMode `protobuf:"varint,1,opt,name=mode,proto3,enum=s2a.proto.v2.ValidatePeerCertificateChainReq_VerificationMode" json:"mode,omitempty"`
- // Types that are assignable to PeerOneof:
- //
- // *ValidatePeerCertificateChainReq_ClientPeer_
- // *ValidatePeerCertificateChainReq_ServerPeer_
- PeerOneof isValidatePeerCertificateChainReq_PeerOneof `protobuf_oneof:"peer_oneof"`
-}
-
-func (x *ValidatePeerCertificateChainReq) Reset() {
- *x = ValidatePeerCertificateChainReq{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ValidatePeerCertificateChainReq) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ValidatePeerCertificateChainReq) ProtoMessage() {}
-
-func (x *ValidatePeerCertificateChainReq) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ValidatePeerCertificateChainReq.ProtoReflect.Descriptor instead.
-func (*ValidatePeerCertificateChainReq) Descriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9}
-}
-
-func (x *ValidatePeerCertificateChainReq) GetMode() ValidatePeerCertificateChainReq_VerificationMode {
- if x != nil {
- return x.Mode
- }
- return ValidatePeerCertificateChainReq_UNSPECIFIED
-}
-
-func (m *ValidatePeerCertificateChainReq) GetPeerOneof() isValidatePeerCertificateChainReq_PeerOneof {
- if m != nil {
- return m.PeerOneof
- }
- return nil
-}
-
-func (x *ValidatePeerCertificateChainReq) GetClientPeer() *ValidatePeerCertificateChainReq_ClientPeer {
- if x, ok := x.GetPeerOneof().(*ValidatePeerCertificateChainReq_ClientPeer_); ok {
- return x.ClientPeer
- }
- return nil
-}
-
-func (x *ValidatePeerCertificateChainReq) GetServerPeer() *ValidatePeerCertificateChainReq_ServerPeer {
- if x, ok := x.GetPeerOneof().(*ValidatePeerCertificateChainReq_ServerPeer_); ok {
- return x.ServerPeer
- }
- return nil
-}
-
-type isValidatePeerCertificateChainReq_PeerOneof interface {
- isValidatePeerCertificateChainReq_PeerOneof()
-}
-
-type ValidatePeerCertificateChainReq_ClientPeer_ struct {
- ClientPeer *ValidatePeerCertificateChainReq_ClientPeer `protobuf:"bytes,2,opt,name=client_peer,json=clientPeer,proto3,oneof"`
-}
-
-type ValidatePeerCertificateChainReq_ServerPeer_ struct {
- ServerPeer *ValidatePeerCertificateChainReq_ServerPeer `protobuf:"bytes,3,opt,name=server_peer,json=serverPeer,proto3,oneof"`
-}
-
-func (*ValidatePeerCertificateChainReq_ClientPeer_) isValidatePeerCertificateChainReq_PeerOneof() {}
-
-func (*ValidatePeerCertificateChainReq_ServerPeer_) isValidatePeerCertificateChainReq_PeerOneof() {}
-
-type ValidatePeerCertificateChainResp struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The result of validating the peer certificate chain.
- ValidationResult ValidatePeerCertificateChainResp_ValidationResult `protobuf:"varint,1,opt,name=validation_result,json=validationResult,proto3,enum=s2a.proto.v2.ValidatePeerCertificateChainResp_ValidationResult" json:"validation_result,omitempty"`
- // The validation details. This field is only populated when the validation
- // result is NOT SUCCESS.
- ValidationDetails string `protobuf:"bytes,2,opt,name=validation_details,json=validationDetails,proto3" json:"validation_details,omitempty"`
- // The S2A context contains information from the peer certificate chain.
- //
- // The S2A context MAY be populated even if validation of the peer certificate
- // chain fails.
- Context *s2a_context_go_proto.S2AContext `protobuf:"bytes,3,opt,name=context,proto3" json:"context,omitempty"`
-}
-
-func (x *ValidatePeerCertificateChainResp) Reset() {
- *x = ValidatePeerCertificateChainResp{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ValidatePeerCertificateChainResp) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ValidatePeerCertificateChainResp) ProtoMessage() {}
-
-func (x *ValidatePeerCertificateChainResp) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ValidatePeerCertificateChainResp.ProtoReflect.Descriptor instead.
-func (*ValidatePeerCertificateChainResp) Descriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{10}
-}
-
-func (x *ValidatePeerCertificateChainResp) GetValidationResult() ValidatePeerCertificateChainResp_ValidationResult {
- if x != nil {
- return x.ValidationResult
- }
- return ValidatePeerCertificateChainResp_UNSPECIFIED
-}
-
-func (x *ValidatePeerCertificateChainResp) GetValidationDetails() string {
- if x != nil {
- return x.ValidationDetails
- }
- return ""
-}
-
-func (x *ValidatePeerCertificateChainResp) GetContext() *s2a_context_go_proto.S2AContext {
- if x != nil {
- return x.Context
- }
- return nil
-}
-
-type SessionReq struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The identity corresponding to the TLS configurations that MUST be used for
- // the TLS handshake.
- //
- // If a managed identity already exists, the local identity and authentication
- // mechanisms are ignored. If a managed identity doesn't exist and the local
- // identity is not populated, S2A will try to deduce the managed identity to
- // use from the SNI extension. If that also fails, S2A uses the default
- // identity (if one exists).
- LocalIdentity *common_go_proto1.Identity `protobuf:"bytes,1,opt,name=local_identity,json=localIdentity,proto3" json:"local_identity,omitempty"`
- // The authentication mechanisms that the application wishes to use to
- // authenticate to S2A, ordered by preference. S2A will always use the first
- // authentication mechanism that matches the managed identity.
- AuthenticationMechanisms []*AuthenticationMechanism `protobuf:"bytes,2,rep,name=authentication_mechanisms,json=authenticationMechanisms,proto3" json:"authentication_mechanisms,omitempty"`
- // Types that are assignable to ReqOneof:
- //
- // *SessionReq_GetTlsConfigurationReq
- // *SessionReq_OffloadPrivateKeyOperationReq
- // *SessionReq_OffloadResumptionKeyOperationReq
- // *SessionReq_ValidatePeerCertificateChainReq
- ReqOneof isSessionReq_ReqOneof `protobuf_oneof:"req_oneof"`
-}
-
-func (x *SessionReq) Reset() {
- *x = SessionReq{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SessionReq) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SessionReq) ProtoMessage() {}
-
-func (x *SessionReq) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SessionReq.ProtoReflect.Descriptor instead.
-func (*SessionReq) Descriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{11}
-}
-
-func (x *SessionReq) GetLocalIdentity() *common_go_proto1.Identity {
- if x != nil {
- return x.LocalIdentity
- }
- return nil
-}
-
-func (x *SessionReq) GetAuthenticationMechanisms() []*AuthenticationMechanism {
- if x != nil {
- return x.AuthenticationMechanisms
- }
- return nil
-}
-
-func (m *SessionReq) GetReqOneof() isSessionReq_ReqOneof {
- if m != nil {
- return m.ReqOneof
- }
- return nil
-}
-
-func (x *SessionReq) GetGetTlsConfigurationReq() *GetTlsConfigurationReq {
- if x, ok := x.GetReqOneof().(*SessionReq_GetTlsConfigurationReq); ok {
- return x.GetTlsConfigurationReq
- }
- return nil
-}
-
-func (x *SessionReq) GetOffloadPrivateKeyOperationReq() *OffloadPrivateKeyOperationReq {
- if x, ok := x.GetReqOneof().(*SessionReq_OffloadPrivateKeyOperationReq); ok {
- return x.OffloadPrivateKeyOperationReq
- }
- return nil
-}
-
-func (x *SessionReq) GetOffloadResumptionKeyOperationReq() *OffloadResumptionKeyOperationReq {
- if x, ok := x.GetReqOneof().(*SessionReq_OffloadResumptionKeyOperationReq); ok {
- return x.OffloadResumptionKeyOperationReq
- }
- return nil
-}
-
-func (x *SessionReq) GetValidatePeerCertificateChainReq() *ValidatePeerCertificateChainReq {
- if x, ok := x.GetReqOneof().(*SessionReq_ValidatePeerCertificateChainReq); ok {
- return x.ValidatePeerCertificateChainReq
- }
- return nil
-}
-
-type isSessionReq_ReqOneof interface {
- isSessionReq_ReqOneof()
-}
-
-type SessionReq_GetTlsConfigurationReq struct {
- // Requests the certificate chain and TLS configuration corresponding to the
- // local identity, which the application MUST use to negotiate the TLS
- // handshake.
- GetTlsConfigurationReq *GetTlsConfigurationReq `protobuf:"bytes,3,opt,name=get_tls_configuration_req,json=getTlsConfigurationReq,proto3,oneof"`
-}
-
-type SessionReq_OffloadPrivateKeyOperationReq struct {
- // Signs or decrypts the input bytes using a private key corresponding to
- // the local identity in the request.
- //
- // WARNING: More than one OffloadPrivateKeyOperationReq may be sent to the
- // S2Av2 by a server during a TLS 1.2 handshake.
- OffloadPrivateKeyOperationReq *OffloadPrivateKeyOperationReq `protobuf:"bytes,4,opt,name=offload_private_key_operation_req,json=offloadPrivateKeyOperationReq,proto3,oneof"`
-}
-
-type SessionReq_OffloadResumptionKeyOperationReq struct {
- // Encrypts or decrypts the input bytes using a resumption key corresponding
- // to the local identity in the request.
- OffloadResumptionKeyOperationReq *OffloadResumptionKeyOperationReq `protobuf:"bytes,5,opt,name=offload_resumption_key_operation_req,json=offloadResumptionKeyOperationReq,proto3,oneof"`
-}
-
-type SessionReq_ValidatePeerCertificateChainReq struct {
- // Verifies the peer's certificate chain using
- // (a) trust bundles corresponding to the local identity in the request, and
- // (b) the verification mode in the request.
- ValidatePeerCertificateChainReq *ValidatePeerCertificateChainReq `protobuf:"bytes,6,opt,name=validate_peer_certificate_chain_req,json=validatePeerCertificateChainReq,proto3,oneof"`
-}
-
-func (*SessionReq_GetTlsConfigurationReq) isSessionReq_ReqOneof() {}
-
-func (*SessionReq_OffloadPrivateKeyOperationReq) isSessionReq_ReqOneof() {}
-
-func (*SessionReq_OffloadResumptionKeyOperationReq) isSessionReq_ReqOneof() {}
-
-func (*SessionReq_ValidatePeerCertificateChainReq) isSessionReq_ReqOneof() {}
-
-type SessionResp struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Status of the session response.
- //
- // The status field is populated so that if an error occurs when making an
- // individual request, then communication with the S2A may continue. If an
- // error is returned directly (e.g. at the gRPC layer), then it may result
- // that the bidirectional stream being closed.
- Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"`
- // Types that are assignable to RespOneof:
- //
- // *SessionResp_GetTlsConfigurationResp
- // *SessionResp_OffloadPrivateKeyOperationResp
- // *SessionResp_OffloadResumptionKeyOperationResp
- // *SessionResp_ValidatePeerCertificateChainResp
- RespOneof isSessionResp_RespOneof `protobuf_oneof:"resp_oneof"`
-}
-
-func (x *SessionResp) Reset() {
- *x = SessionResp{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *SessionResp) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*SessionResp) ProtoMessage() {}
-
-func (x *SessionResp) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use SessionResp.ProtoReflect.Descriptor instead.
-func (*SessionResp) Descriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{12}
-}
-
-func (x *SessionResp) GetStatus() *Status {
- if x != nil {
- return x.Status
- }
- return nil
-}
-
-func (m *SessionResp) GetRespOneof() isSessionResp_RespOneof {
- if m != nil {
- return m.RespOneof
- }
- return nil
-}
-
-func (x *SessionResp) GetGetTlsConfigurationResp() *GetTlsConfigurationResp {
- if x, ok := x.GetRespOneof().(*SessionResp_GetTlsConfigurationResp); ok {
- return x.GetTlsConfigurationResp
- }
- return nil
-}
-
-func (x *SessionResp) GetOffloadPrivateKeyOperationResp() *OffloadPrivateKeyOperationResp {
- if x, ok := x.GetRespOneof().(*SessionResp_OffloadPrivateKeyOperationResp); ok {
- return x.OffloadPrivateKeyOperationResp
- }
- return nil
-}
-
-func (x *SessionResp) GetOffloadResumptionKeyOperationResp() *OffloadResumptionKeyOperationResp {
- if x, ok := x.GetRespOneof().(*SessionResp_OffloadResumptionKeyOperationResp); ok {
- return x.OffloadResumptionKeyOperationResp
- }
- return nil
-}
-
-func (x *SessionResp) GetValidatePeerCertificateChainResp() *ValidatePeerCertificateChainResp {
- if x, ok := x.GetRespOneof().(*SessionResp_ValidatePeerCertificateChainResp); ok {
- return x.ValidatePeerCertificateChainResp
- }
- return nil
-}
-
-type isSessionResp_RespOneof interface {
- isSessionResp_RespOneof()
-}
-
-type SessionResp_GetTlsConfigurationResp struct {
- // Contains the certificate chain and TLS configurations corresponding to
- // the local identity.
- GetTlsConfigurationResp *GetTlsConfigurationResp `protobuf:"bytes,2,opt,name=get_tls_configuration_resp,json=getTlsConfigurationResp,proto3,oneof"`
-}
-
-type SessionResp_OffloadPrivateKeyOperationResp struct {
- // Contains the signed or encrypted output bytes using the private key
- // corresponding to the local identity.
- OffloadPrivateKeyOperationResp *OffloadPrivateKeyOperationResp `protobuf:"bytes,3,opt,name=offload_private_key_operation_resp,json=offloadPrivateKeyOperationResp,proto3,oneof"`
-}
-
-type SessionResp_OffloadResumptionKeyOperationResp struct {
- // Contains the encrypted or decrypted output bytes using the resumption key
- // corresponding to the local identity.
- OffloadResumptionKeyOperationResp *OffloadResumptionKeyOperationResp `protobuf:"bytes,4,opt,name=offload_resumption_key_operation_resp,json=offloadResumptionKeyOperationResp,proto3,oneof"`
-}
-
-type SessionResp_ValidatePeerCertificateChainResp struct {
- // Contains the validation result, peer identity and fingerprints of peer
- // certificates.
- ValidatePeerCertificateChainResp *ValidatePeerCertificateChainResp `protobuf:"bytes,5,opt,name=validate_peer_certificate_chain_resp,json=validatePeerCertificateChainResp,proto3,oneof"`
-}
-
-func (*SessionResp_GetTlsConfigurationResp) isSessionResp_RespOneof() {}
-
-func (*SessionResp_OffloadPrivateKeyOperationResp) isSessionResp_RespOneof() {}
-
-func (*SessionResp_OffloadResumptionKeyOperationResp) isSessionResp_RespOneof() {}
-
-func (*SessionResp_ValidatePeerCertificateChainResp) isSessionResp_RespOneof() {}
-
-// Next ID: 8
-type GetTlsConfigurationResp_ClientTlsConfiguration struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The certificate chain that the client MUST use for the TLS handshake.
- // It's a list of PEM-encoded certificates, ordered from leaf to root,
- // excluding the root.
- CertificateChain []string `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"`
- // The minimum TLS version number that the client MUST use for the TLS
- // handshake. If this field is not provided, the client MUST use the default
- // minimum version of the client's TLS library.
- MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"min_tls_version,omitempty"`
- // The maximum TLS version number that the client MUST use for the TLS
- // handshake. If this field is not provided, the client MUST use the default
- // maximum version of the client's TLS library.
- MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"max_tls_version,omitempty"`
- // The ordered list of TLS 1.0-1.2 ciphersuites that the client MAY offer to
- // negotiate in the TLS handshake.
- Ciphersuites []common_go_proto.Ciphersuite `protobuf:"varint,6,rep,packed,name=ciphersuites,proto3,enum=s2a.proto.v2.Ciphersuite" json:"ciphersuites,omitempty"`
- // The policy that dictates how the client negotiates ALPN during the TLS
- // handshake.
- AlpnPolicy *AlpnPolicy `protobuf:"bytes,7,opt,name=alpn_policy,json=alpnPolicy,proto3" json:"alpn_policy,omitempty"`
-}
-
-func (x *GetTlsConfigurationResp_ClientTlsConfiguration) Reset() {
- *x = GetTlsConfigurationResp_ClientTlsConfiguration{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetTlsConfigurationResp_ClientTlsConfiguration) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetTlsConfigurationResp_ClientTlsConfiguration) ProtoMessage() {}
-
-func (x *GetTlsConfigurationResp_ClientTlsConfiguration) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetTlsConfigurationResp_ClientTlsConfiguration.ProtoReflect.Descriptor instead.
-func (*GetTlsConfigurationResp_ClientTlsConfiguration) Descriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 0}
-}
-
-func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetCertificateChain() []string {
- if x != nil {
- return x.CertificateChain
- }
- return nil
-}
-
-func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetMinTlsVersion() common_go_proto.TLSVersion {
- if x != nil {
- return x.MinTlsVersion
- }
- return common_go_proto.TLSVersion(0)
-}
-
-func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetMaxTlsVersion() common_go_proto.TLSVersion {
- if x != nil {
- return x.MaxTlsVersion
- }
- return common_go_proto.TLSVersion(0)
-}
-
-func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetCiphersuites() []common_go_proto.Ciphersuite {
- if x != nil {
- return x.Ciphersuites
- }
- return nil
-}
-
-func (x *GetTlsConfigurationResp_ClientTlsConfiguration) GetAlpnPolicy() *AlpnPolicy {
- if x != nil {
- return x.AlpnPolicy
- }
- return nil
-}
-
-// Next ID: 12
-type GetTlsConfigurationResp_ServerTlsConfiguration struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The certificate chain that the server MUST use for the TLS handshake.
- // It's a list of PEM-encoded certificates, ordered from leaf to root,
- // excluding the root.
- CertificateChain []string `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"`
- // The minimum TLS version number that the server MUST use for the TLS
- // handshake. If this field is not provided, the server MUST use the default
- // minimum version of the server's TLS library.
- MinTlsVersion common_go_proto.TLSVersion `protobuf:"varint,2,opt,name=min_tls_version,json=minTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"min_tls_version,omitempty"`
- // The maximum TLS version number that the server MUST use for the TLS
- // handshake. If this field is not provided, the server MUST use the default
- // maximum version of the server's TLS library.
- MaxTlsVersion common_go_proto.TLSVersion `protobuf:"varint,3,opt,name=max_tls_version,json=maxTlsVersion,proto3,enum=s2a.proto.v2.TLSVersion" json:"max_tls_version,omitempty"`
- // The ordered list of TLS 1.0-1.2 ciphersuites that the server MAY offer to
- // negotiate in the TLS handshake.
- Ciphersuites []common_go_proto.Ciphersuite `protobuf:"varint,10,rep,packed,name=ciphersuites,proto3,enum=s2a.proto.v2.Ciphersuite" json:"ciphersuites,omitempty"`
- // Whether to enable TLS resumption.
- TlsResumptionEnabled bool `protobuf:"varint,6,opt,name=tls_resumption_enabled,json=tlsResumptionEnabled,proto3" json:"tls_resumption_enabled,omitempty"`
- // Whether the server MUST request a client certificate (i.e. to negotiate
- // TLS vs. mTLS).
- RequestClientCertificate GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate `protobuf:"varint,7,opt,name=request_client_certificate,json=requestClientCertificate,proto3,enum=s2a.proto.v2.GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate" json:"request_client_certificate,omitempty"`
- // Returns the maximum number of extra bytes that
- // |OffloadResumptionKeyOperation| can add to the number of unencrypted
- // bytes to form the encrypted bytes.
- MaxOverheadOfTicketAead uint32 `protobuf:"varint,9,opt,name=max_overhead_of_ticket_aead,json=maxOverheadOfTicketAead,proto3" json:"max_overhead_of_ticket_aead,omitempty"`
- // The policy that dictates how the server negotiates ALPN during the TLS
- // handshake.
- AlpnPolicy *AlpnPolicy `protobuf:"bytes,11,opt,name=alpn_policy,json=alpnPolicy,proto3" json:"alpn_policy,omitempty"`
-}
-
-func (x *GetTlsConfigurationResp_ServerTlsConfiguration) Reset() {
- *x = GetTlsConfigurationResp_ServerTlsConfiguration{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetTlsConfigurationResp_ServerTlsConfiguration) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetTlsConfigurationResp_ServerTlsConfiguration) ProtoMessage() {}
-
-func (x *GetTlsConfigurationResp_ServerTlsConfiguration) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetTlsConfigurationResp_ServerTlsConfiguration.ProtoReflect.Descriptor instead.
-func (*GetTlsConfigurationResp_ServerTlsConfiguration) Descriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{4, 1}
-}
-
-func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetCertificateChain() []string {
- if x != nil {
- return x.CertificateChain
- }
- return nil
-}
-
-func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMinTlsVersion() common_go_proto.TLSVersion {
- if x != nil {
- return x.MinTlsVersion
- }
- return common_go_proto.TLSVersion(0)
-}
-
-func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMaxTlsVersion() common_go_proto.TLSVersion {
- if x != nil {
- return x.MaxTlsVersion
- }
- return common_go_proto.TLSVersion(0)
-}
-
-func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetCiphersuites() []common_go_proto.Ciphersuite {
- if x != nil {
- return x.Ciphersuites
- }
- return nil
-}
-
-func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetTlsResumptionEnabled() bool {
- if x != nil {
- return x.TlsResumptionEnabled
- }
- return false
-}
-
-func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetRequestClientCertificate() GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate {
- if x != nil {
- return x.RequestClientCertificate
- }
- return GetTlsConfigurationResp_ServerTlsConfiguration_UNSPECIFIED
-}
-
-func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetMaxOverheadOfTicketAead() uint32 {
- if x != nil {
- return x.MaxOverheadOfTicketAead
- }
- return 0
-}
-
-func (x *GetTlsConfigurationResp_ServerTlsConfiguration) GetAlpnPolicy() *AlpnPolicy {
- if x != nil {
- return x.AlpnPolicy
- }
- return nil
-}
-
-type ValidatePeerCertificateChainReq_ClientPeer struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The certificate chain to be verified. The chain MUST be a list of
- // DER-encoded certificates, ordered from leaf to root, excluding the root.
- CertificateChain [][]byte `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"`
-}
-
-func (x *ValidatePeerCertificateChainReq_ClientPeer) Reset() {
- *x = ValidatePeerCertificateChainReq_ClientPeer{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ValidatePeerCertificateChainReq_ClientPeer) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ValidatePeerCertificateChainReq_ClientPeer) ProtoMessage() {}
-
-func (x *ValidatePeerCertificateChainReq_ClientPeer) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ValidatePeerCertificateChainReq_ClientPeer.ProtoReflect.Descriptor instead.
-func (*ValidatePeerCertificateChainReq_ClientPeer) Descriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 0}
-}
-
-func (x *ValidatePeerCertificateChainReq_ClientPeer) GetCertificateChain() [][]byte {
- if x != nil {
- return x.CertificateChain
- }
- return nil
-}
-
-type ValidatePeerCertificateChainReq_ServerPeer struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The certificate chain to be verified. The chain MUST be a list of
- // DER-encoded certificates, ordered from leaf to root, excluding the root.
- CertificateChain [][]byte `protobuf:"bytes,1,rep,name=certificate_chain,json=certificateChain,proto3" json:"certificate_chain,omitempty"`
- // The expected hostname of the server.
- ServerHostname string `protobuf:"bytes,2,opt,name=server_hostname,json=serverHostname,proto3" json:"server_hostname,omitempty"`
- // The UnrestrictedClientPolicy specified by the user.
- SerializedUnrestrictedClientPolicy []byte `protobuf:"bytes,3,opt,name=serialized_unrestricted_client_policy,json=serializedUnrestrictedClientPolicy,proto3" json:"serialized_unrestricted_client_policy,omitempty"`
-}
-
-func (x *ValidatePeerCertificateChainReq_ServerPeer) Reset() {
- *x = ValidatePeerCertificateChainReq_ServerPeer{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ValidatePeerCertificateChainReq_ServerPeer) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ValidatePeerCertificateChainReq_ServerPeer) ProtoMessage() {}
-
-func (x *ValidatePeerCertificateChainReq_ServerPeer) ProtoReflect() protoreflect.Message {
- mi := &file_internal_proto_v2_s2a_s2a_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ValidatePeerCertificateChainReq_ServerPeer.ProtoReflect.Descriptor instead.
-func (*ValidatePeerCertificateChainReq_ServerPeer) Descriptor() ([]byte, []int) {
- return file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP(), []int{9, 1}
-}
-
-func (x *ValidatePeerCertificateChainReq_ServerPeer) GetCertificateChain() [][]byte {
- if x != nil {
- return x.CertificateChain
- }
- return nil
-}
-
-func (x *ValidatePeerCertificateChainReq_ServerPeer) GetServerHostname() string {
- if x != nil {
- return x.ServerHostname
- }
- return ""
-}
-
-func (x *ValidatePeerCertificateChainReq_ServerPeer) GetSerializedUnrestrictedClientPolicy() []byte {
- if x != nil {
- return x.SerializedUnrestrictedClientPolicy
- }
- return nil
-}
-
-var File_internal_proto_v2_s2a_s2a_proto protoreflect.FileDescriptor
-
-var file_internal_proto_v2_s2a_s2a_proto_rawDesc = []byte{
- 0x0a, 0x1f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x2f, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x12, 0x0c, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x1a,
- 0x22, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
- 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f,
- 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65,
- 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32,
- 0x61, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x63, 0x6f,
- 0x6e, 0x74, 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x0a,
- 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e,
- 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x6e, 0x65, 0x67, 0x6f, 0x74, 0x69,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61,
- 0x62, 0x6c, 0x65, 0x41, 0x6c, 0x70, 0x6e, 0x4e, 0x65, 0x67, 0x6f, 0x74, 0x69, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0e, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x63, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x73, 0x32, 0x61,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x0d, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x72, 0x6f, 0x74,
- 0x6f, 0x63, 0x6f, 0x6c, 0x73, 0x22, 0x75, 0x0a, 0x17, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d,
- 0x12, 0x2f, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49,
- 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74,
- 0x79, 0x12, 0x16, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
- 0x48, 0x00, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x6d, 0x65, 0x63,
- 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x36, 0x0a, 0x06,
- 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65,
- 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74,
- 0x61, 0x69, 0x6c, 0x73, 0x22, 0x71, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x45,
- 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x69, 0x64,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x53, 0x69, 0x64, 0x65, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x53, 0x69, 0x64, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x6e, 0x69, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x03, 0x73, 0x6e, 0x69, 0x22, 0xf1, 0x0b, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54,
- 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x65, 0x73, 0x70, 0x12, 0x78, 0x0a, 0x18, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x6c,
- 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x43, 0x6c, 0x69,
- 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x16, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x54, 0x6c, 0x73,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78, 0x0a,
- 0x18, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x3c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47,
- 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52,
- 0x16, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xcf, 0x02, 0x0a, 0x16, 0x43, 0x6c, 0x69, 0x65,
- 0x6e, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
- 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x63,
- 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12,
- 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72,
- 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72,
- 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73,
- 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69,
- 0x74, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73,
- 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75, 0x69, 0x74,
- 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63,
- 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63,
- 0x79, 0x52, 0x0a, 0x61, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4a, 0x04, 0x08,
- 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x1a, 0xfa, 0x06, 0x0a, 0x16, 0x53, 0x65,
- 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63,
- 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69,
- 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72,
- 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56, 0x65, 0x72,
- 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x73,
- 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76,
- 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x73,
- 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x4c, 0x53, 0x56,
- 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x54, 0x6c, 0x73, 0x56, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73,
- 0x75, 0x69, 0x74, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x73, 0x32,
- 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x69, 0x70, 0x68, 0x65,
- 0x72, 0x73, 0x75, 0x69, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x69, 0x70, 0x68, 0x65, 0x72, 0x73, 0x75,
- 0x69, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x74, 0x6c, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x75,
- 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x06,
- 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x74, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x93, 0x01, 0x0a, 0x1a, 0x72,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x65,
- 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32,
- 0x55, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47,
- 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x54, 0x6c, 0x73,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43,
- 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65,
- 0x12, 0x3c, 0x0a, 0x1b, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x68, 0x65, 0x61, 0x64,
- 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x61, 0x65, 0x61, 0x64, 0x18,
- 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x4f, 0x76, 0x65, 0x72, 0x68, 0x65,
- 0x61, 0x64, 0x4f, 0x66, 0x54, 0x69, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x65, 0x61, 0x64, 0x12, 0x39,
- 0x0a, 0x0b, 0x61, 0x6c, 0x70, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0b, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
- 0x76, 0x32, 0x2e, 0x41, 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0a, 0x61,
- 0x6c, 0x70, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x9e, 0x02, 0x0a, 0x18, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x65, 0x72, 0x74, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
- 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x23, 0x0a, 0x1f, 0x44, 0x4f, 0x4e, 0x54, 0x5f,
- 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43,
- 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x2e, 0x0a, 0x2a,
- 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43,
- 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44,
- 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x12, 0x29, 0x0a, 0x25,
- 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43,
- 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x56,
- 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x03, 0x12, 0x3a, 0x0a, 0x36, 0x52, 0x45, 0x51, 0x55, 0x45,
- 0x53, 0x54, 0x5f, 0x41, 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43,
- 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54,
- 0x45, 0x5f, 0x42, 0x55, 0x54, 0x5f, 0x44, 0x4f, 0x4e, 0x54, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46,
- 0x59, 0x10, 0x04, 0x12, 0x35, 0x0a, 0x31, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x5f, 0x41,
- 0x4e, 0x44, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e,
- 0x54, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x41, 0x4e,
- 0x44, 0x5f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05,
- 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x42, 0x13, 0x0a, 0x11, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb0, 0x03, 0x0a, 0x1d,
- 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65,
- 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x5d, 0x0a,
- 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e,
- 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e,
- 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65,
- 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x50, 0x72,
- 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x51, 0x0a, 0x13,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69,
- 0x74, 0x68, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x73, 0x32, 0x61, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x52, 0x12, 0x73, 0x69, 0x67,
- 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12,
- 0x1d, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01,
- 0x28, 0x0c, 0x48, 0x00, 0x52, 0x08, 0x72, 0x61, 0x77, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x25,
- 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x44,
- 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d, 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x5f,
- 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c,
- 0x73, 0x68, 0x61, 0x33, 0x38, 0x34, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0d,
- 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x07, 0x20,
- 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x68, 0x61, 0x35, 0x31, 0x32, 0x44, 0x69, 0x67,
- 0x65, 0x73, 0x74, 0x22, 0x3d, 0x0a, 0x13, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65,
- 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e,
- 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53,
- 0x49, 0x47, 0x4e, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x43, 0x52, 0x59, 0x50, 0x54,
- 0x10, 0x02, 0x42, 0x0a, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x3d,
- 0x0a, 0x1e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65,
- 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70,
- 0x12, 0x1b, 0x0a, 0x09, 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe7, 0x01,
- 0x0a, 0x20, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x65, 0x71, 0x12, 0x63, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x45, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75,
- 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79,
- 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74,
- 0x65, 0x73, 0x22, 0x43, 0x0a, 0x16, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0f, 0x0a, 0x0b,
- 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a,
- 0x07, 0x45, 0x4e, 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45,
- 0x43, 0x52, 0x59, 0x50, 0x54, 0x10, 0x02, 0x22, 0x40, 0x0a, 0x21, 0x4f, 0x66, 0x66, 0x6c, 0x6f,
- 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f,
- 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1b, 0x0a, 0x09,
- 0x6f, 0x75, 0x74, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
- 0x08, 0x6f, 0x75, 0x74, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xf8, 0x04, 0x0a, 0x1f, 0x56, 0x61,
- 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66,
- 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x52, 0x0a,
- 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x73, 0x32,
- 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64,
- 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
- 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64,
- 0x65, 0x12, 0x5b, 0x0a, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x65, 0x72,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65,
- 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61,
- 0x69, 0x6e, 0x52, 0x65, 0x71, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72,
- 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x5b,
- 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
- 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43,
- 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52,
- 0x65, 0x71, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x48, 0x00, 0x52,
- 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x50, 0x65, 0x65, 0x72, 0x1a, 0x39, 0x0a, 0x0a, 0x43,
- 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72,
- 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x0c, 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
- 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x1a, 0xb5, 0x01, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65,
- 0x72, 0x50, 0x65, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69,
- 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c,
- 0x52, 0x10, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61,
- 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x68, 0x6f, 0x73,
- 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x72,
- 0x76, 0x65, 0x72, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x25, 0x73,
- 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x75, 0x6e, 0x72, 0x65, 0x73, 0x74,
- 0x72, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x6f,
- 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x73, 0x65, 0x72, 0x69,
- 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x55, 0x6e, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74,
- 0x65, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x46,
- 0x0a, 0x10, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f,
- 0x64, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
- 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x10, 0x01, 0x12,
- 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, 0x54, 0x4f, 0x5f, 0x47, 0x4f,
- 0x4f, 0x47, 0x4c, 0x45, 0x10, 0x02, 0x42, 0x0c, 0x0a, 0x0a, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6f,
- 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xb2, 0x02, 0x0a, 0x20, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
- 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65,
- 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x6c, 0x0a, 0x11, 0x76, 0x61, 0x6c,
- 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72,
- 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e,
- 0x52, 0x65, 0x73, 0x70, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x10, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x76, 0x61, 0x6c, 0x69, 0x64,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x11, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44,
- 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x32, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78,
- 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x32, 0x41, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78,
- 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x3d, 0x0a, 0x10, 0x56, 0x61,
- 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x0f,
- 0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12,
- 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07,
- 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x02, 0x22, 0x97, 0x05, 0x0a, 0x0a, 0x53, 0x65,
- 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x3a, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61,
- 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x13, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x64, 0x65,
- 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e,
- 0x74, 0x69, 0x74, 0x79, 0x12, 0x62, 0x0a, 0x19, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d,
- 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x52, 0x18,
- 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65,
- 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x73, 0x12, 0x61, 0x0a, 0x19, 0x67, 0x65, 0x74, 0x5f,
- 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x73, 0x32,
- 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c,
- 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65,
- 0x71, 0x48, 0x00, 0x52, 0x16, 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x77, 0x0a, 0x21, 0x6f,
- 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b,
- 0x65, 0x79, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69,
- 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1d, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72,
- 0x69, 0x76, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x65, 0x71, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64,
- 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f,
- 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
- 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x20, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65,
- 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x12, 0x7d, 0x0a, 0x23, 0x76, 0x61, 0x6c, 0x69, 0x64,
- 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69,
- 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x71, 0x18, 0x06,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72,
- 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e,
- 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x1f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50,
- 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43, 0x68,
- 0x61, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e,
- 0x65, 0x6f, 0x66, 0x22, 0xb4, 0x04, 0x0a, 0x0b, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52,
- 0x65, 0x73, 0x70, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
- 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x12, 0x64, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x17,
- 0x67, 0x65, 0x74, 0x54, 0x6c, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x7a, 0x0a, 0x22, 0x6f, 0x66, 0x66, 0x6c, 0x6f,
- 0x61, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f,
- 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
- 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74,
- 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73,
- 0x70, 0x48, 0x00, 0x52, 0x1e, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x72, 0x69, 0x76,
- 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x65, 0x73, 0x70, 0x12, 0x83, 0x01, 0x0a, 0x25, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x5f,
- 0x72, 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6f,
- 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
- 0x76, 0x32, 0x2e, 0x4f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x21, 0x6f, 0x66, 0x66, 0x6c, 0x6f, 0x61, 0x64, 0x52,
- 0x65, 0x73, 0x75, 0x6d, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x4f, 0x70, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x12, 0x80, 0x01, 0x0a, 0x24, 0x76, 0x61,
- 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x65, 0x72, 0x74,
- 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x72, 0x65,
- 0x73, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
- 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x43,
- 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x48, 0x00, 0x52, 0x20, 0x76, 0x61, 0x6c, 0x69,
- 0x64, 0x61, 0x74, 0x65, 0x50, 0x65, 0x65, 0x72, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63,
- 0x61, 0x74, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x42, 0x0c, 0x0a, 0x0a,
- 0x72, 0x65, 0x73, 0x70, 0x5f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x2a, 0xa2, 0x03, 0x0a, 0x12, 0x53,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68,
- 0x6d, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47,
- 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12,
- 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f,
- 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36,
- 0x10, 0x01, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49,
- 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f, 0x53, 0x48, 0x41,
- 0x33, 0x38, 0x34, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c,
- 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x4b, 0x43, 0x53, 0x31, 0x5f,
- 0x53, 0x48, 0x41, 0x35, 0x31, 0x32, 0x10, 0x03, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f,
- 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53,
- 0x45, 0x43, 0x50, 0x32, 0x35, 0x36, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10,
- 0x04, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47,
- 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41, 0x5f, 0x53, 0x45, 0x43, 0x50, 0x33, 0x38, 0x34, 0x52,
- 0x31, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x05, 0x12, 0x27, 0x0a, 0x23, 0x53, 0x32,
- 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x43, 0x44, 0x53, 0x41,
- 0x5f, 0x53, 0x45, 0x43, 0x50, 0x35, 0x32, 0x31, 0x52, 0x31, 0x5f, 0x53, 0x48, 0x41, 0x35, 0x31,
- 0x32, 0x10, 0x06, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53,
- 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45,
- 0x5f, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36, 0x10, 0x07, 0x12, 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41,
- 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53,
- 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41, 0x33, 0x38, 0x34, 0x10, 0x08, 0x12,
- 0x24, 0x0a, 0x20, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f,
- 0x52, 0x53, 0x41, 0x5f, 0x50, 0x53, 0x53, 0x5f, 0x52, 0x53, 0x41, 0x45, 0x5f, 0x53, 0x48, 0x41,
- 0x35, 0x31, 0x32, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x32, 0x41, 0x5f, 0x53, 0x53, 0x4c,
- 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x5f, 0x45, 0x44, 0x32, 0x35, 0x35, 0x31, 0x39, 0x10, 0x0a, 0x32,
- 0x57, 0x0a, 0x0a, 0x53, 0x32, 0x41, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x49, 0x0a,
- 0x0c, 0x53, 0x65, 0x74, 0x55, 0x70, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x2e,
- 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73,
- 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x1a, 0x19, 0x2e, 0x73, 0x32, 0x61, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65,
- 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x36, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68,
- 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x73, 0x32,
- 0x61, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x32, 0x61, 0x5f, 0x67, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_internal_proto_v2_s2a_s2a_proto_rawDescOnce sync.Once
- file_internal_proto_v2_s2a_s2a_proto_rawDescData = file_internal_proto_v2_s2a_s2a_proto_rawDesc
-)
-
-func file_internal_proto_v2_s2a_s2a_proto_rawDescGZIP() []byte {
- file_internal_proto_v2_s2a_s2a_proto_rawDescOnce.Do(func() {
- file_internal_proto_v2_s2a_s2a_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_proto_v2_s2a_s2a_proto_rawDescData)
- })
- return file_internal_proto_v2_s2a_s2a_proto_rawDescData
-}
-
-var file_internal_proto_v2_s2a_s2a_proto_enumTypes = make([]protoimpl.EnumInfo, 6)
-var file_internal_proto_v2_s2a_s2a_proto_msgTypes = make([]protoimpl.MessageInfo, 17)
-var file_internal_proto_v2_s2a_s2a_proto_goTypes = []interface{}{
- (SignatureAlgorithm)(0), // 0: s2a.proto.v2.SignatureAlgorithm
- (GetTlsConfigurationResp_ServerTlsConfiguration_RequestClientCertificate)(0), // 1: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate
- (OffloadPrivateKeyOperationReq_PrivateKeyOperation)(0), // 2: s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation
- (OffloadResumptionKeyOperationReq_ResumptionKeyOperation)(0), // 3: s2a.proto.v2.OffloadResumptionKeyOperationReq.ResumptionKeyOperation
- (ValidatePeerCertificateChainReq_VerificationMode)(0), // 4: s2a.proto.v2.ValidatePeerCertificateChainReq.VerificationMode
- (ValidatePeerCertificateChainResp_ValidationResult)(0), // 5: s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult
- (*AlpnPolicy)(nil), // 6: s2a.proto.v2.AlpnPolicy
- (*AuthenticationMechanism)(nil), // 7: s2a.proto.v2.AuthenticationMechanism
- (*Status)(nil), // 8: s2a.proto.v2.Status
- (*GetTlsConfigurationReq)(nil), // 9: s2a.proto.v2.GetTlsConfigurationReq
- (*GetTlsConfigurationResp)(nil), // 10: s2a.proto.v2.GetTlsConfigurationResp
- (*OffloadPrivateKeyOperationReq)(nil), // 11: s2a.proto.v2.OffloadPrivateKeyOperationReq
- (*OffloadPrivateKeyOperationResp)(nil), // 12: s2a.proto.v2.OffloadPrivateKeyOperationResp
- (*OffloadResumptionKeyOperationReq)(nil), // 13: s2a.proto.v2.OffloadResumptionKeyOperationReq
- (*OffloadResumptionKeyOperationResp)(nil), // 14: s2a.proto.v2.OffloadResumptionKeyOperationResp
- (*ValidatePeerCertificateChainReq)(nil), // 15: s2a.proto.v2.ValidatePeerCertificateChainReq
- (*ValidatePeerCertificateChainResp)(nil), // 16: s2a.proto.v2.ValidatePeerCertificateChainResp
- (*SessionReq)(nil), // 17: s2a.proto.v2.SessionReq
- (*SessionResp)(nil), // 18: s2a.proto.v2.SessionResp
- (*GetTlsConfigurationResp_ClientTlsConfiguration)(nil), // 19: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration
- (*GetTlsConfigurationResp_ServerTlsConfiguration)(nil), // 20: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration
- (*ValidatePeerCertificateChainReq_ClientPeer)(nil), // 21: s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer
- (*ValidatePeerCertificateChainReq_ServerPeer)(nil), // 22: s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer
- (common_go_proto.AlpnProtocol)(0), // 23: s2a.proto.v2.AlpnProtocol
- (*common_go_proto1.Identity)(nil), // 24: s2a.proto.Identity
- (common_go_proto.ConnectionSide)(0), // 25: s2a.proto.v2.ConnectionSide
- (*s2a_context_go_proto.S2AContext)(nil), // 26: s2a.proto.v2.S2AContext
- (common_go_proto.TLSVersion)(0), // 27: s2a.proto.v2.TLSVersion
- (common_go_proto.Ciphersuite)(0), // 28: s2a.proto.v2.Ciphersuite
-}
-var file_internal_proto_v2_s2a_s2a_proto_depIdxs = []int32{
- 23, // 0: s2a.proto.v2.AlpnPolicy.alpn_protocols:type_name -> s2a.proto.v2.AlpnProtocol
- 24, // 1: s2a.proto.v2.AuthenticationMechanism.identity:type_name -> s2a.proto.Identity
- 25, // 2: s2a.proto.v2.GetTlsConfigurationReq.connection_side:type_name -> s2a.proto.v2.ConnectionSide
- 19, // 3: s2a.proto.v2.GetTlsConfigurationResp.client_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration
- 20, // 4: s2a.proto.v2.GetTlsConfigurationResp.server_tls_configuration:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration
- 2, // 5: s2a.proto.v2.OffloadPrivateKeyOperationReq.operation:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq.PrivateKeyOperation
- 0, // 6: s2a.proto.v2.OffloadPrivateKeyOperationReq.signature_algorithm:type_name -> s2a.proto.v2.SignatureAlgorithm
- 3, // 7: s2a.proto.v2.OffloadResumptionKeyOperationReq.operation:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationReq.ResumptionKeyOperation
- 4, // 8: s2a.proto.v2.ValidatePeerCertificateChainReq.mode:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.VerificationMode
- 21, // 9: s2a.proto.v2.ValidatePeerCertificateChainReq.client_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ClientPeer
- 22, // 10: s2a.proto.v2.ValidatePeerCertificateChainReq.server_peer:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq.ServerPeer
- 5, // 11: s2a.proto.v2.ValidatePeerCertificateChainResp.validation_result:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp.ValidationResult
- 26, // 12: s2a.proto.v2.ValidatePeerCertificateChainResp.context:type_name -> s2a.proto.v2.S2AContext
- 24, // 13: s2a.proto.v2.SessionReq.local_identity:type_name -> s2a.proto.Identity
- 7, // 14: s2a.proto.v2.SessionReq.authentication_mechanisms:type_name -> s2a.proto.v2.AuthenticationMechanism
- 9, // 15: s2a.proto.v2.SessionReq.get_tls_configuration_req:type_name -> s2a.proto.v2.GetTlsConfigurationReq
- 11, // 16: s2a.proto.v2.SessionReq.offload_private_key_operation_req:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationReq
- 13, // 17: s2a.proto.v2.SessionReq.offload_resumption_key_operation_req:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationReq
- 15, // 18: s2a.proto.v2.SessionReq.validate_peer_certificate_chain_req:type_name -> s2a.proto.v2.ValidatePeerCertificateChainReq
- 8, // 19: s2a.proto.v2.SessionResp.status:type_name -> s2a.proto.v2.Status
- 10, // 20: s2a.proto.v2.SessionResp.get_tls_configuration_resp:type_name -> s2a.proto.v2.GetTlsConfigurationResp
- 12, // 21: s2a.proto.v2.SessionResp.offload_private_key_operation_resp:type_name -> s2a.proto.v2.OffloadPrivateKeyOperationResp
- 14, // 22: s2a.proto.v2.SessionResp.offload_resumption_key_operation_resp:type_name -> s2a.proto.v2.OffloadResumptionKeyOperationResp
- 16, // 23: s2a.proto.v2.SessionResp.validate_peer_certificate_chain_resp:type_name -> s2a.proto.v2.ValidatePeerCertificateChainResp
- 27, // 24: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.min_tls_version:type_name -> s2a.proto.v2.TLSVersion
- 27, // 25: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.max_tls_version:type_name -> s2a.proto.v2.TLSVersion
- 28, // 26: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.ciphersuites:type_name -> s2a.proto.v2.Ciphersuite
- 6, // 27: s2a.proto.v2.GetTlsConfigurationResp.ClientTlsConfiguration.alpn_policy:type_name -> s2a.proto.v2.AlpnPolicy
- 27, // 28: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.min_tls_version:type_name -> s2a.proto.v2.TLSVersion
- 27, // 29: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.max_tls_version:type_name -> s2a.proto.v2.TLSVersion
- 28, // 30: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.ciphersuites:type_name -> s2a.proto.v2.Ciphersuite
- 1, // 31: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.request_client_certificate:type_name -> s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.RequestClientCertificate
- 6, // 32: s2a.proto.v2.GetTlsConfigurationResp.ServerTlsConfiguration.alpn_policy:type_name -> s2a.proto.v2.AlpnPolicy
- 17, // 33: s2a.proto.v2.S2AService.SetUpSession:input_type -> s2a.proto.v2.SessionReq
- 18, // 34: s2a.proto.v2.S2AService.SetUpSession:output_type -> s2a.proto.v2.SessionResp
- 34, // [34:35] is the sub-list for method output_type
- 33, // [33:34] is the sub-list for method input_type
- 33, // [33:33] is the sub-list for extension type_name
- 33, // [33:33] is the sub-list for extension extendee
- 0, // [0:33] is the sub-list for field type_name
-}
-
-func init() { file_internal_proto_v2_s2a_s2a_proto_init() }
-func file_internal_proto_v2_s2a_s2a_proto_init() {
- if File_internal_proto_v2_s2a_s2a_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AlpnPolicy); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AuthenticationMechanism); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Status); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetTlsConfigurationReq); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetTlsConfigurationResp); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*OffloadPrivateKeyOperationReq); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*OffloadPrivateKeyOperationResp); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*OffloadResumptionKeyOperationReq); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*OffloadResumptionKeyOperationResp); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidatePeerCertificateChainReq); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidatePeerCertificateChainResp); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SessionReq); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SessionResp); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetTlsConfigurationResp_ClientTlsConfiguration); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetTlsConfigurationResp_ServerTlsConfiguration); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidatePeerCertificateChainReq_ClientPeer); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidatePeerCertificateChainReq_ServerPeer); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[1].OneofWrappers = []interface{}{
- (*AuthenticationMechanism_Token)(nil),
- }
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[4].OneofWrappers = []interface{}{
- (*GetTlsConfigurationResp_ClientTlsConfiguration_)(nil),
- (*GetTlsConfigurationResp_ServerTlsConfiguration_)(nil),
- }
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[5].OneofWrappers = []interface{}{
- (*OffloadPrivateKeyOperationReq_RawBytes)(nil),
- (*OffloadPrivateKeyOperationReq_Sha256Digest)(nil),
- (*OffloadPrivateKeyOperationReq_Sha384Digest)(nil),
- (*OffloadPrivateKeyOperationReq_Sha512Digest)(nil),
- }
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[9].OneofWrappers = []interface{}{
- (*ValidatePeerCertificateChainReq_ClientPeer_)(nil),
- (*ValidatePeerCertificateChainReq_ServerPeer_)(nil),
- }
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[11].OneofWrappers = []interface{}{
- (*SessionReq_GetTlsConfigurationReq)(nil),
- (*SessionReq_OffloadPrivateKeyOperationReq)(nil),
- (*SessionReq_OffloadResumptionKeyOperationReq)(nil),
- (*SessionReq_ValidatePeerCertificateChainReq)(nil),
- }
- file_internal_proto_v2_s2a_s2a_proto_msgTypes[12].OneofWrappers = []interface{}{
- (*SessionResp_GetTlsConfigurationResp)(nil),
- (*SessionResp_OffloadPrivateKeyOperationResp)(nil),
- (*SessionResp_OffloadResumptionKeyOperationResp)(nil),
- (*SessionResp_ValidatePeerCertificateChainResp)(nil),
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_internal_proto_v2_s2a_s2a_proto_rawDesc,
- NumEnums: 6,
- NumMessages: 17,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_internal_proto_v2_s2a_s2a_proto_goTypes,
- DependencyIndexes: file_internal_proto_v2_s2a_s2a_proto_depIdxs,
- EnumInfos: file_internal_proto_v2_s2a_s2a_proto_enumTypes,
- MessageInfos: file_internal_proto_v2_s2a_s2a_proto_msgTypes,
- }.Build()
- File_internal_proto_v2_s2a_s2a_proto = out.File
- file_internal_proto_v2_s2a_s2a_proto_rawDesc = nil
- file_internal_proto_v2_s2a_s2a_proto_goTypes = nil
- file_internal_proto_v2_s2a_s2a_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go b/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go
deleted file mode 100644
index 2566df6c..00000000
--- a/vendor/github.com/google/s2a-go/internal/proto/v2/s2a_go_proto/s2a_grpc.pb.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2022 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
-// versions:
-// - protoc-gen-go-grpc v1.3.0
-// - protoc v3.21.12
-// source: internal/proto/v2/s2a/s2a.proto
-
-package s2a_go_proto
-
-import (
- context "context"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-)
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.32.0 or later.
-const _ = grpc.SupportPackageIsVersion7
-
-const (
- S2AService_SetUpSession_FullMethodName = "/s2a.proto.v2.S2AService/SetUpSession"
-)
-
-// S2AServiceClient is the client API for S2AService service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
-type S2AServiceClient interface {
- // SetUpSession is a bidirectional stream used by applications to offload
- // operations from the TLS handshake.
- SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error)
-}
-
-type s2AServiceClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewS2AServiceClient(cc grpc.ClientConnInterface) S2AServiceClient {
- return &s2AServiceClient{cc}
-}
-
-func (c *s2AServiceClient) SetUpSession(ctx context.Context, opts ...grpc.CallOption) (S2AService_SetUpSessionClient, error) {
- stream, err := c.cc.NewStream(ctx, &S2AService_ServiceDesc.Streams[0], S2AService_SetUpSession_FullMethodName, opts...)
- if err != nil {
- return nil, err
- }
- x := &s2AServiceSetUpSessionClient{stream}
- return x, nil
-}
-
-type S2AService_SetUpSessionClient interface {
- Send(*SessionReq) error
- Recv() (*SessionResp, error)
- grpc.ClientStream
-}
-
-type s2AServiceSetUpSessionClient struct {
- grpc.ClientStream
-}
-
-func (x *s2AServiceSetUpSessionClient) Send(m *SessionReq) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *s2AServiceSetUpSessionClient) Recv() (*SessionResp, error) {
- m := new(SessionResp)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// S2AServiceServer is the server API for S2AService service.
-// All implementations must embed UnimplementedS2AServiceServer
-// for forward compatibility
-type S2AServiceServer interface {
- // SetUpSession is a bidirectional stream used by applications to offload
- // operations from the TLS handshake.
- SetUpSession(S2AService_SetUpSessionServer) error
- mustEmbedUnimplementedS2AServiceServer()
-}
-
-// UnimplementedS2AServiceServer must be embedded to have forward compatible implementations.
-type UnimplementedS2AServiceServer struct {
-}
-
-func (UnimplementedS2AServiceServer) SetUpSession(S2AService_SetUpSessionServer) error {
- return status.Errorf(codes.Unimplemented, "method SetUpSession not implemented")
-}
-func (UnimplementedS2AServiceServer) mustEmbedUnimplementedS2AServiceServer() {}
-
-// UnsafeS2AServiceServer may be embedded to opt out of forward compatibility for this service.
-// Use of this interface is not recommended, as added methods to S2AServiceServer will
-// result in compilation errors.
-type UnsafeS2AServiceServer interface {
- mustEmbedUnimplementedS2AServiceServer()
-}
-
-func RegisterS2AServiceServer(s grpc.ServiceRegistrar, srv S2AServiceServer) {
- s.RegisterService(&S2AService_ServiceDesc, srv)
-}
-
-func _S2AService_SetUpSession_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(S2AServiceServer).SetUpSession(&s2AServiceSetUpSessionServer{stream})
-}
-
-type S2AService_SetUpSessionServer interface {
- Send(*SessionResp) error
- Recv() (*SessionReq, error)
- grpc.ServerStream
-}
-
-type s2AServiceSetUpSessionServer struct {
- grpc.ServerStream
-}
-
-func (x *s2AServiceSetUpSessionServer) Send(m *SessionResp) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *s2AServiceSetUpSessionServer) Recv() (*SessionReq, error) {
- m := new(SessionReq)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// S2AService_ServiceDesc is the grpc.ServiceDesc for S2AService service.
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy)
-var S2AService_ServiceDesc = grpc.ServiceDesc{
- ServiceName: "s2a.proto.v2.S2AService",
- HandlerType: (*S2AServiceServer)(nil),
- Methods: []grpc.MethodDesc{},
- Streams: []grpc.StreamDesc{
- {
- StreamName: "SetUpSession",
- Handler: _S2AService_SetUpSession_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- },
- Metadata: "internal/proto/v2/s2a/s2a.proto",
-}
diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go
deleted file mode 100644
index 486f4ec4..00000000
--- a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aeadcrypter.go
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- *
- * Copyright 2021 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package aeadcrypter provides the interface for AEAD cipher implementations
-// used by S2A's record protocol.
-package aeadcrypter
-
-// S2AAEADCrypter is the interface for an AEAD cipher used by the S2A record
-// protocol.
-type S2AAEADCrypter interface {
- // Encrypt encrypts the plaintext and computes the tag of dst and plaintext.
- // dst and plaintext may fully overlap or not at all.
- Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error)
- // Decrypt decrypts ciphertext and verifies the tag. dst and ciphertext may
- // fully overlap or not at all.
- Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error)
- // TagSize returns the tag size in bytes.
- TagSize() int
-}
diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go
deleted file mode 100644
index 85c4e595..00000000
--- a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/aesgcm.go
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- *
- * Copyright 2021 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package aeadcrypter
-
-import (
- "crypto/aes"
- "crypto/cipher"
- "fmt"
-)
-
-// Supported key sizes in bytes.
-const (
- AES128GCMKeySize = 16
- AES256GCMKeySize = 32
-)
-
-// aesgcm is the struct that holds an AES-GCM cipher for the S2A AEAD crypter.
-type aesgcm struct {
- aead cipher.AEAD
-}
-
-// NewAESGCM creates an AES-GCM crypter instance. Note that the key must be
-// either 128 bits or 256 bits.
-func NewAESGCM(key []byte) (S2AAEADCrypter, error) {
- if len(key) != AES128GCMKeySize && len(key) != AES256GCMKeySize {
- return nil, fmt.Errorf("%d or %d bytes, given: %d", AES128GCMKeySize, AES256GCMKeySize, len(key))
- }
- c, err := aes.NewCipher(key)
- if err != nil {
- return nil, err
- }
- a, err := cipher.NewGCM(c)
- if err != nil {
- return nil, err
- }
- return &aesgcm{aead: a}, nil
-}
-
-// Encrypt is the encryption function. dst can contain bytes at the beginning of
-// the ciphertext that will not be encrypted but will be authenticated. If dst
-// has enough capacity to hold these bytes, the ciphertext and the tag, no
-// allocation and copy operations will be performed. dst and plaintext may
-// fully overlap or not at all.
-func (s *aesgcm) Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) {
- return encrypt(s.aead, dst, plaintext, nonce, aad)
-}
-
-func (s *aesgcm) Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) {
- return decrypt(s.aead, dst, ciphertext, nonce, aad)
-}
-
-func (s *aesgcm) TagSize() int {
- return TagSize
-}
diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go
deleted file mode 100644
index 214df4ca..00000000
--- a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/chachapoly.go
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- *
- * Copyright 2021 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package aeadcrypter
-
-import (
- "crypto/cipher"
- "fmt"
-
- "golang.org/x/crypto/chacha20poly1305"
-)
-
-// Supported key size in bytes.
-const (
- Chacha20Poly1305KeySize = 32
-)
-
-// chachapoly is the struct that holds a CHACHA-POLY cipher for the S2A AEAD
-// crypter.
-type chachapoly struct {
- aead cipher.AEAD
-}
-
-// NewChachaPoly creates a Chacha-Poly crypter instance. Note that the key must
-// be Chacha20Poly1305KeySize bytes in length.
-func NewChachaPoly(key []byte) (S2AAEADCrypter, error) {
- if len(key) != Chacha20Poly1305KeySize {
- return nil, fmt.Errorf("%d bytes, given: %d", Chacha20Poly1305KeySize, len(key))
- }
- c, err := chacha20poly1305.New(key)
- if err != nil {
- return nil, err
- }
- return &chachapoly{aead: c}, nil
-}
-
-// Encrypt is the encryption function. dst can contain bytes at the beginning of
-// the ciphertext that will not be encrypted but will be authenticated. If dst
-// has enough capacity to hold these bytes, the ciphertext and the tag, no
-// allocation and copy operations will be performed. dst and plaintext may
-// fully overlap or not at all.
-func (s *chachapoly) Encrypt(dst, plaintext, nonce, aad []byte) ([]byte, error) {
- return encrypt(s.aead, dst, plaintext, nonce, aad)
-}
-
-func (s *chachapoly) Decrypt(dst, ciphertext, nonce, aad []byte) ([]byte, error) {
- return decrypt(s.aead, dst, ciphertext, nonce, aad)
-}
-
-func (s *chachapoly) TagSize() int {
- return TagSize
-}
diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go b/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go
deleted file mode 100644
index b3c36ad9..00000000
--- a/vendor/github.com/google/s2a-go/internal/record/internal/aeadcrypter/common.go
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- *
- * Copyright 2021 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package aeadcrypter
-
-import (
- "crypto/cipher"
- "fmt"
-)
-
-const (
- // TagSize is the tag size in bytes for AES-128-GCM-SHA256,
- // AES-256-GCM-SHA384, and CHACHA20-POLY1305-SHA256.
- TagSize = 16
- // NonceSize is the size of the nonce in number of bytes for
- // AES-128-GCM-SHA256, AES-256-GCM-SHA384, and CHACHA20-POLY1305-SHA256.
- NonceSize = 12
- // SHA256DigestSize is the digest size of sha256 in bytes.
- SHA256DigestSize = 32
- // SHA384DigestSize is the digest size of sha384 in bytes.
- SHA384DigestSize = 48
-)
-
-// sliceForAppend takes a slice and a requested number of bytes. It returns a
-// slice with the contents of the given slice followed by that many bytes and a
-// second slice that aliases into it and contains only the extra bytes. If the
-// original slice has sufficient capacity then no allocation is performed.
-func sliceForAppend(in []byte, n int) (head, tail []byte) {
- if total := len(in) + n; cap(in) >= total {
- head = in[:total]
- } else {
- head = make([]byte, total)
- copy(head, in)
- }
- tail = head[len(in):]
- return head, tail
-}
-
-// encrypt is the encryption function for an AEAD crypter. aead determines
-// the type of AEAD crypter. dst can contain bytes at the beginning of the
-// ciphertext that will not be encrypted but will be authenticated. If dst has
-// enough capacity to hold these bytes, the ciphertext and the tag, no
-// allocation and copy operations will be performed. dst and plaintext may
-// fully overlap or not at all.
-func encrypt(aead cipher.AEAD, dst, plaintext, nonce, aad []byte) ([]byte, error) {
- if len(nonce) != NonceSize {
- return nil, fmt.Errorf("nonce size must be %d bytes. received: %d", NonceSize, len(nonce))
- }
- // If we need to allocate an output buffer, we want to include space for
- // the tag to avoid forcing the caller to reallocate as well.
- dlen := len(dst)
- dst, out := sliceForAppend(dst, len(plaintext)+TagSize)
- data := out[:len(plaintext)]
- copy(data, plaintext) // data may fully overlap plaintext
-
- // Seal appends the ciphertext and the tag to its first argument and
- // returns the updated slice. However, sliceForAppend above ensures that
- // dst has enough capacity to avoid a reallocation and copy due to the
- // append.
- dst = aead.Seal(dst[:dlen], nonce, data, aad)
- return dst, nil
-}
-
-// decrypt is the decryption function for an AEAD crypter, where aead determines
-// the type of AEAD crypter, and dst the destination bytes for the decrypted
-// ciphertext. The dst buffer may fully overlap with plaintext or not at all.
-func decrypt(aead cipher.AEAD, dst, ciphertext, nonce, aad []byte) ([]byte, error) {
- if len(nonce) != NonceSize {
- return nil, fmt.Errorf("nonce size must be %d bytes. received: %d", NonceSize, len(nonce))
- }
- // If dst is equal to ciphertext[:0], ciphertext storage is reused.
- plaintext, err := aead.Open(dst, nonce, ciphertext, aad)
- if err != nil {
- return nil, fmt.Errorf("message auth failed: %v", err)
- }
- return plaintext, nil
-}
diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go
deleted file mode 100644
index ddeaa6d7..00000000
--- a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/ciphersuite.go
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- *
- * Copyright 2021 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package halfconn
-
-import (
- "crypto/sha256"
- "crypto/sha512"
- "fmt"
- "hash"
-
- s2apb "github.com/google/s2a-go/internal/proto/common_go_proto"
- "github.com/google/s2a-go/internal/record/internal/aeadcrypter"
-)
-
-// ciphersuite is the interface for retrieving ciphersuite-specific information
-// and utilities.
-type ciphersuite interface {
- // keySize returns the key size in bytes. This refers to the key used by
- // the AEAD crypter. This is derived by calling HKDF expand on the traffic
- // secret.
- keySize() int
- // nonceSize returns the nonce size in bytes.
- nonceSize() int
- // trafficSecretSize returns the traffic secret size in bytes. This refers
- // to the secret used to derive the traffic key and nonce, as specified in
- // https://tools.ietf.org/html/rfc8446#section-7.
- trafficSecretSize() int
- // hashFunction returns the hash function for the ciphersuite.
- hashFunction() func() hash.Hash
- // aeadCrypter takes a key and creates an AEAD crypter for the ciphersuite
- // using that key.
- aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error)
-}
-
-func newCiphersuite(ciphersuite s2apb.Ciphersuite) (ciphersuite, error) {
- switch ciphersuite {
- case s2apb.Ciphersuite_AES_128_GCM_SHA256:
- return &aesgcm128sha256{}, nil
- case s2apb.Ciphersuite_AES_256_GCM_SHA384:
- return &aesgcm256sha384{}, nil
- case s2apb.Ciphersuite_CHACHA20_POLY1305_SHA256:
- return &chachapolysha256{}, nil
- default:
- return nil, fmt.Errorf("unrecognized ciphersuite: %v", ciphersuite)
- }
-}
-
-// aesgcm128sha256 is the AES-128-GCM-SHA256 implementation of the ciphersuite
-// interface.
-type aesgcm128sha256 struct{}
-
-func (aesgcm128sha256) keySize() int { return aeadcrypter.AES128GCMKeySize }
-func (aesgcm128sha256) nonceSize() int { return aeadcrypter.NonceSize }
-func (aesgcm128sha256) trafficSecretSize() int { return aeadcrypter.SHA256DigestSize }
-func (aesgcm128sha256) hashFunction() func() hash.Hash { return sha256.New }
-func (aesgcm128sha256) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) {
- return aeadcrypter.NewAESGCM(key)
-}
-
-// aesgcm256sha384 is the AES-256-GCM-SHA384 implementation of the ciphersuite
-// interface.
-type aesgcm256sha384 struct{}
-
-func (aesgcm256sha384) keySize() int { return aeadcrypter.AES256GCMKeySize }
-func (aesgcm256sha384) nonceSize() int { return aeadcrypter.NonceSize }
-func (aesgcm256sha384) trafficSecretSize() int { return aeadcrypter.SHA384DigestSize }
-func (aesgcm256sha384) hashFunction() func() hash.Hash { return sha512.New384 }
-func (aesgcm256sha384) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) {
- return aeadcrypter.NewAESGCM(key)
-}
-
-// chachapolysha256 is the ChaChaPoly-SHA256 implementation of the ciphersuite
-// interface.
-type chachapolysha256 struct{}
-
-func (chachapolysha256) keySize() int { return aeadcrypter.Chacha20Poly1305KeySize }
-func (chachapolysha256) nonceSize() int { return aeadcrypter.NonceSize }
-func (chachapolysha256) trafficSecretSize() int { return aeadcrypter.SHA256DigestSize }
-func (chachapolysha256) hashFunction() func() hash.Hash { return sha256.New }
-func (chachapolysha256) aeadCrypter(key []byte) (aeadcrypter.S2AAEADCrypter, error) {
- return aeadcrypter.NewChachaPoly(key)
-}
diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go
deleted file mode 100644
index 9499cdca..00000000
--- a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/counter.go
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- *
- * Copyright 2021 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package halfconn
-
-import "errors"
-
-// counter is a 64-bit counter.
-type counter struct {
- val uint64
- hasOverflowed bool
-}
-
-// newCounter creates a new counter with the initial value set to val.
-func newCounter(val uint64) counter {
- return counter{val: val}
-}
-
-// value returns the current value of the counter.
-func (c *counter) value() (uint64, error) {
- if c.hasOverflowed {
- return 0, errors.New("counter has overflowed")
- }
- return c.val, nil
-}
-
-// increment increments the counter and checks for overflow.
-func (c *counter) increment() {
- // If the counter is already invalid due to overflow, there is no need to
- // increase it. We check for the hasOverflowed flag in the call to value().
- if c.hasOverflowed {
- return
- }
- c.val++
- if c.val == 0 {
- c.hasOverflowed = true
- }
-}
-
-// reset sets the counter value to zero and sets the hasOverflowed flag to
-// false.
-func (c *counter) reset() {
- c.val = 0
- c.hasOverflowed = false
-}
diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go
deleted file mode 100644
index e05f2c36..00000000
--- a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/expander.go
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- *
- * Copyright 2021 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package halfconn
-
-import (
- "fmt"
- "hash"
-
- "golang.org/x/crypto/hkdf"
-)
-
-// hkdfExpander is the interface for the HKDF expansion function; see
-// https://tools.ietf.org/html/rfc5869 for details. its use in TLS 1.3 is
-// specified in https://tools.ietf.org/html/rfc8446#section-7.2
-type hkdfExpander interface {
- // expand takes a secret, a label, and the output length in bytes, and
- // returns the resulting expanded key.
- expand(secret, label []byte, length int) ([]byte, error)
-}
-
-// defaultHKDFExpander is the default HKDF expander which uses Go's crypto/hkdf
-// for HKDF expansion.
-type defaultHKDFExpander struct {
- h func() hash.Hash
-}
-
-// newDefaultHKDFExpander creates an instance of the default HKDF expander
-// using the given hash function.
-func newDefaultHKDFExpander(h func() hash.Hash) hkdfExpander {
- return &defaultHKDFExpander{h: h}
-}
-
-func (d *defaultHKDFExpander) expand(secret, label []byte, length int) ([]byte, error) {
- outBuf := make([]byte, length)
- n, err := hkdf.Expand(d.h, secret, label).Read(outBuf)
- if err != nil {
- return nil, fmt.Errorf("hkdf.Expand.Read failed with error: %v", err)
- }
- if n < length {
- return nil, fmt.Errorf("hkdf.Expand.Read returned unexpected length, got %d, want %d", n, length)
- }
- return outBuf, nil
-}
diff --git a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go b/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go
deleted file mode 100644
index dff99ff5..00000000
--- a/vendor/github.com/google/s2a-go/internal/record/internal/halfconn/halfconn.go
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
- *
- * Copyright 2021 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package halfconn manages the inbound or outbound traffic of a TLS 1.3
-// connection.
-package halfconn
-
-import (
- "fmt"
- "sync"
-
- s2apb "github.com/google/s2a-go/internal/proto/common_go_proto"
- "github.com/google/s2a-go/internal/record/internal/aeadcrypter"
- "golang.org/x/crypto/cryptobyte"
-)
-
-// The constants below were taken from Section 7.2 and 7.3 in
-// https://tools.ietf.org/html/rfc8446#section-7. They are used as the label
-// in HKDF-Expand-Label.
-const (
- tls13Key = "tls13 key"
- tls13Nonce = "tls13 iv"
- tls13Update = "tls13 traffic upd"
-)
-
-// S2AHalfConnection stores the state of the TLS 1.3 connection in the
-// inbound or outbound direction.
-type S2AHalfConnection struct {
- cs ciphersuite
- expander hkdfExpander
- // mutex guards sequence, aeadCrypter, trafficSecret, and nonce.
- mutex sync.Mutex
- aeadCrypter aeadcrypter.S2AAEADCrypter
- sequence counter
- trafficSecret []byte
- nonce []byte
-}
-
-// New creates a new instance of S2AHalfConnection given a ciphersuite and a
-// traffic secret.
-func New(ciphersuite s2apb.Ciphersuite, trafficSecret []byte, sequence uint64) (*S2AHalfConnection, error) {
- cs, err := newCiphersuite(ciphersuite)
- if err != nil {
- return nil, fmt.Errorf("failed to create new ciphersuite: %v", ciphersuite)
- }
- if cs.trafficSecretSize() != len(trafficSecret) {
- return nil, fmt.Errorf("supplied traffic secret must be %v bytes, given: %v bytes", cs.trafficSecretSize(), len(trafficSecret))
- }
-
- hc := &S2AHalfConnection{cs: cs, expander: newDefaultHKDFExpander(cs.hashFunction()), sequence: newCounter(sequence), trafficSecret: trafficSecret}
- if err = hc.updateCrypterAndNonce(hc.trafficSecret); err != nil {
- return nil, fmt.Errorf("failed to create half connection using traffic secret: %v", err)
- }
-
- return hc, nil
-}
-
-// Encrypt encrypts the plaintext and computes the tag of dst and plaintext.
-// dst and plaintext may fully overlap or not at all. Note that the sequence
-// number will still be incremented on failure, unless the sequence has
-// overflowed.
-func (hc *S2AHalfConnection) Encrypt(dst, plaintext, aad []byte) ([]byte, error) {
- hc.mutex.Lock()
- sequence, err := hc.getAndIncrementSequence()
- if err != nil {
- hc.mutex.Unlock()
- return nil, err
- }
- nonce := hc.maskedNonce(sequence)
- crypter := hc.aeadCrypter
- hc.mutex.Unlock()
- return crypter.Encrypt(dst, plaintext, nonce, aad)
-}
-
-// Decrypt decrypts ciphertext and verifies the tag. dst and ciphertext may
-// fully overlap or not at all. Note that the sequence number will still be
-// incremented on failure, unless the sequence has overflowed.
-func (hc *S2AHalfConnection) Decrypt(dst, ciphertext, aad []byte) ([]byte, error) {
- hc.mutex.Lock()
- sequence, err := hc.getAndIncrementSequence()
- if err != nil {
- hc.mutex.Unlock()
- return nil, err
- }
- nonce := hc.maskedNonce(sequence)
- crypter := hc.aeadCrypter
- hc.mutex.Unlock()
- return crypter.Decrypt(dst, ciphertext, nonce, aad)
-}
-
-// UpdateKey advances the traffic secret key, as specified in
-// https://tools.ietf.org/html/rfc8446#section-7.2. In addition, it derives
-// a new key and nonce, and resets the sequence number.
-func (hc *S2AHalfConnection) UpdateKey() error {
- hc.mutex.Lock()
- defer hc.mutex.Unlock()
-
- var err error
- hc.trafficSecret, err = hc.deriveSecret(hc.trafficSecret, []byte(tls13Update), hc.cs.trafficSecretSize())
- if err != nil {
- return fmt.Errorf("failed to derive traffic secret: %v", err)
- }
-
- if err = hc.updateCrypterAndNonce(hc.trafficSecret); err != nil {
- return fmt.Errorf("failed to update half connection: %v", err)
- }
-
- hc.sequence.reset()
- return nil
-}
-
-// TagSize returns the tag size in bytes of the underlying AEAD crypter.
-func (hc *S2AHalfConnection) TagSize() int {
- return hc.aeadCrypter.TagSize()
-}
-
-// updateCrypterAndNonce takes a new traffic secret and updates the crypter
-// and nonce. Note that the mutex must be held while calling this function.
-func (hc *S2AHalfConnection) updateCrypterAndNonce(newTrafficSecret []byte) error {
- key, err := hc.deriveSecret(newTrafficSecret, []byte(tls13Key), hc.cs.keySize())
- if err != nil {
- return fmt.Errorf("failed to update key: %v", err)
- }
-
- hc.nonce, err = hc.deriveSecret(newTrafficSecret, []byte(tls13Nonce), hc.cs.nonceSize())
- if err != nil {
- return fmt.Errorf("failed to update nonce: %v", err)
- }
-
- hc.aeadCrypter, err = hc.cs.aeadCrypter(key)
- if err != nil {
- return fmt.Errorf("failed to update AEAD crypter: %v", err)
- }
- return nil
-}
-
-// getAndIncrement returns the current sequence number and increments it. Note
-// that the mutex must be held while calling this function.
-func (hc *S2AHalfConnection) getAndIncrementSequence() (uint64, error) {
- sequence, err := hc.sequence.value()
- if err != nil {
- return 0, err
- }
- hc.sequence.increment()
- return sequence, nil
-}
-
-// maskedNonce creates a copy of the nonce that is masked with the sequence
-// number. Note that the mutex must be held while calling this function.
-func (hc *S2AHalfConnection) maskedNonce(sequence uint64) []byte {
- const uint64Size = 8
- nonce := make([]byte, len(hc.nonce))
- copy(nonce, hc.nonce)
- for i := 0; i < uint64Size; i++ {
- nonce[aeadcrypter.NonceSize-uint64Size+i] ^= byte(sequence >> uint64(56-uint64Size*i))
- }
- return nonce
-}
-
-// deriveSecret implements the Derive-Secret function, as specified in
-// https://tools.ietf.org/html/rfc8446#section-7.1.
-func (hc *S2AHalfConnection) deriveSecret(secret, label []byte, length int) ([]byte, error) {
- var hkdfLabel cryptobyte.Builder
- hkdfLabel.AddUint16(uint16(length))
- hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes(label)
- })
- // Append an empty `Context` field to the label, as specified in the RFC.
- // The half connection does not use the `Context` field.
- hkdfLabel.AddUint8LengthPrefixed(func(b *cryptobyte.Builder) {
- b.AddBytes([]byte(""))
- })
- hkdfLabelBytes, err := hkdfLabel.Bytes()
- if err != nil {
- return nil, fmt.Errorf("deriveSecret failed: %v", err)
- }
- return hc.expander.expand(secret, hkdfLabelBytes, length)
-}
diff --git a/vendor/github.com/google/s2a-go/internal/record/record.go b/vendor/github.com/google/s2a-go/internal/record/record.go
deleted file mode 100644
index c6051551..00000000
--- a/vendor/github.com/google/s2a-go/internal/record/record.go
+++ /dev/null
@@ -1,757 +0,0 @@
-/*
- *
- * Copyright 2021 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package record implements the TLS 1.3 record protocol used by the S2A
-// transport credentials.
-package record
-
-import (
- "encoding/binary"
- "errors"
- "fmt"
- "math"
- "net"
- "sync"
-
- commonpb "github.com/google/s2a-go/internal/proto/common_go_proto"
- "github.com/google/s2a-go/internal/record/internal/halfconn"
- "github.com/google/s2a-go/internal/tokenmanager"
- "google.golang.org/grpc/grpclog"
-)
-
-// recordType is the `ContentType` as described in
-// https://tools.ietf.org/html/rfc8446#section-5.1.
-type recordType byte
-
-const (
- alert recordType = 21
- handshake recordType = 22
- applicationData recordType = 23
-)
-
-// keyUpdateRequest is the `KeyUpdateRequest` as described in
-// https://tools.ietf.org/html/rfc8446#section-4.6.3.
-type keyUpdateRequest byte
-
-const (
- updateNotRequested keyUpdateRequest = 0
- updateRequested keyUpdateRequest = 1
-)
-
-// alertDescription is the `AlertDescription` as described in
-// https://tools.ietf.org/html/rfc8446#section-6.
-type alertDescription byte
-
-const (
- closeNotify alertDescription = 0
-)
-
-// sessionTicketState is used to determine whether session tickets have not yet
-// been received, are in the process of being received, or have finished
-// receiving.
-type sessionTicketState byte
-
-const (
- ticketsNotYetReceived sessionTicketState = 0
- receivingTickets sessionTicketState = 1
- notReceivingTickets sessionTicketState = 2
-)
-
-const (
- // The TLS 1.3-specific constants below (tlsRecordMaxPlaintextSize,
- // tlsRecordHeaderSize, tlsRecordTypeSize) were taken from
- // https://tools.ietf.org/html/rfc8446#section-5.1.
-
- // tlsRecordMaxPlaintextSize is the maximum size in bytes of the plaintext
- // in a single TLS 1.3 record.
- tlsRecordMaxPlaintextSize = 16384 // 2^14
- // tlsRecordTypeSize is the size in bytes of the TLS 1.3 record type.
- tlsRecordTypeSize = 1
- // tlsTagSize is the size in bytes of the tag of the following three
- // ciphersuites: AES-128-GCM-SHA256, AES-256-GCM-SHA384,
- // CHACHA20-POLY1305-SHA256.
- tlsTagSize = 16
- // tlsRecordMaxPayloadSize is the maximum size in bytes of the payload in a
- // single TLS 1.3 record. This is the maximum size of the plaintext plus the
- // record type byte and 16 bytes of the tag.
- tlsRecordMaxPayloadSize = tlsRecordMaxPlaintextSize + tlsRecordTypeSize + tlsTagSize
- // tlsRecordHeaderTypeSize is the size in bytes of the TLS 1.3 record
- // header type.
- tlsRecordHeaderTypeSize = 1
- // tlsRecordHeaderLegacyRecordVersionSize is the size in bytes of the TLS
- // 1.3 record header legacy record version.
- tlsRecordHeaderLegacyRecordVersionSize = 2
- // tlsRecordHeaderPayloadLengthSize is the size in bytes of the TLS 1.3
- // record header payload length.
- tlsRecordHeaderPayloadLengthSize = 2
- // tlsRecordHeaderSize is the size in bytes of the TLS 1.3 record header.
- tlsRecordHeaderSize = tlsRecordHeaderTypeSize + tlsRecordHeaderLegacyRecordVersionSize + tlsRecordHeaderPayloadLengthSize
- // tlsRecordMaxSize
- tlsRecordMaxSize = tlsRecordMaxPayloadSize + tlsRecordHeaderSize
- // tlsApplicationData is the application data type of the TLS 1.3 record
- // header.
- tlsApplicationData = 23
- // tlsLegacyRecordVersion is the legacy record version of the TLS record.
- tlsLegacyRecordVersion = 3
- // tlsAlertSize is the size in bytes of an alert of TLS 1.3.
- tlsAlertSize = 2
-)
-
-const (
- // These are TLS 1.3 handshake-specific constants.
-
- // tlsHandshakeNewSessionTicketType is the prefix of a handshake new session
- // ticket message of TLS 1.3.
- tlsHandshakeNewSessionTicketType = 4
- // tlsHandshakeKeyUpdateType is the prefix of a handshake key update message
- // of TLS 1.3.
- tlsHandshakeKeyUpdateType = 24
- // tlsHandshakeMsgTypeSize is the size in bytes of the TLS 1.3 handshake
- // message type field.
- tlsHandshakeMsgTypeSize = 1
- // tlsHandshakeLengthSize is the size in bytes of the TLS 1.3 handshake
- // message length field.
- tlsHandshakeLengthSize = 3
- // tlsHandshakeKeyUpdateMsgSize is the size in bytes of the TLS 1.3
- // handshake key update message.
- tlsHandshakeKeyUpdateMsgSize = 1
- // tlsHandshakePrefixSize is the size in bytes of the prefix of the TLS 1.3
- // handshake message.
- tlsHandshakePrefixSize = 4
- // tlsMaxSessionTicketSize is the maximum size of a NewSessionTicket message
- // in TLS 1.3. This is the sum of the max sizes of all the fields in the
- // NewSessionTicket struct specified in
- // https://tools.ietf.org/html/rfc8446#section-4.6.1.
- tlsMaxSessionTicketSize = 131338
-)
-
-const (
- // outBufMaxRecords is the maximum number of records that can fit in the
- // ourRecordsBuf buffer.
- outBufMaxRecords = 16
- // outBufMaxSize is the maximum size (in bytes) of the outRecordsBuf buffer.
- outBufMaxSize = outBufMaxRecords * tlsRecordMaxSize
- // maxAllowedTickets is the maximum number of session tickets that are
- // allowed. The number of tickets are limited to ensure that the size of the
- // ticket queue does not grow indefinitely. S2A also keeps a limit on the
- // number of tickets that it caches.
- maxAllowedTickets = 5
-)
-
-// preConstructedKeyUpdateMsg holds the key update message. This is needed as an
-// optimization so that the same message does not need to be constructed every
-// time a key update message is sent.
-var preConstructedKeyUpdateMsg = buildKeyUpdateRequest()
-
-// conn represents a secured TLS connection. It implements the net.Conn
-// interface.
-type conn struct {
- net.Conn
- // inConn is the half connection responsible for decrypting incoming bytes.
- inConn *halfconn.S2AHalfConnection
- // outConn is the half connection responsible for encrypting outgoing bytes.
- outConn *halfconn.S2AHalfConnection
- // pendingApplicationData holds data that has been read from the connection
- // and decrypted, but has not yet been returned by Read.
- pendingApplicationData []byte
- // unusedBuf holds data read from the network that has not yet been
- // decrypted. This data might not consist of a complete record. It may
- // consist of several records, the last of which could be incomplete.
- unusedBuf []byte
- // outRecordsBuf is a buffer used to store outgoing TLS records before
- // they are written to the network.
- outRecordsBuf []byte
- // nextRecord stores the next record info in the unusedBuf buffer.
- nextRecord []byte
- // overheadSize is the overhead size in bytes of each TLS 1.3 record, which
- // is computed as overheadSize = header size + record type byte + tag size.
- // Note that there is no padding by zeros in the overhead calculation.
- overheadSize int
- // readMutex guards against concurrent calls to Read. This is required since
- // Close may be called during a Read.
- readMutex sync.Mutex
- // writeMutex guards against concurrent calls to Write. This is required
- // since Close may be called during a Write, and also because a key update
- // message may be written during a Read.
- writeMutex sync.Mutex
- // handshakeBuf holds handshake messages while they are being processed.
- handshakeBuf []byte
- // ticketState is the current processing state of the session tickets.
- ticketState sessionTicketState
- // sessionTickets holds the completed session tickets until they are sent to
- // the handshaker service for processing.
- sessionTickets [][]byte
- // ticketSender sends session tickets to the S2A handshaker service.
- ticketSender s2aTicketSender
- // callComplete is a channel that blocks closing the record protocol until a
- // pending call to the S2A completes.
- callComplete chan bool
-}
-
-// ConnParameters holds the parameters used for creating a new conn object.
-type ConnParameters struct {
- // NetConn is the TCP connection to the peer. This parameter is required.
- NetConn net.Conn
- // Ciphersuite is the TLS ciphersuite negotiated by the S2A handshaker
- // service. This parameter is required.
- Ciphersuite commonpb.Ciphersuite
- // TLSVersion is the TLS version number negotiated by the S2A handshaker
- // service. This parameter is required.
- TLSVersion commonpb.TLSVersion
- // InTrafficSecret is the traffic secret used to derive the session key for
- // the inbound direction. This parameter is required.
- InTrafficSecret []byte
- // OutTrafficSecret is the traffic secret used to derive the session key
- // for the outbound direction. This parameter is required.
- OutTrafficSecret []byte
- // UnusedBuf is the data read from the network that has not yet been
- // decrypted. This parameter is optional. If not provided, then no
- // application data was sent in the same flight of messages as the final
- // handshake message.
- UnusedBuf []byte
- // InSequence is the sequence number of the next, incoming, TLS record.
- // This parameter is required.
- InSequence uint64
- // OutSequence is the sequence number of the next, outgoing, TLS record.
- // This parameter is required.
- OutSequence uint64
- // HSAddr stores the address of the S2A handshaker service. This parameter
- // is optional. If not provided, then TLS resumption is disabled.
- HSAddr string
- // ConnectionId is the connection identifier that was created and sent by
- // S2A at the end of a handshake.
- ConnectionID uint64
- // LocalIdentity is the local identity that was used by S2A during session
- // setup and included in the session result.
- LocalIdentity *commonpb.Identity
- // EnsureProcessSessionTickets allows users to wait and ensure that all
- // available session tickets are sent to S2A before a process completes.
- EnsureProcessSessionTickets *sync.WaitGroup
-}
-
-// NewConn creates a TLS record protocol that wraps the TCP connection.
-func NewConn(o *ConnParameters) (net.Conn, error) {
- if o == nil {
- return nil, errors.New("conn options must not be nil")
- }
- if o.TLSVersion != commonpb.TLSVersion_TLS1_3 {
- return nil, errors.New("TLS version must be TLS 1.3")
- }
-
- inConn, err := halfconn.New(o.Ciphersuite, o.InTrafficSecret, o.InSequence)
- if err != nil {
- return nil, fmt.Errorf("failed to create inbound half connection: %v", err)
- }
- outConn, err := halfconn.New(o.Ciphersuite, o.OutTrafficSecret, o.OutSequence)
- if err != nil {
- return nil, fmt.Errorf("failed to create outbound half connection: %v", err)
- }
-
- // The tag size for the in/out connections should be the same.
- overheadSize := tlsRecordHeaderSize + tlsRecordTypeSize + inConn.TagSize()
- var unusedBuf []byte
- if o.UnusedBuf == nil {
- // We pre-allocate unusedBuf to be of size
- // 2*tlsRecordMaxSize-1 during initialization. We only read from the
- // network into unusedBuf when unusedBuf does not contain a complete
- // record and the incomplete record is at most tlsRecordMaxSize-1
- // (bytes). And we read at most tlsRecordMaxSize bytes of data from the
- // network into unusedBuf at one time. Therefore, 2*tlsRecordMaxSize-1
- // is large enough to buffer data read from the network.
- unusedBuf = make([]byte, 0, 2*tlsRecordMaxSize-1)
- } else {
- unusedBuf = make([]byte, len(o.UnusedBuf))
- copy(unusedBuf, o.UnusedBuf)
- }
-
- tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager()
- if err != nil {
- grpclog.Infof("failed to create single token access token manager: %v", err)
- }
-
- s2aConn := &conn{
- Conn: o.NetConn,
- inConn: inConn,
- outConn: outConn,
- unusedBuf: unusedBuf,
- outRecordsBuf: make([]byte, tlsRecordMaxSize),
- nextRecord: unusedBuf,
- overheadSize: overheadSize,
- ticketState: ticketsNotYetReceived,
- // Pre-allocate the buffer for one session ticket message and the max
- // plaintext size. This is the largest size that handshakeBuf will need
- // to hold. The largest incomplete handshake message is the
- // [handshake header size] + [max session ticket size] - 1.
- // Then, tlsRecordMaxPlaintextSize is the maximum size that will be
- // appended to the handshakeBuf before the handshake message is
- // completed. Therefore, the buffer size below should be large enough to
- // buffer any handshake messages.
- handshakeBuf: make([]byte, 0, tlsHandshakePrefixSize+tlsMaxSessionTicketSize+tlsRecordMaxPlaintextSize-1),
- ticketSender: &ticketSender{
- hsAddr: o.HSAddr,
- connectionID: o.ConnectionID,
- localIdentity: o.LocalIdentity,
- tokenManager: tokenManager,
- ensureProcessSessionTickets: o.EnsureProcessSessionTickets,
- },
- callComplete: make(chan bool),
- }
- return s2aConn, nil
-}
-
-// Read reads and decrypts a TLS 1.3 record from the underlying connection, and
-// copies any application data received from the peer into b. If the size of the
-// payload is greater than len(b), Read retains the remaining bytes in an
-// internal buffer, and subsequent calls to Read will read from this buffer
-// until it is exhausted. At most 1 TLS record worth of application data is
-// written to b for each call to Read.
-//
-// Note that for the user to efficiently call this method, the user should
-// ensure that the buffer b is allocated such that the buffer does not have any
-// unused segments. This can be done by calling Read via io.ReadFull, which
-// continually calls Read until the specified buffer has been filled. Also note
-// that the user should close the connection via Close() if an error is thrown
-// by a call to Read.
-func (p *conn) Read(b []byte) (n int, err error) {
- p.readMutex.Lock()
- defer p.readMutex.Unlock()
- // Check if p.pendingApplication data has leftover application data from
- // the previous call to Read.
- if len(p.pendingApplicationData) == 0 {
- // Read a full record from the wire.
- record, err := p.readFullRecord()
- if err != nil {
- return 0, err
- }
- // Now we have a complete record, so split the header and validate it
- // The TLS record is split into 2 pieces: the record header and the
- // payload. The payload has the following form:
- // [payload] = [ciphertext of application data]
- // + [ciphertext of record type byte]
- // + [(optionally) ciphertext of padding by zeros]
- // + [tag]
- header, payload, err := splitAndValidateHeader(record)
- if err != nil {
- return 0, err
- }
- // Decrypt the ciphertext.
- p.pendingApplicationData, err = p.inConn.Decrypt(payload[:0], payload, header)
- if err != nil {
- return 0, err
- }
- // Remove the padding by zeros and the record type byte from the
- // p.pendingApplicationData buffer.
- msgType, err := p.stripPaddingAndType()
- if err != nil {
- return 0, err
- }
- // Check that the length of the plaintext after stripping the padding
- // and record type byte is under the maximum plaintext size.
- if len(p.pendingApplicationData) > tlsRecordMaxPlaintextSize {
- return 0, errors.New("plaintext size larger than maximum")
- }
- // The expected message types are application data, alert, and
- // handshake. For application data, the bytes are directly copied into
- // b. For an alert, the type of the alert is checked and the connection
- // is closed on a close notify alert. For a handshake message, the
- // handshake message type is checked. The handshake message type can be
- // a key update type, for which we advance the traffic secret, and a
- // new session ticket type, for which we send the received ticket to S2A
- // for processing.
- switch msgType {
- case applicationData:
- if len(p.handshakeBuf) > 0 {
- return 0, errors.New("application data received while processing fragmented handshake messages")
- }
- if p.ticketState == receivingTickets {
- p.ticketState = notReceivingTickets
- grpclog.Infof("Sending session tickets to S2A.")
- p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete)
- }
- case alert:
- return 0, p.handleAlertMessage()
- case handshake:
- if err = p.handleHandshakeMessage(); err != nil {
- return 0, err
- }
- return 0, nil
- default:
- return 0, errors.New("unknown record type")
- }
- }
- // Write as much application data as possible to b, the output buffer.
- n = copy(b, p.pendingApplicationData)
- p.pendingApplicationData = p.pendingApplicationData[n:]
- return n, nil
-}
-
-// Write divides b into segments of size tlsRecordMaxPlaintextSize, builds a
-// TLS 1.3 record (of type "application data") from each segment, and sends
-// the record to the peer. It returns the number of plaintext bytes that were
-// successfully sent to the peer.
-func (p *conn) Write(b []byte) (n int, err error) {
- p.writeMutex.Lock()
- defer p.writeMutex.Unlock()
- return p.writeTLSRecord(b, tlsApplicationData)
-}
-
-// writeTLSRecord divides b into segments of size maxPlaintextBytesPerRecord,
-// builds a TLS 1.3 record (of type recordType) from each segment, and sends
-// the record to the peer. It returns the number of plaintext bytes that were
-// successfully sent to the peer.
-func (p *conn) writeTLSRecord(b []byte, recordType byte) (n int, err error) {
- // Create a record of only header, record type, and tag if given empty
- // byte array.
- if len(b) == 0 {
- recordEndIndex, _, err := p.buildRecord(b, recordType, 0)
- if err != nil {
- return 0, err
- }
-
- // Write the bytes stored in outRecordsBuf to p.Conn. Since we return
- // the number of plaintext bytes written without overhead, we will
- // always return 0 while p.Conn.Write returns the entire record length.
- _, err = p.Conn.Write(p.outRecordsBuf[:recordEndIndex])
- return 0, err
- }
-
- numRecords := int(math.Ceil(float64(len(b)) / float64(tlsRecordMaxPlaintextSize)))
- totalRecordsSize := len(b) + numRecords*p.overheadSize
- partialBSize := len(b)
- if totalRecordsSize > outBufMaxSize {
- totalRecordsSize = outBufMaxSize
- partialBSize = outBufMaxRecords * tlsRecordMaxPlaintextSize
- }
- if len(p.outRecordsBuf) < totalRecordsSize {
- p.outRecordsBuf = make([]byte, totalRecordsSize)
- }
- for bStart := 0; bStart < len(b); bStart += partialBSize {
- bEnd := bStart + partialBSize
- if bEnd > len(b) {
- bEnd = len(b)
- }
- partialB := b[bStart:bEnd]
- recordEndIndex := 0
- for len(partialB) > 0 {
- recordEndIndex, partialB, err = p.buildRecord(partialB, recordType, recordEndIndex)
- if err != nil {
- // Return the amount of bytes written prior to the error.
- return bStart, err
- }
- }
- // Write the bytes stored in outRecordsBuf to p.Conn. If there is an
- // error, calculate the total number of plaintext bytes of complete
- // records successfully written to the peer and return it.
- nn, err := p.Conn.Write(p.outRecordsBuf[:recordEndIndex])
- if err != nil {
- numberOfCompletedRecords := int(math.Floor(float64(nn) / float64(tlsRecordMaxSize)))
- return bStart + numberOfCompletedRecords*tlsRecordMaxPlaintextSize, err
- }
- }
- return len(b), nil
-}
-
-// buildRecord builds a TLS 1.3 record of type recordType from plaintext,
-// and writes the record to outRecordsBuf at recordStartIndex. The record will
-// have at most tlsRecordMaxPlaintextSize bytes of payload. It returns the
-// index of outRecordsBuf where the current record ends, as well as any
-// remaining plaintext bytes.
-func (p *conn) buildRecord(plaintext []byte, recordType byte, recordStartIndex int) (n int, remainingPlaintext []byte, err error) {
- // Construct the payload, which consists of application data and record type.
- dataLen := len(plaintext)
- if dataLen > tlsRecordMaxPlaintextSize {
- dataLen = tlsRecordMaxPlaintextSize
- }
- remainingPlaintext = plaintext[dataLen:]
- newRecordBuf := p.outRecordsBuf[recordStartIndex:]
-
- copy(newRecordBuf[tlsRecordHeaderSize:], plaintext[:dataLen])
- newRecordBuf[tlsRecordHeaderSize+dataLen] = recordType
- payload := newRecordBuf[tlsRecordHeaderSize : tlsRecordHeaderSize+dataLen+1] // 1 is for the recordType.
- // Construct the header.
- newRecordBuf[0] = tlsApplicationData
- newRecordBuf[1] = tlsLegacyRecordVersion
- newRecordBuf[2] = tlsLegacyRecordVersion
- binary.BigEndian.PutUint16(newRecordBuf[3:], uint16(len(payload)+tlsTagSize))
- header := newRecordBuf[:tlsRecordHeaderSize]
-
- // Encrypt the payload using header as aad.
- encryptedPayload, err := p.outConn.Encrypt(newRecordBuf[tlsRecordHeaderSize:][:0], payload, header)
- if err != nil {
- return 0, plaintext, err
- }
- recordStartIndex += len(header) + len(encryptedPayload)
- return recordStartIndex, remainingPlaintext, nil
-}
-
-func (p *conn) Close() error {
- p.readMutex.Lock()
- defer p.readMutex.Unlock()
- p.writeMutex.Lock()
- defer p.writeMutex.Unlock()
- // If p.ticketState is equal to notReceivingTickets, then S2A has
- // been sent a flight of session tickets, and we must wait for the
- // call to S2A to complete before closing the record protocol.
- if p.ticketState == notReceivingTickets {
- <-p.callComplete
- grpclog.Infof("Safe to close the connection because sending tickets to S2A is (already) complete.")
- }
- return p.Conn.Close()
-}
-
-// stripPaddingAndType strips the padding by zeros and record type from
-// p.pendingApplicationData and returns the record type. Note that
-// p.pendingApplicationData should be of the form:
-// [application data] + [record type byte] + [trailing zeros]
-func (p *conn) stripPaddingAndType() (recordType, error) {
- if len(p.pendingApplicationData) == 0 {
- return 0, errors.New("application data had length 0")
- }
- i := len(p.pendingApplicationData) - 1
- // Search for the index of the record type byte.
- for i > 0 {
- if p.pendingApplicationData[i] != 0 {
- break
- }
- i--
- }
- rt := recordType(p.pendingApplicationData[i])
- p.pendingApplicationData = p.pendingApplicationData[:i]
- return rt, nil
-}
-
-// readFullRecord reads from the wire until a record is completed and returns
-// the full record.
-func (p *conn) readFullRecord() (fullRecord []byte, err error) {
- fullRecord, p.nextRecord, err = parseReadBuffer(p.nextRecord, tlsRecordMaxPayloadSize)
- if err != nil {
- return nil, err
- }
- // Check whether the next record to be decrypted has been completely
- // received.
- if len(fullRecord) == 0 {
- copy(p.unusedBuf, p.nextRecord)
- p.unusedBuf = p.unusedBuf[:len(p.nextRecord)]
- // Always copy next incomplete record to the beginning of the
- // unusedBuf buffer and reset nextRecord to it.
- p.nextRecord = p.unusedBuf
- }
- // Keep reading from the wire until we have a complete record.
- for len(fullRecord) == 0 {
- if len(p.unusedBuf) == cap(p.unusedBuf) {
- tmp := make([]byte, len(p.unusedBuf), cap(p.unusedBuf)+tlsRecordMaxSize)
- copy(tmp, p.unusedBuf)
- p.unusedBuf = tmp
- }
- n, err := p.Conn.Read(p.unusedBuf[len(p.unusedBuf):min(cap(p.unusedBuf), len(p.unusedBuf)+tlsRecordMaxSize)])
- if err != nil {
- return nil, err
- }
- p.unusedBuf = p.unusedBuf[:len(p.unusedBuf)+n]
- fullRecord, p.nextRecord, err = parseReadBuffer(p.unusedBuf, tlsRecordMaxPayloadSize)
- if err != nil {
- return nil, err
- }
- }
- return fullRecord, nil
-}
-
-// parseReadBuffer parses the provided buffer and returns a full record and any
-// remaining bytes in that buffer. If the record is incomplete, nil is returned
-// for the first return value and the given byte buffer is returned for the
-// second return value. The length of the payload specified by the header should
-// not be greater than maxLen, otherwise an error is returned. Note that this
-// function does not allocate or copy any buffers.
-func parseReadBuffer(b []byte, maxLen uint16) (fullRecord, remaining []byte, err error) {
- // If the header is not complete, return the provided buffer as remaining
- // buffer.
- if len(b) < tlsRecordHeaderSize {
- return nil, b, nil
- }
- msgLenField := b[tlsRecordHeaderTypeSize+tlsRecordHeaderLegacyRecordVersionSize : tlsRecordHeaderSize]
- length := binary.BigEndian.Uint16(msgLenField)
- if length > maxLen {
- return nil, nil, fmt.Errorf("record length larger than the limit %d", maxLen)
- }
- if len(b) < int(length)+tlsRecordHeaderSize {
- // Record is not complete yet.
- return nil, b, nil
- }
- return b[:tlsRecordHeaderSize+length], b[tlsRecordHeaderSize+length:], nil
-}
-
-// splitAndValidateHeader splits the header from the payload in the TLS 1.3
-// record and returns them. Note that the header is checked for validity, and an
-// error is returned when an invalid header is parsed. Also note that this
-// function does not allocate or copy any buffers.
-func splitAndValidateHeader(record []byte) (header, payload []byte, err error) {
- if len(record) < tlsRecordHeaderSize {
- return nil, nil, fmt.Errorf("record was smaller than the header size")
- }
- header = record[:tlsRecordHeaderSize]
- payload = record[tlsRecordHeaderSize:]
- if header[0] != tlsApplicationData {
- return nil, nil, fmt.Errorf("incorrect type in the header")
- }
- // Check the legacy record version, which should be 0x03, 0x03.
- if header[1] != 0x03 || header[2] != 0x03 {
- return nil, nil, fmt.Errorf("incorrect legacy record version in the header")
- }
- return header, payload, nil
-}
-
-// handleAlertMessage handles an alert message.
-func (p *conn) handleAlertMessage() error {
- if len(p.pendingApplicationData) != tlsAlertSize {
- return errors.New("invalid alert message size")
- }
- alertType := p.pendingApplicationData[1]
- // Clear the body of the alert message.
- p.pendingApplicationData = p.pendingApplicationData[:0]
- if alertType == byte(closeNotify) {
- return errors.New("received a close notify alert")
- }
- // TODO(matthewstevenson88): Add support for more alert types.
- return fmt.Errorf("received an unrecognized alert type: %v", alertType)
-}
-
-// parseHandshakeHeader parses a handshake message from the handshake buffer.
-// It returns the message type, the message length, the message, the raw message
-// that includes the type and length bytes and a flag indicating whether the
-// handshake message has been fully parsed. i.e. whether the entire handshake
-// message was in the handshake buffer.
-func (p *conn) parseHandshakeMsg() (msgType byte, msgLen uint32, msg []byte, rawMsg []byte, ok bool) {
- // Handle the case where the 4 byte handshake header is fragmented.
- if len(p.handshakeBuf) < tlsHandshakePrefixSize {
- return 0, 0, nil, nil, false
- }
- msgType = p.handshakeBuf[0]
- msgLen = bigEndianInt24(p.handshakeBuf[tlsHandshakeMsgTypeSize : tlsHandshakeMsgTypeSize+tlsHandshakeLengthSize])
- if msgLen > uint32(len(p.handshakeBuf)-tlsHandshakePrefixSize) {
- return 0, 0, nil, nil, false
- }
- msg = p.handshakeBuf[tlsHandshakePrefixSize : tlsHandshakePrefixSize+msgLen]
- rawMsg = p.handshakeBuf[:tlsHandshakeMsgTypeSize+tlsHandshakeLengthSize+msgLen]
- p.handshakeBuf = p.handshakeBuf[tlsHandshakePrefixSize+msgLen:]
- return msgType, msgLen, msg, rawMsg, true
-}
-
-// handleHandshakeMessage handles a handshake message. Note that the first
-// complete handshake message from the handshake buffer is removed, if it
-// exists.
-func (p *conn) handleHandshakeMessage() error {
- // Copy the pending application data to the handshake buffer. At this point,
- // we are guaranteed that the pending application data contains only parts
- // of a handshake message.
- p.handshakeBuf = append(p.handshakeBuf, p.pendingApplicationData...)
- p.pendingApplicationData = p.pendingApplicationData[:0]
- // Several handshake messages may be coalesced into a single record.
- // Continue reading them until the handshake buffer is empty.
- for len(p.handshakeBuf) > 0 {
- handshakeMsgType, msgLen, msg, rawMsg, ok := p.parseHandshakeMsg()
- if !ok {
- // The handshake could not be fully parsed, so read in another
- // record and try again later.
- break
- }
- switch handshakeMsgType {
- case tlsHandshakeKeyUpdateType:
- if msgLen != tlsHandshakeKeyUpdateMsgSize {
- return errors.New("invalid handshake key update message length")
- }
- if len(p.handshakeBuf) != 0 {
- return errors.New("key update message must be the last message of a handshake record")
- }
- if err := p.handleKeyUpdateMsg(msg); err != nil {
- return err
- }
- case tlsHandshakeNewSessionTicketType:
- // Ignore tickets that are received after a batch of tickets has
- // been sent to S2A.
- if p.ticketState == notReceivingTickets {
- continue
- }
- if p.ticketState == ticketsNotYetReceived {
- p.ticketState = receivingTickets
- }
- p.sessionTickets = append(p.sessionTickets, rawMsg)
- if len(p.sessionTickets) == maxAllowedTickets {
- p.ticketState = notReceivingTickets
- grpclog.Infof("Sending session tickets to S2A.")
- p.ticketSender.sendTicketsToS2A(p.sessionTickets, p.callComplete)
- }
- default:
- return errors.New("unknown handshake message type")
- }
- }
- return nil
-}
-
-func buildKeyUpdateRequest() []byte {
- b := make([]byte, tlsHandshakePrefixSize+tlsHandshakeKeyUpdateMsgSize)
- b[0] = tlsHandshakeKeyUpdateType
- b[1] = 0
- b[2] = 0
- b[3] = tlsHandshakeKeyUpdateMsgSize
- b[4] = byte(updateNotRequested)
- return b
-}
-
-// handleKeyUpdateMsg handles a key update message.
-func (p *conn) handleKeyUpdateMsg(msg []byte) error {
- keyUpdateRequest := msg[0]
- if keyUpdateRequest != byte(updateNotRequested) &&
- keyUpdateRequest != byte(updateRequested) {
- return errors.New("invalid handshake key update message")
- }
- if err := p.inConn.UpdateKey(); err != nil {
- return err
- }
- // Send a key update message back to the peer if requested.
- if keyUpdateRequest == byte(updateRequested) {
- p.writeMutex.Lock()
- defer p.writeMutex.Unlock()
- n, err := p.writeTLSRecord(preConstructedKeyUpdateMsg, byte(handshake))
- if err != nil {
- return err
- }
- if n != tlsHandshakePrefixSize+tlsHandshakeKeyUpdateMsgSize {
- return errors.New("key update request message wrote less bytes than expected")
- }
- if err = p.outConn.UpdateKey(); err != nil {
- return err
- }
- }
- return nil
-}
-
-// bidEndianInt24 converts the given byte buffer of at least size 3 and
-// outputs the resulting 24 bit integer as a uint32. This is needed because
-// TLS 1.3 requires 3 byte integers, and the binary.BigEndian package does
-// not provide a way to transform a byte buffer into a 3 byte integer.
-func bigEndianInt24(b []byte) uint32 {
- _ = b[2] // bounds check hint to compiler; see golang.org/issue/14808
- return uint32(b[2]) | uint32(b[1])<<8 | uint32(b[0])<<16
-}
-
-func min(a, b int) int {
- if a < b {
- return a
- }
- return b
-}
diff --git a/vendor/github.com/google/s2a-go/internal/record/ticketsender.go b/vendor/github.com/google/s2a-go/internal/record/ticketsender.go
deleted file mode 100644
index e51199ab..00000000
--- a/vendor/github.com/google/s2a-go/internal/record/ticketsender.go
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- *
- * Copyright 2021 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package record
-
-import (
- "context"
- "fmt"
- "sync"
- "time"
-
- "github.com/google/s2a-go/internal/handshaker/service"
- commonpb "github.com/google/s2a-go/internal/proto/common_go_proto"
- s2apb "github.com/google/s2a-go/internal/proto/s2a_go_proto"
- "github.com/google/s2a-go/internal/tokenmanager"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
-)
-
-// sessionTimeout is the timeout for creating a session with the S2A handshaker
-// service.
-const sessionTimeout = time.Second * 5
-
-// s2aTicketSender sends session tickets to the S2A handshaker service.
-type s2aTicketSender interface {
- // sendTicketsToS2A sends the given session tickets to the S2A handshaker
- // service.
- sendTicketsToS2A(sessionTickets [][]byte, callComplete chan bool)
-}
-
-// ticketStream is the stream used to send and receive session information.
-type ticketStream interface {
- Send(*s2apb.SessionReq) error
- Recv() (*s2apb.SessionResp, error)
-}
-
-type ticketSender struct {
- // hsAddr stores the address of the S2A handshaker service.
- hsAddr string
- // connectionID is the connection identifier that was created and sent by
- // S2A at the end of a handshake.
- connectionID uint64
- // localIdentity is the local identity that was used by S2A during session
- // setup and included in the session result.
- localIdentity *commonpb.Identity
- // tokenManager manages access tokens for authenticating to S2A.
- tokenManager tokenmanager.AccessTokenManager
- // ensureProcessSessionTickets allows users to wait and ensure that all
- // available session tickets are sent to S2A before a process completes.
- ensureProcessSessionTickets *sync.WaitGroup
-}
-
-// sendTicketsToS2A sends the given sessionTickets to the S2A handshaker
-// service. This is done asynchronously and writes to the error logs if an error
-// occurs.
-func (t *ticketSender) sendTicketsToS2A(sessionTickets [][]byte, callComplete chan bool) {
- // Note that the goroutine is in the function rather than at the caller
- // because the fake ticket sender used for testing must run synchronously
- // so that the session tickets can be accessed from it after the tests have
- // been run.
- if t.ensureProcessSessionTickets != nil {
- t.ensureProcessSessionTickets.Add(1)
- }
- go func() {
- if err := func() error {
- defer func() {
- if t.ensureProcessSessionTickets != nil {
- t.ensureProcessSessionTickets.Done()
- }
- }()
- ctx, cancel := context.WithTimeout(context.Background(), sessionTimeout)
- defer cancel()
- // The transportCreds only needs to be set when talking to S2AV2 and also
- // if mTLS is required.
- hsConn, err := service.Dial(ctx, t.hsAddr, nil)
- if err != nil {
- return err
- }
- client := s2apb.NewS2AServiceClient(hsConn)
- session, err := client.SetUpSession(ctx)
- if err != nil {
- return err
- }
- defer func() {
- if err := session.CloseSend(); err != nil {
- grpclog.Error(err)
- }
- }()
- return t.writeTicketsToStream(session, sessionTickets)
- }(); err != nil {
- grpclog.Errorf("failed to send resumption tickets to S2A with identity: %v, %v",
- t.localIdentity, err)
- }
- callComplete <- true
- close(callComplete)
- }()
-}
-
-// writeTicketsToStream writes the given session tickets to the given stream.
-func (t *ticketSender) writeTicketsToStream(stream ticketStream, sessionTickets [][]byte) error {
- if err := stream.Send(
- &s2apb.SessionReq{
- ReqOneof: &s2apb.SessionReq_ResumptionTicket{
- ResumptionTicket: &s2apb.ResumptionTicketReq{
- InBytes: sessionTickets,
- ConnectionId: t.connectionID,
- LocalIdentity: t.localIdentity,
- },
- },
- AuthMechanisms: t.getAuthMechanisms(),
- },
- ); err != nil {
- return err
- }
- sessionResp, err := stream.Recv()
- if err != nil {
- return err
- }
- if sessionResp.GetStatus().GetCode() != uint32(codes.OK) {
- return fmt.Errorf("s2a session ticket response had error status: %v, %v",
- sessionResp.GetStatus().GetCode(), sessionResp.GetStatus().GetDetails())
- }
- return nil
-}
-
-func (t *ticketSender) getAuthMechanisms() []*s2apb.AuthenticationMechanism {
- if t.tokenManager == nil {
- return nil
- }
- // First handle the special case when no local identity has been provided
- // by the application. In this case, an AuthenticationMechanism with no local
- // identity will be sent.
- if t.localIdentity == nil {
- token, err := t.tokenManager.DefaultToken()
- if err != nil {
- grpclog.Infof("unable to get token for empty local identity: %v", err)
- return nil
- }
- return []*s2apb.AuthenticationMechanism{
- {
- MechanismOneof: &s2apb.AuthenticationMechanism_Token{
- Token: token,
- },
- },
- }
- }
-
- // Next, handle the case where the application (or the S2A) has specified
- // a local identity.
- token, err := t.tokenManager.Token(t.localIdentity)
- if err != nil {
- grpclog.Infof("unable to get token for local identity %v: %v", t.localIdentity, err)
- return nil
- }
- return []*s2apb.AuthenticationMechanism{
- {
- Identity: t.localIdentity,
- MechanismOneof: &s2apb.AuthenticationMechanism_Token{
- Token: token,
- },
- },
- }
-}
diff --git a/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go b/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go
deleted file mode 100644
index ec96ba3b..00000000
--- a/vendor/github.com/google/s2a-go/internal/tokenmanager/tokenmanager.go
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- *
- * Copyright 2021 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package tokenmanager provides tokens for authenticating to S2A.
-package tokenmanager
-
-import (
- "fmt"
- "os"
-
- commonpb "github.com/google/s2a-go/internal/proto/common_go_proto"
-)
-
-const (
- s2aAccessTokenEnvironmentVariable = "S2A_ACCESS_TOKEN"
-)
-
-// AccessTokenManager manages tokens for authenticating to S2A.
-type AccessTokenManager interface {
- // DefaultToken returns a token that an application with no specified local
- // identity must use to authenticate to S2A.
- DefaultToken() (token string, err error)
- // Token returns a token that an application with local identity equal to
- // identity must use to authenticate to S2A.
- Token(identity *commonpb.Identity) (token string, err error)
-}
-
-type singleTokenAccessTokenManager struct {
- token string
-}
-
-// NewSingleTokenAccessTokenManager returns a new AccessTokenManager instance
-// that will always manage the same token.
-//
-// The token to be managed is read from the s2aAccessTokenEnvironmentVariable
-// environment variable. If this environment variable is not set, then this
-// function returns an error.
-func NewSingleTokenAccessTokenManager() (AccessTokenManager, error) {
- token, variableExists := os.LookupEnv(s2aAccessTokenEnvironmentVariable)
- if !variableExists {
- return nil, fmt.Errorf("%s environment variable is not set", s2aAccessTokenEnvironmentVariable)
- }
- return &singleTokenAccessTokenManager{token: token}, nil
-}
-
-// DefaultToken always returns the token managed by the
-// singleTokenAccessTokenManager.
-func (m *singleTokenAccessTokenManager) DefaultToken() (string, error) {
- return m.token, nil
-}
-
-// Token always returns the token managed by the singleTokenAccessTokenManager.
-func (m *singleTokenAccessTokenManager) Token(*commonpb.Identity) (string, error) {
- return m.token, nil
-}
diff --git a/vendor/github.com/google/s2a-go/internal/v2/README.md b/vendor/github.com/google/s2a-go/internal/v2/README.md
deleted file mode 100644
index 3806d1e9..00000000
--- a/vendor/github.com/google/s2a-go/internal/v2/README.md
+++ /dev/null
@@ -1 +0,0 @@
-**This directory has the implementation of the S2Av2's gRPC-Go client libraries**
diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go b/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go
deleted file mode 100644
index cc811879..00000000
--- a/vendor/github.com/google/s2a-go/internal/v2/certverifier/certverifier.go
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- *
- * Copyright 2022 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package certverifier offloads verifications to S2Av2.
-package certverifier
-
-import (
- "crypto/x509"
- "fmt"
-
- "github.com/google/s2a-go/stream"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
-
- s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto"
-)
-
-// VerifyClientCertificateChain builds a SessionReq, sends it to S2Av2 and
-// receives a SessionResp.
-func VerifyClientCertificateChain(verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
- return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
- // Offload verification to S2Av2.
- if grpclog.V(1) {
- grpclog.Infof("Sending request to S2Av2 for client peer cert chain validation.")
- }
- if err := s2AStream.Send(&s2av2pb.SessionReq{
- ReqOneof: &s2av2pb.SessionReq_ValidatePeerCertificateChainReq{
- ValidatePeerCertificateChainReq: &s2av2pb.ValidatePeerCertificateChainReq{
- Mode: verificationMode,
- PeerOneof: &s2av2pb.ValidatePeerCertificateChainReq_ClientPeer_{
- ClientPeer: &s2av2pb.ValidatePeerCertificateChainReq_ClientPeer{
- CertificateChain: rawCerts,
- },
- },
- },
- },
- }); err != nil {
- grpclog.Infof("Failed to send request to S2Av2 for client peer cert chain validation.")
- return err
- }
-
- // Get the response from S2Av2.
- resp, err := s2AStream.Recv()
- if err != nil {
- grpclog.Infof("Failed to receive client peer cert chain validation response from S2Av2.")
- return err
- }
-
- // Parse the response.
- if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) {
- return fmt.Errorf("failed to offload client cert verification to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details)
-
- }
-
- if resp.GetValidatePeerCertificateChainResp().ValidationResult != s2av2pb.ValidatePeerCertificateChainResp_SUCCESS {
- return fmt.Errorf("client cert verification failed: %v", resp.GetValidatePeerCertificateChainResp().ValidationDetails)
- }
-
- return nil
- }
-}
-
-// VerifyServerCertificateChain builds a SessionReq, sends it to S2Av2 and
-// receives a SessionResp.
-func VerifyServerCertificateChain(hostname string, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream, serverAuthorizationPolicy []byte) func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
- return func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
- // Offload verification to S2Av2.
- if grpclog.V(1) {
- grpclog.Infof("Sending request to S2Av2 for server peer cert chain validation.")
- }
- if err := s2AStream.Send(&s2av2pb.SessionReq{
- ReqOneof: &s2av2pb.SessionReq_ValidatePeerCertificateChainReq{
- ValidatePeerCertificateChainReq: &s2av2pb.ValidatePeerCertificateChainReq{
- Mode: verificationMode,
- PeerOneof: &s2av2pb.ValidatePeerCertificateChainReq_ServerPeer_{
- ServerPeer: &s2av2pb.ValidatePeerCertificateChainReq_ServerPeer{
- CertificateChain: rawCerts,
- ServerHostname: hostname,
- SerializedUnrestrictedClientPolicy: serverAuthorizationPolicy,
- },
- },
- },
- },
- }); err != nil {
- grpclog.Infof("Failed to send request to S2Av2 for server peer cert chain validation.")
- return err
- }
-
- // Get the response from S2Av2.
- resp, err := s2AStream.Recv()
- if err != nil {
- grpclog.Infof("Failed to receive server peer cert chain validation response from S2Av2.")
- return err
- }
-
- // Parse the response.
- if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) {
- return fmt.Errorf("failed to offload server cert verification to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details)
- }
-
- if resp.GetValidatePeerCertificateChainResp().ValidationResult != s2av2pb.ValidatePeerCertificateChainResp_SUCCESS {
- return fmt.Errorf("server cert verification failed: %v", resp.GetValidatePeerCertificateChainResp().ValidationDetails)
- }
-
- return nil
- }
-}
diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der
deleted file mode 100644
index 958f3cfa..00000000
Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_intermediate_cert.der and /dev/null differ
diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der
deleted file mode 100644
index d2817641..00000000
Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_leaf_cert.der and /dev/null differ
diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der
deleted file mode 100644
index d8c3710c..00000000
Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/client_root_cert.der and /dev/null differ
diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der
deleted file mode 100644
index dae619c0..00000000
Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_intermediate_cert.der and /dev/null differ
diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der
deleted file mode 100644
index ce7f8d31..00000000
Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_leaf_cert.der and /dev/null differ
diff --git a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der b/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der
deleted file mode 100644
index 04b0d736..00000000
Binary files a/vendor/github.com/google/s2a-go/internal/v2/certverifier/testdata/server_root_cert.der and /dev/null differ
diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go
deleted file mode 100644
index e7478d43..00000000
--- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/remotesigner.go
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- *
- * Copyright 2022 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package remotesigner offloads private key operations to S2Av2.
-package remotesigner
-
-import (
- "crypto"
- "crypto/rsa"
- "crypto/x509"
- "fmt"
- "io"
-
- "github.com/google/s2a-go/stream"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
-
- s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto"
-)
-
-// remoteSigner implementes the crypto.Signer interface.
-type remoteSigner struct {
- leafCert *x509.Certificate
- s2AStream stream.S2AStream
-}
-
-// New returns an instance of RemoteSigner, an implementation of the
-// crypto.Signer interface.
-func New(leafCert *x509.Certificate, s2AStream stream.S2AStream) crypto.Signer {
- return &remoteSigner{leafCert, s2AStream}
-}
-
-func (s *remoteSigner) Public() crypto.PublicKey {
- return s.leafCert.PublicKey
-}
-
-func (s *remoteSigner) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) {
- signatureAlgorithm, err := getSignatureAlgorithm(opts, s.leafCert)
- if err != nil {
- return nil, err
- }
-
- req, err := getSignReq(signatureAlgorithm, digest)
- if err != nil {
- return nil, err
- }
- if grpclog.V(1) {
- grpclog.Infof("Sending request to S2Av2 for signing operation.")
- }
- if err := s.s2AStream.Send(&s2av2pb.SessionReq{
- ReqOneof: &s2av2pb.SessionReq_OffloadPrivateKeyOperationReq{
- OffloadPrivateKeyOperationReq: req,
- },
- }); err != nil {
- grpclog.Infof("Failed to send request to S2Av2 for signing operation.")
- return nil, err
- }
-
- resp, err := s.s2AStream.Recv()
- if err != nil {
- grpclog.Infof("Failed to receive signing operation response from S2Av2.")
- return nil, err
- }
-
- if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) {
- return nil, fmt.Errorf("failed to offload signing with private key to S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details)
- }
-
- return resp.GetOffloadPrivateKeyOperationResp().GetOutBytes(), nil
-}
-
-// getCert returns the leafCert field in s.
-func (s *remoteSigner) getCert() *x509.Certificate {
- return s.leafCert
-}
-
-// getStream returns the s2AStream field in s.
-func (s *remoteSigner) getStream() stream.S2AStream {
- return s.s2AStream
-}
-
-func getSignReq(signatureAlgorithm s2av2pb.SignatureAlgorithm, digest []byte) (*s2av2pb.OffloadPrivateKeyOperationReq, error) {
- if (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA256) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256) {
- return &s2av2pb.OffloadPrivateKeyOperationReq{
- Operation: s2av2pb.OffloadPrivateKeyOperationReq_SIGN,
- SignatureAlgorithm: signatureAlgorithm,
- InBytes: &s2av2pb.OffloadPrivateKeyOperationReq_Sha256Digest{
- Sha256Digest: digest,
- },
- }, nil
- } else if (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA384) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384) {
- return &s2av2pb.OffloadPrivateKeyOperationReq{
- Operation: s2av2pb.OffloadPrivateKeyOperationReq_SIGN,
- SignatureAlgorithm: signatureAlgorithm,
- InBytes: &s2av2pb.OffloadPrivateKeyOperationReq_Sha384Digest{
- Sha384Digest: digest,
- },
- }, nil
- } else if (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA512) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512) || (signatureAlgorithm == s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ED25519) {
- return &s2av2pb.OffloadPrivateKeyOperationReq{
- Operation: s2av2pb.OffloadPrivateKeyOperationReq_SIGN,
- SignatureAlgorithm: signatureAlgorithm,
- InBytes: &s2av2pb.OffloadPrivateKeyOperationReq_Sha512Digest{
- Sha512Digest: digest,
- },
- }, nil
- } else {
- return nil, fmt.Errorf("unknown signature algorithm: %v", signatureAlgorithm)
- }
-}
-
-// getSignatureAlgorithm returns the signature algorithm that S2A must use when
-// performing a signing operation that has been offloaded by an application
-// using the crypto/tls libraries.
-func getSignatureAlgorithm(opts crypto.SignerOpts, leafCert *x509.Certificate) (s2av2pb.SignatureAlgorithm, error) {
- if opts == nil || leafCert == nil {
- return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm")
- }
- switch leafCert.PublicKeyAlgorithm {
- case x509.RSA:
- if rsaPSSOpts, ok := opts.(*rsa.PSSOptions); ok {
- return rsaPSSAlgorithm(rsaPSSOpts)
- }
- return rsaPPKCS1Algorithm(opts)
- case x509.ECDSA:
- return ecdsaAlgorithm(opts)
- case x509.Ed25519:
- return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ED25519, nil
- default:
- return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm: %q", leafCert.PublicKeyAlgorithm)
- }
-}
-
-func rsaPSSAlgorithm(opts *rsa.PSSOptions) (s2av2pb.SignatureAlgorithm, error) {
- switch opts.HashFunc() {
- case crypto.SHA256:
- return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA256, nil
- case crypto.SHA384:
- return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA384, nil
- case crypto.SHA512:
- return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PSS_RSAE_SHA512, nil
- default:
- return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm")
- }
-}
-
-func rsaPPKCS1Algorithm(opts crypto.SignerOpts) (s2av2pb.SignatureAlgorithm, error) {
- switch opts.HashFunc() {
- case crypto.SHA256:
- return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA256, nil
- case crypto.SHA384:
- return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA384, nil
- case crypto.SHA512:
- return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_RSA_PKCS1_SHA512, nil
- default:
- return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm")
- }
-}
-
-func ecdsaAlgorithm(opts crypto.SignerOpts) (s2av2pb.SignatureAlgorithm, error) {
- switch opts.HashFunc() {
- case crypto.SHA256:
- return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP256R1_SHA256, nil
- case crypto.SHA384:
- return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP384R1_SHA384, nil
- case crypto.SHA512:
- return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_ECDSA_SECP521R1_SHA512, nil
- default:
- return s2av2pb.SignatureAlgorithm_S2A_SSL_SIGN_UNSPECIFIED, fmt.Errorf("unknown signature algorithm")
- }
-}
diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der
deleted file mode 100644
index d8c3710c..00000000
Binary files a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.der and /dev/null differ
diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem
deleted file mode 100644
index 493a5a26..00000000
--- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_cert.pem
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL
-BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2
-YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE
-AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN
-MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ
-BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx
-ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ
-KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9
-a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0
-OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3
-RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK
-P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316
-HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu
-0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl
-MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6
-EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9
-/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA
-QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ
-nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD
-X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco
-pKklVz0=
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem
deleted file mode 100644
index 55a7f10c..00000000
--- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/client_key.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF
-l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj
-+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G
-4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA
-xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh
-68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ
-/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL
-Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA
-VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9
-9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH
-MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt
-aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq
-xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx
-2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv
-EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z
-aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq
-udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs
-VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm
-56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT
-GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V
-Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm
-HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q
-BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH
-qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh
-GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w=
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der
deleted file mode 100644
index 04b0d736..00000000
Binary files a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.der and /dev/null differ
diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem
deleted file mode 100644
index 0f98322c..00000000
--- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_cert.pem
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL
-BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2
-YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE
-AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN
-MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ
-BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx
-ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ
-KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT
-fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ
-qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE
-xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es
-Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2
-Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM
-ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX
-MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR
-e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X
-POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl
-AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg
-odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+
-PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN
-Dhm6uZM=
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem b/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem
deleted file mode 100644
index 81afea78..00000000
--- a/vendor/github.com/google/s2a-go/internal/v2/remotesigner/testdata/server_key.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs
-8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO
-QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk
-XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA
-Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc
-gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf
-LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl
-jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0
-4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q
-Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P
-nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1
-drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE
-duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50
-L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG
-06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm
-eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD
-uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7
-lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL
-a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb
-hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ
-7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j
-r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7
-eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD
-B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz
-7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g==
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go b/vendor/github.com/google/s2a-go/internal/v2/s2av2.go
deleted file mode 100644
index 85a8379d..00000000
--- a/vendor/github.com/google/s2a-go/internal/v2/s2av2.go
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
- *
- * Copyright 2022 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package v2 provides the S2Av2 transport credentials used by a gRPC
-// application.
-package v2
-
-import (
- "context"
- "crypto/tls"
- "errors"
- "net"
- "os"
- "time"
-
- "github.com/golang/protobuf/proto"
- "github.com/google/s2a-go/fallback"
- "github.com/google/s2a-go/internal/handshaker/service"
- "github.com/google/s2a-go/internal/tokenmanager"
- "github.com/google/s2a-go/internal/v2/tlsconfigstore"
- "github.com/google/s2a-go/retry"
- "github.com/google/s2a-go/stream"
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
-
- commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto"
- s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto"
-)
-
-const (
- s2aSecurityProtocol = "tls"
- defaultS2ATimeout = 6 * time.Second
-)
-
-// An environment variable, which sets the timeout enforced on the connection to the S2A service for handshake.
-const s2aTimeoutEnv = "S2A_TIMEOUT"
-
-type s2av2TransportCreds struct {
- info *credentials.ProtocolInfo
- isClient bool
- serverName string
- s2av2Address string
- transportCreds credentials.TransportCredentials
- tokenManager *tokenmanager.AccessTokenManager
- // localIdentity should only be used by the client.
- localIdentity *commonpbv1.Identity
- // localIdentities should only be used by the server.
- localIdentities []*commonpbv1.Identity
- verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode
- fallbackClientHandshake fallback.ClientHandshake
- getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)
- serverAuthorizationPolicy []byte
-}
-
-// NewClientCreds returns a client-side transport credentials object that uses
-// the S2Av2 to establish a secure connection with a server.
-func NewClientCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, fallbackClientHandshakeFunc fallback.ClientHandshake, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error), serverAuthorizationPolicy []byte) (credentials.TransportCredentials, error) {
- // Create an AccessTokenManager instance to use to authenticate to S2Av2.
- accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager()
-
- creds := &s2av2TransportCreds{
- info: &credentials.ProtocolInfo{
- SecurityProtocol: s2aSecurityProtocol,
- },
- isClient: true,
- serverName: "",
- s2av2Address: s2av2Address,
- transportCreds: transportCreds,
- localIdentity: localIdentity,
- verificationMode: verificationMode,
- fallbackClientHandshake: fallbackClientHandshakeFunc,
- getS2AStream: getS2AStream,
- serverAuthorizationPolicy: serverAuthorizationPolicy,
- }
- if err != nil {
- creds.tokenManager = nil
- } else {
- creds.tokenManager = &accessTokenManager
- }
- if grpclog.V(1) {
- grpclog.Info("Created client S2Av2 transport credentials.")
- }
- return creds, nil
-}
-
-// NewServerCreds returns a server-side transport credentials object that uses
-// the S2Av2 to establish a secure connection with a client.
-func NewServerCreds(s2av2Address string, transportCreds credentials.TransportCredentials, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (credentials.TransportCredentials, error) {
- // Create an AccessTokenManager instance to use to authenticate to S2Av2.
- accessTokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager()
- creds := &s2av2TransportCreds{
- info: &credentials.ProtocolInfo{
- SecurityProtocol: s2aSecurityProtocol,
- },
- isClient: false,
- s2av2Address: s2av2Address,
- transportCreds: transportCreds,
- localIdentities: localIdentities,
- verificationMode: verificationMode,
- getS2AStream: getS2AStream,
- }
- if err != nil {
- creds.tokenManager = nil
- } else {
- creds.tokenManager = &accessTokenManager
- }
- if grpclog.V(1) {
- grpclog.Info("Created server S2Av2 transport credentials.")
- }
- return creds, nil
-}
-
-// ClientHandshake performs a client-side mTLS handshake using the S2Av2.
-func (c *s2av2TransportCreds) ClientHandshake(ctx context.Context, serverAuthority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
- if !c.isClient {
- return nil, nil, errors.New("client handshake called using server transport credentials")
- }
- // Remove the port from serverAuthority.
- serverName := removeServerNamePort(serverAuthority)
- timeoutCtx, cancel := context.WithTimeout(ctx, GetS2ATimeout())
- defer cancel()
- var s2AStream stream.S2AStream
- var err error
- retry.Run(timeoutCtx,
- func() error {
- s2AStream, err = createStream(timeoutCtx, c.s2av2Address, c.transportCreds, c.getS2AStream)
- return err
- })
- if err != nil {
- grpclog.Infof("Failed to connect to S2Av2: %v", err)
- if c.fallbackClientHandshake != nil {
- return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err)
- }
- return nil, nil, err
- }
- defer s2AStream.CloseSend()
- if grpclog.V(1) {
- grpclog.Infof("Connected to S2Av2.")
- }
- var config *tls.Config
-
- var tokenManager tokenmanager.AccessTokenManager
- if c.tokenManager == nil {
- tokenManager = nil
- } else {
- tokenManager = *c.tokenManager
- }
-
- sn := serverName
- if c.serverName != "" {
- sn = c.serverName
- }
- retry.Run(timeoutCtx,
- func() error {
- config, err = tlsconfigstore.GetTLSConfigurationForClient(sn, s2AStream, tokenManager, c.localIdentity, c.verificationMode, c.serverAuthorizationPolicy)
- return err
- })
- if err != nil {
- grpclog.Info("Failed to get client TLS config from S2Av2: %v", err)
- if c.fallbackClientHandshake != nil {
- return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err)
- }
- return nil, nil, err
- }
- if grpclog.V(1) {
- grpclog.Infof("Got client TLS config from S2Av2.")
- }
-
- creds := credentials.NewTLS(config)
- var conn net.Conn
- var authInfo credentials.AuthInfo
- retry.Run(timeoutCtx,
- func() error {
- conn, authInfo, err = creds.ClientHandshake(timeoutCtx, serverName, rawConn)
- return err
- })
- if err != nil {
- grpclog.Infof("Failed to do client handshake using S2Av2: %v", err)
- if c.fallbackClientHandshake != nil {
- return c.fallbackClientHandshake(ctx, serverAuthority, rawConn, err)
- }
- return nil, nil, err
- }
- grpclog.Infof("Successfully done client handshake using S2Av2 to: %s", serverName)
-
- return conn, authInfo, err
-}
-
-// ServerHandshake performs a server-side mTLS handshake using the S2Av2.
-func (c *s2av2TransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
- if c.isClient {
- return nil, nil, errors.New("server handshake called using client transport credentials")
- }
- ctx, cancel := context.WithTimeout(context.Background(), GetS2ATimeout())
- defer cancel()
- var s2AStream stream.S2AStream
- var err error
- retry.Run(ctx,
- func() error {
- s2AStream, err = createStream(ctx, c.s2av2Address, c.transportCreds, c.getS2AStream)
- return err
- })
- if err != nil {
- grpclog.Infof("Failed to connect to S2Av2: %v", err)
- return nil, nil, err
- }
- defer s2AStream.CloseSend()
- if grpclog.V(1) {
- grpclog.Infof("Connected to S2Av2.")
- }
-
- var tokenManager tokenmanager.AccessTokenManager
- if c.tokenManager == nil {
- tokenManager = nil
- } else {
- tokenManager = *c.tokenManager
- }
-
- var config *tls.Config
- retry.Run(ctx,
- func() error {
- config, err = tlsconfigstore.GetTLSConfigurationForServer(s2AStream, tokenManager, c.localIdentities, c.verificationMode)
- return err
- })
- if err != nil {
- grpclog.Infof("Failed to get server TLS config from S2Av2: %v", err)
- return nil, nil, err
- }
- if grpclog.V(1) {
- grpclog.Infof("Got server TLS config from S2Av2.")
- }
-
- creds := credentials.NewTLS(config)
- var conn net.Conn
- var authInfo credentials.AuthInfo
- retry.Run(ctx,
- func() error {
- conn, authInfo, err = creds.ServerHandshake(rawConn)
- return err
- })
- if err != nil {
- grpclog.Infof("Failed to do server handshake using S2Av2: %v", err)
- return nil, nil, err
- }
- return conn, authInfo, err
-}
-
-// Info returns protocol info of s2av2TransportCreds.
-func (c *s2av2TransportCreds) Info() credentials.ProtocolInfo {
- return *c.info
-}
-
-// Clone makes a deep copy of s2av2TransportCreds.
-func (c *s2av2TransportCreds) Clone() credentials.TransportCredentials {
- info := *c.info
- serverName := c.serverName
- fallbackClientHandshake := c.fallbackClientHandshake
-
- s2av2Address := c.s2av2Address
- var tokenManager tokenmanager.AccessTokenManager
- if c.tokenManager == nil {
- tokenManager = nil
- } else {
- tokenManager = *c.tokenManager
- }
- verificationMode := c.verificationMode
- var localIdentity *commonpbv1.Identity
- if c.localIdentity != nil {
- localIdentity = proto.Clone(c.localIdentity).(*commonpbv1.Identity)
- }
- var localIdentities []*commonpbv1.Identity
- if c.localIdentities != nil {
- localIdentities = make([]*commonpbv1.Identity, len(c.localIdentities))
- for i, localIdentity := range c.localIdentities {
- localIdentities[i] = proto.Clone(localIdentity).(*commonpbv1.Identity)
- }
- }
- creds := &s2av2TransportCreds{
- info: &info,
- isClient: c.isClient,
- serverName: serverName,
- fallbackClientHandshake: fallbackClientHandshake,
- s2av2Address: s2av2Address,
- localIdentity: localIdentity,
- localIdentities: localIdentities,
- verificationMode: verificationMode,
- }
- if c.tokenManager == nil {
- creds.tokenManager = nil
- } else {
- creds.tokenManager = &tokenManager
- }
- return creds
-}
-
-// NewClientTLSConfig returns a tls.Config instance that uses S2Av2 to establish a TLS connection as
-// a client. The tls.Config MUST only be used to establish a single TLS connection.
-func NewClientTLSConfig(
- ctx context.Context,
- s2av2Address string,
- transportCreds credentials.TransportCredentials,
- tokenManager tokenmanager.AccessTokenManager,
- verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode,
- serverName string,
- serverAuthorizationPolicy []byte) (*tls.Config, error) {
- s2AStream, err := createStream(ctx, s2av2Address, transportCreds, nil)
- if err != nil {
- grpclog.Infof("Failed to connect to S2Av2: %v", err)
- return nil, err
- }
-
- return tlsconfigstore.GetTLSConfigurationForClient(removeServerNamePort(serverName), s2AStream, tokenManager, nil, verificationMode, serverAuthorizationPolicy)
-}
-
-// OverrideServerName sets the ServerName in the s2av2TransportCreds protocol
-// info. The ServerName MUST be a hostname.
-func (c *s2av2TransportCreds) OverrideServerName(serverNameOverride string) error {
- serverName := removeServerNamePort(serverNameOverride)
- c.info.ServerName = serverName
- c.serverName = serverName
- return nil
-}
-
-// Remove the trailing port from server name.
-func removeServerNamePort(serverName string) string {
- name, _, err := net.SplitHostPort(serverName)
- if err != nil {
- name = serverName
- }
- return name
-}
-
-type s2AGrpcStream struct {
- stream s2av2pb.S2AService_SetUpSessionClient
-}
-
-func (x s2AGrpcStream) Send(m *s2av2pb.SessionReq) error {
- return x.stream.Send(m)
-}
-
-func (x s2AGrpcStream) Recv() (*s2av2pb.SessionResp, error) {
- return x.stream.Recv()
-}
-
-func (x s2AGrpcStream) CloseSend() error {
- return x.stream.CloseSend()
-}
-
-func createStream(ctx context.Context, s2av2Address string, transportCreds credentials.TransportCredentials, getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)) (stream.S2AStream, error) {
- if getS2AStream != nil {
- return getS2AStream(ctx, s2av2Address)
- }
- // TODO(rmehta19): Consider whether to close the connection to S2Av2.
- conn, err := service.Dial(ctx, s2av2Address, transportCreds)
- if err != nil {
- return nil, err
- }
- client := s2av2pb.NewS2AServiceClient(conn)
- gRPCStream, err := client.SetUpSession(ctx, []grpc.CallOption{}...)
- if err != nil {
- return nil, err
- }
- return &s2AGrpcStream{
- stream: gRPCStream,
- }, nil
-}
-
-// GetS2ATimeout returns the timeout enforced on the connection to the S2A service for handshake.
-func GetS2ATimeout() time.Duration {
- timeout, err := time.ParseDuration(os.Getenv(s2aTimeoutEnv))
- if err != nil {
- return defaultS2ATimeout
- }
- return timeout
-}
diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem
deleted file mode 100644
index 493a5a26..00000000
--- a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_cert.pem
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL
-BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2
-YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE
-AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN
-MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ
-BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx
-ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ
-KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9
-a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0
-OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3
-RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK
-P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316
-HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu
-0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl
-MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6
-EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9
-/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA
-QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ
-nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD
-X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco
-pKklVz0=
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem
deleted file mode 100644
index 55a7f10c..00000000
--- a/vendor/github.com/google/s2a-go/internal/v2/testdata/client_key.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF
-l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj
-+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G
-4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA
-xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh
-68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ
-/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL
-Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA
-VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9
-9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH
-MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt
-aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq
-xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx
-2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv
-EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z
-aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq
-udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs
-VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm
-56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT
-GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V
-Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm
-HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q
-BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH
-qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh
-GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w=
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem
deleted file mode 100644
index 0f98322c..00000000
--- a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_cert.pem
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL
-BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2
-YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE
-AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN
-MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ
-BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx
-ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ
-KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT
-fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ
-qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE
-xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es
-Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2
-Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM
-ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX
-MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR
-e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X
-POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl
-AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg
-odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+
-PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN
-Dhm6uZM=
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem b/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem
deleted file mode 100644
index 81afea78..00000000
--- a/vendor/github.com/google/s2a-go/internal/v2/testdata/server_key.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs
-8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO
-QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk
-XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA
-Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc
-gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf
-LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl
-jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0
-4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q
-Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P
-nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1
-drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE
-duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50
-L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG
-06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm
-eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD
-uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7
-lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL
-a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb
-hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ
-7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j
-r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7
-eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD
-B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz
-7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g==
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem
deleted file mode 100644
index 493a5a26..00000000
--- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_cert.pem
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL
-BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2
-YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE
-AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN
-MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ
-BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx
-ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ
-KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9
-a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0
-OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3
-RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK
-P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316
-HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu
-0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl
-MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6
-EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9
-/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA
-QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ
-nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD
-X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco
-pKklVz0=
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem
deleted file mode 100644
index 55a7f10c..00000000
--- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/client_key.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF
-l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj
-+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G
-4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA
-xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh
-68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ
-/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL
-Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA
-VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9
-9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH
-MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt
-aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq
-xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx
-2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv
-EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z
-aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq
-udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs
-VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm
-56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT
-GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V
-Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm
-HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q
-BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH
-qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh
-GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w=
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem
deleted file mode 100644
index 0f98322c..00000000
--- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_cert.pem
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL
-BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2
-YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE
-AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN
-MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ
-BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx
-ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ
-KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT
-fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ
-qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE
-xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es
-Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2
-Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM
-ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX
-MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR
-e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X
-POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl
-AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg
-odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+
-PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN
-Dhm6uZM=
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem
deleted file mode 100644
index 81afea78..00000000
--- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/testdata/server_key.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs
-8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO
-QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk
-XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA
-Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc
-gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf
-LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl
-jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0
-4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q
-Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P
-nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1
-drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE
-duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50
-L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG
-06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm
-eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD
-uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7
-lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL
-a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb
-hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ
-7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j
-r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7
-eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD
-B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz
-7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g==
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go b/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go
deleted file mode 100644
index 4d919132..00000000
--- a/vendor/github.com/google/s2a-go/internal/v2/tlsconfigstore/tlsconfigstore.go
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- *
- * Copyright 2022 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package tlsconfigstore offloads operations to S2Av2.
-package tlsconfigstore
-
-import (
- "crypto/tls"
- "crypto/x509"
- "encoding/pem"
- "errors"
- "fmt"
-
- "github.com/google/s2a-go/internal/tokenmanager"
- "github.com/google/s2a-go/internal/v2/certverifier"
- "github.com/google/s2a-go/internal/v2/remotesigner"
- "github.com/google/s2a-go/stream"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
-
- commonpbv1 "github.com/google/s2a-go/internal/proto/common_go_proto"
- commonpb "github.com/google/s2a-go/internal/proto/v2/common_go_proto"
- s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto"
-)
-
-const (
- // HTTP/2
- h2 = "h2"
-)
-
-// GetTLSConfigurationForClient returns a tls.Config instance for use by a client application.
-func GetTLSConfigurationForClient(serverHostname string, s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentity *commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, serverAuthorizationPolicy []byte) (*tls.Config, error) {
- authMechanisms := getAuthMechanisms(tokenManager, []*commonpbv1.Identity{localIdentity})
-
- if grpclog.V(1) {
- grpclog.Infof("Sending request to S2Av2 for client TLS config.")
- }
- // Send request to S2Av2 for config.
- if err := s2AStream.Send(&s2av2pb.SessionReq{
- LocalIdentity: localIdentity,
- AuthenticationMechanisms: authMechanisms,
- ReqOneof: &s2av2pb.SessionReq_GetTlsConfigurationReq{
- GetTlsConfigurationReq: &s2av2pb.GetTlsConfigurationReq{
- ConnectionSide: commonpb.ConnectionSide_CONNECTION_SIDE_CLIENT,
- },
- },
- }); err != nil {
- grpclog.Infof("Failed to send request to S2Av2 for client TLS config")
- return nil, err
- }
-
- // Get the response containing config from S2Av2.
- resp, err := s2AStream.Recv()
- if err != nil {
- grpclog.Infof("Failed to receive client TLS config response from S2Av2.")
- return nil, err
- }
-
- // TODO(rmehta19): Add unit test for this if statement.
- if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) {
- return nil, fmt.Errorf("failed to get TLS configuration from S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details)
- }
-
- // Extract TLS configiguration from SessionResp.
- tlsConfig := resp.GetGetTlsConfigurationResp().GetClientTlsConfiguration()
-
- var cert tls.Certificate
- for i, v := range tlsConfig.CertificateChain {
- // Populate Certificates field.
- block, _ := pem.Decode([]byte(v))
- if block == nil {
- return nil, errors.New("certificate in CertificateChain obtained from S2Av2 is empty")
- }
- x509Cert, err := x509.ParseCertificate(block.Bytes)
- if err != nil {
- return nil, err
- }
- cert.Certificate = append(cert.Certificate, x509Cert.Raw)
- if i == 0 {
- cert.Leaf = x509Cert
- }
- }
-
- if len(tlsConfig.CertificateChain) > 0 {
- cert.PrivateKey = remotesigner.New(cert.Leaf, s2AStream)
- if cert.PrivateKey == nil {
- return nil, errors.New("failed to retrieve Private Key from Remote Signer Library")
- }
- }
-
- minVersion, maxVersion, err := getTLSMinMaxVersionsClient(tlsConfig)
- if err != nil {
- return nil, err
- }
-
- // Create mTLS credentials for client.
- config := &tls.Config{
- VerifyPeerCertificate: certverifier.VerifyServerCertificateChain(serverHostname, verificationMode, s2AStream, serverAuthorizationPolicy),
- ServerName: serverHostname,
- InsecureSkipVerify: true, // NOLINT
- ClientSessionCache: nil,
- SessionTicketsDisabled: true,
- MinVersion: minVersion,
- MaxVersion: maxVersion,
- NextProtos: []string{h2},
- }
- if len(tlsConfig.CertificateChain) > 0 {
- config.Certificates = []tls.Certificate{cert}
- }
- return config, nil
-}
-
-// GetTLSConfigurationForServer returns a tls.Config instance for use by a server application.
-func GetTLSConfigurationForServer(s2AStream stream.S2AStream, tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode) (*tls.Config, error) {
- return &tls.Config{
- GetConfigForClient: ClientConfig(tokenManager, localIdentities, verificationMode, s2AStream),
- }, nil
-}
-
-// ClientConfig builds a TLS config for a server to establish a secure
-// connection with a client, based on SNI communicated during ClientHello.
-// Ensures that server presents the correct certificate to establish a TLS
-// connection.
-func ClientConfig(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode, s2AStream stream.S2AStream) func(chi *tls.ClientHelloInfo) (*tls.Config, error) {
- return func(chi *tls.ClientHelloInfo) (*tls.Config, error) {
- tlsConfig, err := getServerConfigFromS2Av2(tokenManager, localIdentities, chi.ServerName, s2AStream)
- if err != nil {
- return nil, err
- }
-
- var cert tls.Certificate
- for i, v := range tlsConfig.CertificateChain {
- // Populate Certificates field.
- block, _ := pem.Decode([]byte(v))
- if block == nil {
- return nil, errors.New("certificate in CertificateChain obtained from S2Av2 is empty")
- }
- x509Cert, err := x509.ParseCertificate(block.Bytes)
- if err != nil {
- return nil, err
- }
- cert.Certificate = append(cert.Certificate, x509Cert.Raw)
- if i == 0 {
- cert.Leaf = x509Cert
- }
- }
-
- cert.PrivateKey = remotesigner.New(cert.Leaf, s2AStream)
- if cert.PrivateKey == nil {
- return nil, errors.New("failed to retrieve Private Key from Remote Signer Library")
- }
-
- minVersion, maxVersion, err := getTLSMinMaxVersionsServer(tlsConfig)
- if err != nil {
- return nil, err
- }
-
- clientAuth := getTLSClientAuthType(tlsConfig)
-
- var cipherSuites []uint16
- cipherSuites = getCipherSuites(tlsConfig.Ciphersuites)
-
- // Create mTLS credentials for server.
- return &tls.Config{
- Certificates: []tls.Certificate{cert},
- VerifyPeerCertificate: certverifier.VerifyClientCertificateChain(verificationMode, s2AStream),
- ClientAuth: clientAuth,
- CipherSuites: cipherSuites,
- SessionTicketsDisabled: true,
- MinVersion: minVersion,
- MaxVersion: maxVersion,
- NextProtos: []string{h2},
- }, nil
- }
-}
-
-func getCipherSuites(tlsConfigCipherSuites []commonpb.Ciphersuite) []uint16 {
- var tlsGoCipherSuites []uint16
- for _, v := range tlsConfigCipherSuites {
- s := getTLSCipherSuite(v)
- if s != 0xffff {
- tlsGoCipherSuites = append(tlsGoCipherSuites, s)
- }
- }
- return tlsGoCipherSuites
-}
-
-func getTLSCipherSuite(tlsCipherSuite commonpb.Ciphersuite) uint16 {
- switch tlsCipherSuite {
- case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
- return tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
- case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:
- return tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384
- case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256:
- return tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
- case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_128_GCM_SHA256:
- return tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
- case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_AES_256_GCM_SHA384:
- return tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
- case commonpb.Ciphersuite_CIPHERSUITE_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256:
- return tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
- default:
- return 0xffff
- }
-}
-
-func getServerConfigFromS2Av2(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity, sni string, s2AStream stream.S2AStream) (*s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration, error) {
- authMechanisms := getAuthMechanisms(tokenManager, localIdentities)
- var locID *commonpbv1.Identity
- if localIdentities != nil {
- locID = localIdentities[0]
- }
-
- if err := s2AStream.Send(&s2av2pb.SessionReq{
- LocalIdentity: locID,
- AuthenticationMechanisms: authMechanisms,
- ReqOneof: &s2av2pb.SessionReq_GetTlsConfigurationReq{
- GetTlsConfigurationReq: &s2av2pb.GetTlsConfigurationReq{
- ConnectionSide: commonpb.ConnectionSide_CONNECTION_SIDE_SERVER,
- Sni: sni,
- },
- },
- }); err != nil {
- return nil, err
- }
-
- resp, err := s2AStream.Recv()
- if err != nil {
- return nil, err
- }
-
- // TODO(rmehta19): Add unit test for this if statement.
- if (resp.GetStatus() != nil) && (resp.GetStatus().Code != uint32(codes.OK)) {
- return nil, fmt.Errorf("failed to get TLS configuration from S2A: %d, %v", resp.GetStatus().Code, resp.GetStatus().Details)
- }
-
- return resp.GetGetTlsConfigurationResp().GetServerTlsConfiguration(), nil
-}
-
-func getTLSClientAuthType(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration) tls.ClientAuthType {
- var clientAuth tls.ClientAuthType
- switch x := tlsConfig.RequestClientCertificate; x {
- case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_DONT_REQUEST_CLIENT_CERTIFICATE:
- clientAuth = tls.NoClientCert
- case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_BUT_DONT_VERIFY:
- clientAuth = tls.RequestClientCert
- case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY:
- // This case actually maps to tls.VerifyClientCertIfGiven. However this
- // mapping triggers normal verification, followed by custom verification,
- // specified in VerifyPeerCertificate. To bypass normal verification, and
- // only do custom verification we set clientAuth to RequireAnyClientCert or
- // RequestClientCert. See https://github.com/google/s2a-go/pull/43 for full
- // discussion.
- clientAuth = tls.RequireAnyClientCert
- case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY:
- clientAuth = tls.RequireAnyClientCert
- case s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY:
- // This case actually maps to tls.RequireAndVerifyClientCert. However this
- // mapping triggers normal verification, followed by custom verification,
- // specified in VerifyPeerCertificate. To bypass normal verification, and
- // only do custom verification we set clientAuth to RequireAnyClientCert or
- // RequestClientCert. See https://github.com/google/s2a-go/pull/43 for full
- // discussion.
- clientAuth = tls.RequireAnyClientCert
- default:
- clientAuth = tls.RequireAnyClientCert
- }
- return clientAuth
-}
-
-func getAuthMechanisms(tokenManager tokenmanager.AccessTokenManager, localIdentities []*commonpbv1.Identity) []*s2av2pb.AuthenticationMechanism {
- if tokenManager == nil {
- return nil
- }
- if len(localIdentities) == 0 {
- token, err := tokenManager.DefaultToken()
- if err != nil {
- grpclog.Infof("Unable to get token for empty local identity: %v", err)
- return nil
- }
- return []*s2av2pb.AuthenticationMechanism{
- {
- MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{
- Token: token,
- },
- },
- }
- }
- var authMechanisms []*s2av2pb.AuthenticationMechanism
- for _, localIdentity := range localIdentities {
- if localIdentity == nil {
- token, err := tokenManager.DefaultToken()
- if err != nil {
- grpclog.Infof("Unable to get default token for local identity %v: %v", localIdentity, err)
- continue
- }
- authMechanisms = append(authMechanisms, &s2av2pb.AuthenticationMechanism{
- Identity: localIdentity,
- MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{
- Token: token,
- },
- })
- } else {
- token, err := tokenManager.Token(localIdentity)
- if err != nil {
- grpclog.Infof("Unable to get token for local identity %v: %v", localIdentity, err)
- continue
- }
- authMechanisms = append(authMechanisms, &s2av2pb.AuthenticationMechanism{
- Identity: localIdentity,
- MechanismOneof: &s2av2pb.AuthenticationMechanism_Token{
- Token: token,
- },
- })
- }
- }
- return authMechanisms
-}
-
-// TODO(rmehta19): refactor switch statements into a helper function.
-func getTLSMinMaxVersionsClient(tlsConfig *s2av2pb.GetTlsConfigurationResp_ClientTlsConfiguration) (uint16, uint16, error) {
- // Map S2Av2 TLSVersion to consts defined in tls package.
- var minVersion uint16
- var maxVersion uint16
- switch x := tlsConfig.MinTlsVersion; x {
- case commonpb.TLSVersion_TLS_VERSION_1_0:
- minVersion = tls.VersionTLS10
- case commonpb.TLSVersion_TLS_VERSION_1_1:
- minVersion = tls.VersionTLS11
- case commonpb.TLSVersion_TLS_VERSION_1_2:
- minVersion = tls.VersionTLS12
- case commonpb.TLSVersion_TLS_VERSION_1_3:
- minVersion = tls.VersionTLS13
- default:
- return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MinTlsVersion: %v", x)
- }
-
- switch x := tlsConfig.MaxTlsVersion; x {
- case commonpb.TLSVersion_TLS_VERSION_1_0:
- maxVersion = tls.VersionTLS10
- case commonpb.TLSVersion_TLS_VERSION_1_1:
- maxVersion = tls.VersionTLS11
- case commonpb.TLSVersion_TLS_VERSION_1_2:
- maxVersion = tls.VersionTLS12
- case commonpb.TLSVersion_TLS_VERSION_1_3:
- maxVersion = tls.VersionTLS13
- default:
- return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MaxTlsVersion: %v", x)
- }
- if minVersion > maxVersion {
- return minVersion, maxVersion, errors.New("S2Av2 provided minVersion > maxVersion")
- }
- return minVersion, maxVersion, nil
-}
-
-func getTLSMinMaxVersionsServer(tlsConfig *s2av2pb.GetTlsConfigurationResp_ServerTlsConfiguration) (uint16, uint16, error) {
- // Map S2Av2 TLSVersion to consts defined in tls package.
- var minVersion uint16
- var maxVersion uint16
- switch x := tlsConfig.MinTlsVersion; x {
- case commonpb.TLSVersion_TLS_VERSION_1_0:
- minVersion = tls.VersionTLS10
- case commonpb.TLSVersion_TLS_VERSION_1_1:
- minVersion = tls.VersionTLS11
- case commonpb.TLSVersion_TLS_VERSION_1_2:
- minVersion = tls.VersionTLS12
- case commonpb.TLSVersion_TLS_VERSION_1_3:
- minVersion = tls.VersionTLS13
- default:
- return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MinTlsVersion: %v", x)
- }
-
- switch x := tlsConfig.MaxTlsVersion; x {
- case commonpb.TLSVersion_TLS_VERSION_1_0:
- maxVersion = tls.VersionTLS10
- case commonpb.TLSVersion_TLS_VERSION_1_1:
- maxVersion = tls.VersionTLS11
- case commonpb.TLSVersion_TLS_VERSION_1_2:
- maxVersion = tls.VersionTLS12
- case commonpb.TLSVersion_TLS_VERSION_1_3:
- maxVersion = tls.VersionTLS13
- default:
- return minVersion, maxVersion, fmt.Errorf("S2Av2 provided invalid MaxTlsVersion: %v", x)
- }
- if minVersion > maxVersion {
- return minVersion, maxVersion, errors.New("S2Av2 provided minVersion > maxVersion")
- }
- return minVersion, maxVersion, nil
-}
diff --git a/vendor/github.com/google/s2a-go/retry/retry.go b/vendor/github.com/google/s2a-go/retry/retry.go
deleted file mode 100644
index f7e0a237..00000000
--- a/vendor/github.com/google/s2a-go/retry/retry.go
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- *
- * Copyright 2023 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package retry provides a retry helper for talking to S2A gRPC server.
-// The implementation is modeled after
-// https://github.com/googleapis/google-cloud-go/blob/main/compute/metadata/retry.go
-package retry
-
-import (
- "context"
- "math/rand"
- "time"
-
- "google.golang.org/grpc/grpclog"
-)
-
-const (
- maxRetryAttempts = 5
- maxRetryForLoops = 10
-)
-
-type defaultBackoff struct {
- max time.Duration
- mul float64
- cur time.Duration
-}
-
-// Pause returns a duration, which is used as the backoff wait time
-// before the next retry.
-func (b *defaultBackoff) Pause() time.Duration {
- d := time.Duration(1 + rand.Int63n(int64(b.cur)))
- b.cur = time.Duration(float64(b.cur) * b.mul)
- if b.cur > b.max {
- b.cur = b.max
- }
- return d
-}
-
-// Sleep will wait for the specified duration or return on context
-// expiration.
-func Sleep(ctx context.Context, d time.Duration) error {
- t := time.NewTimer(d)
- select {
- case <-ctx.Done():
- t.Stop()
- return ctx.Err()
- case <-t.C:
- return nil
- }
-}
-
-// NewRetryer creates an instance of S2ARetryer using the defaultBackoff
-// implementation.
-var NewRetryer = func() *S2ARetryer {
- return &S2ARetryer{bo: &defaultBackoff{
- cur: 100 * time.Millisecond,
- max: 30 * time.Second,
- mul: 2,
- }}
-}
-
-type backoff interface {
- Pause() time.Duration
-}
-
-// S2ARetryer implements a retry helper for talking to S2A gRPC server.
-type S2ARetryer struct {
- bo backoff
- attempts int
-}
-
-// Attempts return the number of retries attempted.
-func (r *S2ARetryer) Attempts() int {
- return r.attempts
-}
-
-// Retry returns a boolean indicating whether retry should be performed
-// and the backoff duration.
-func (r *S2ARetryer) Retry(err error) (time.Duration, bool) {
- if err == nil {
- return 0, false
- }
- if r.attempts >= maxRetryAttempts {
- return 0, false
- }
- r.attempts++
- return r.bo.Pause(), true
-}
-
-// Run uses S2ARetryer to execute the function passed in, until success or reaching
-// max number of retry attempts.
-func Run(ctx context.Context, f func() error) {
- retryer := NewRetryer()
- forLoopCnt := 0
- var err error
- for {
- err = f()
- if bo, shouldRetry := retryer.Retry(err); shouldRetry {
- if grpclog.V(1) {
- grpclog.Infof("will attempt retry: %v", err)
- }
- if ctx.Err() != nil {
- if grpclog.V(1) {
- grpclog.Infof("exit retry loop due to context error: %v", ctx.Err())
- }
- break
- }
- if errSleep := Sleep(ctx, bo); errSleep != nil {
- if grpclog.V(1) {
- grpclog.Infof("exit retry loop due to sleep error: %v", errSleep)
- }
- break
- }
- // This shouldn't happen, just make sure we are not stuck in the for loops.
- forLoopCnt++
- if forLoopCnt > maxRetryForLoops {
- if grpclog.V(1) {
- grpclog.Infof("exit the for loop after too many retries")
- }
- break
- }
- continue
- }
- if grpclog.V(1) {
- grpclog.Infof("retry conditions not met, exit the loop")
- }
- break
- }
-}
diff --git a/vendor/github.com/google/s2a-go/s2a.go b/vendor/github.com/google/s2a-go/s2a.go
deleted file mode 100644
index 5ecb06f9..00000000
--- a/vendor/github.com/google/s2a-go/s2a.go
+++ /dev/null
@@ -1,427 +0,0 @@
-/*
- *
- * Copyright 2021 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package s2a provides the S2A transport credentials used by a gRPC
-// application.
-package s2a
-
-import (
- "context"
- "crypto/tls"
- "errors"
- "fmt"
- "net"
- "sync"
- "time"
-
- "github.com/golang/protobuf/proto"
- "github.com/google/s2a-go/fallback"
- "github.com/google/s2a-go/internal/handshaker"
- "github.com/google/s2a-go/internal/handshaker/service"
- "github.com/google/s2a-go/internal/tokenmanager"
- "github.com/google/s2a-go/internal/v2"
- "github.com/google/s2a-go/retry"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
-
- commonpb "github.com/google/s2a-go/internal/proto/common_go_proto"
- s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto"
-)
-
-const (
- s2aSecurityProtocol = "tls"
- // defaultTimeout specifies the default server handshake timeout.
- defaultTimeout = 30.0 * time.Second
-)
-
-// s2aTransportCreds are the transport credentials required for establishing
-// a secure connection using the S2A. They implement the
-// credentials.TransportCredentials interface.
-type s2aTransportCreds struct {
- info *credentials.ProtocolInfo
- minTLSVersion commonpb.TLSVersion
- maxTLSVersion commonpb.TLSVersion
- // tlsCiphersuites contains the ciphersuites used in the S2A connection.
- // Note that these are currently unconfigurable.
- tlsCiphersuites []commonpb.Ciphersuite
- // localIdentity should only be used by the client.
- localIdentity *commonpb.Identity
- // localIdentities should only be used by the server.
- localIdentities []*commonpb.Identity
- // targetIdentities should only be used by the client.
- targetIdentities []*commonpb.Identity
- isClient bool
- s2aAddr string
- ensureProcessSessionTickets *sync.WaitGroup
-}
-
-// NewClientCreds returns a client-side transport credentials object that uses
-// the S2A to establish a secure connection with a server.
-func NewClientCreds(opts *ClientOptions) (credentials.TransportCredentials, error) {
- if opts == nil {
- return nil, errors.New("nil client options")
- }
- var targetIdentities []*commonpb.Identity
- for _, targetIdentity := range opts.TargetIdentities {
- protoTargetIdentity, err := toProtoIdentity(targetIdentity)
- if err != nil {
- return nil, err
- }
- targetIdentities = append(targetIdentities, protoTargetIdentity)
- }
- localIdentity, err := toProtoIdentity(opts.LocalIdentity)
- if err != nil {
- return nil, err
- }
- if opts.EnableLegacyMode {
- return &s2aTransportCreds{
- info: &credentials.ProtocolInfo{
- SecurityProtocol: s2aSecurityProtocol,
- },
- minTLSVersion: commonpb.TLSVersion_TLS1_3,
- maxTLSVersion: commonpb.TLSVersion_TLS1_3,
- tlsCiphersuites: []commonpb.Ciphersuite{
- commonpb.Ciphersuite_AES_128_GCM_SHA256,
- commonpb.Ciphersuite_AES_256_GCM_SHA384,
- commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256,
- },
- localIdentity: localIdentity,
- targetIdentities: targetIdentities,
- isClient: true,
- s2aAddr: opts.S2AAddress,
- ensureProcessSessionTickets: opts.EnsureProcessSessionTickets,
- }, nil
- }
- verificationMode := getVerificationMode(opts.VerificationMode)
- var fallbackFunc fallback.ClientHandshake
- if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackClientHandshakeFunc != nil {
- fallbackFunc = opts.FallbackOpts.FallbackClientHandshakeFunc
- }
- return v2.NewClientCreds(opts.S2AAddress, opts.TransportCreds, localIdentity, verificationMode, fallbackFunc, opts.getS2AStream, opts.serverAuthorizationPolicy)
-}
-
-// NewServerCreds returns a server-side transport credentials object that uses
-// the S2A to establish a secure connection with a client.
-func NewServerCreds(opts *ServerOptions) (credentials.TransportCredentials, error) {
- if opts == nil {
- return nil, errors.New("nil server options")
- }
- var localIdentities []*commonpb.Identity
- for _, localIdentity := range opts.LocalIdentities {
- protoLocalIdentity, err := toProtoIdentity(localIdentity)
- if err != nil {
- return nil, err
- }
- localIdentities = append(localIdentities, protoLocalIdentity)
- }
- if opts.EnableLegacyMode {
- return &s2aTransportCreds{
- info: &credentials.ProtocolInfo{
- SecurityProtocol: s2aSecurityProtocol,
- },
- minTLSVersion: commonpb.TLSVersion_TLS1_3,
- maxTLSVersion: commonpb.TLSVersion_TLS1_3,
- tlsCiphersuites: []commonpb.Ciphersuite{
- commonpb.Ciphersuite_AES_128_GCM_SHA256,
- commonpb.Ciphersuite_AES_256_GCM_SHA384,
- commonpb.Ciphersuite_CHACHA20_POLY1305_SHA256,
- },
- localIdentities: localIdentities,
- isClient: false,
- s2aAddr: opts.S2AAddress,
- }, nil
- }
- verificationMode := getVerificationMode(opts.VerificationMode)
- return v2.NewServerCreds(opts.S2AAddress, opts.TransportCreds, localIdentities, verificationMode, opts.getS2AStream)
-}
-
-// ClientHandshake initiates a client-side TLS handshake using the S2A.
-func (c *s2aTransportCreds) ClientHandshake(ctx context.Context, serverAuthority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
- if !c.isClient {
- return nil, nil, errors.New("client handshake called using server transport credentials")
- }
-
- var cancel context.CancelFunc
- ctx, cancel = context.WithCancel(ctx)
- defer cancel()
-
- // Connect to the S2A.
- hsConn, err := service.Dial(ctx, c.s2aAddr, nil)
- if err != nil {
- grpclog.Infof("Failed to connect to S2A: %v", err)
- return nil, nil, err
- }
-
- opts := &handshaker.ClientHandshakerOptions{
- MinTLSVersion: c.minTLSVersion,
- MaxTLSVersion: c.maxTLSVersion,
- TLSCiphersuites: c.tlsCiphersuites,
- TargetIdentities: c.targetIdentities,
- LocalIdentity: c.localIdentity,
- TargetName: serverAuthority,
- EnsureProcessSessionTickets: c.ensureProcessSessionTickets,
- }
- chs, err := handshaker.NewClientHandshaker(ctx, hsConn, rawConn, c.s2aAddr, opts)
- if err != nil {
- grpclog.Infof("Call to handshaker.NewClientHandshaker failed: %v", err)
- return nil, nil, err
- }
- defer func() {
- if err != nil {
- if closeErr := chs.Close(); closeErr != nil {
- grpclog.Infof("Close failed unexpectedly: %v", err)
- err = fmt.Errorf("%v: close unexpectedly failed: %v", err, closeErr)
- }
- }
- }()
-
- secConn, authInfo, err := chs.ClientHandshake(context.Background())
- if err != nil {
- grpclog.Infof("Handshake failed: %v", err)
- return nil, nil, err
- }
- return secConn, authInfo, nil
-}
-
-// ServerHandshake initiates a server-side TLS handshake using the S2A.
-func (c *s2aTransportCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
- if c.isClient {
- return nil, nil, errors.New("server handshake called using client transport credentials")
- }
-
- ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout)
- defer cancel()
-
- // Connect to the S2A.
- hsConn, err := service.Dial(ctx, c.s2aAddr, nil)
- if err != nil {
- grpclog.Infof("Failed to connect to S2A: %v", err)
- return nil, nil, err
- }
-
- opts := &handshaker.ServerHandshakerOptions{
- MinTLSVersion: c.minTLSVersion,
- MaxTLSVersion: c.maxTLSVersion,
- TLSCiphersuites: c.tlsCiphersuites,
- LocalIdentities: c.localIdentities,
- }
- shs, err := handshaker.NewServerHandshaker(ctx, hsConn, rawConn, c.s2aAddr, opts)
- if err != nil {
- grpclog.Infof("Call to handshaker.NewServerHandshaker failed: %v", err)
- return nil, nil, err
- }
- defer func() {
- if err != nil {
- if closeErr := shs.Close(); closeErr != nil {
- grpclog.Infof("Close failed unexpectedly: %v", err)
- err = fmt.Errorf("%v: close unexpectedly failed: %v", err, closeErr)
- }
- }
- }()
-
- secConn, authInfo, err := shs.ServerHandshake(context.Background())
- if err != nil {
- grpclog.Infof("Handshake failed: %v", err)
- return nil, nil, err
- }
- return secConn, authInfo, nil
-}
-
-func (c *s2aTransportCreds) Info() credentials.ProtocolInfo {
- return *c.info
-}
-
-func (c *s2aTransportCreds) Clone() credentials.TransportCredentials {
- info := *c.info
- var localIdentity *commonpb.Identity
- if c.localIdentity != nil {
- localIdentity = proto.Clone(c.localIdentity).(*commonpb.Identity)
- }
- var localIdentities []*commonpb.Identity
- if c.localIdentities != nil {
- localIdentities = make([]*commonpb.Identity, len(c.localIdentities))
- for i, localIdentity := range c.localIdentities {
- localIdentities[i] = proto.Clone(localIdentity).(*commonpb.Identity)
- }
- }
- var targetIdentities []*commonpb.Identity
- if c.targetIdentities != nil {
- targetIdentities = make([]*commonpb.Identity, len(c.targetIdentities))
- for i, targetIdentity := range c.targetIdentities {
- targetIdentities[i] = proto.Clone(targetIdentity).(*commonpb.Identity)
- }
- }
- return &s2aTransportCreds{
- info: &info,
- minTLSVersion: c.minTLSVersion,
- maxTLSVersion: c.maxTLSVersion,
- tlsCiphersuites: c.tlsCiphersuites,
- localIdentity: localIdentity,
- localIdentities: localIdentities,
- targetIdentities: targetIdentities,
- isClient: c.isClient,
- s2aAddr: c.s2aAddr,
- }
-}
-
-func (c *s2aTransportCreds) OverrideServerName(serverNameOverride string) error {
- c.info.ServerName = serverNameOverride
- return nil
-}
-
-// TLSClientConfigOptions specifies parameters for creating client TLS config.
-type TLSClientConfigOptions struct {
- // ServerName is required by s2a as the expected name when verifying the hostname found in server's certificate.
- // tlsConfig, _ := factory.Build(ctx, &s2a.TLSClientConfigOptions{
- // ServerName: "example.com",
- // })
- ServerName string
-}
-
-// TLSClientConfigFactory defines the interface for a client TLS config factory.
-type TLSClientConfigFactory interface {
- Build(ctx context.Context, opts *TLSClientConfigOptions) (*tls.Config, error)
-}
-
-// NewTLSClientConfigFactory returns an instance of s2aTLSClientConfigFactory.
-func NewTLSClientConfigFactory(opts *ClientOptions) (TLSClientConfigFactory, error) {
- if opts == nil {
- return nil, fmt.Errorf("opts must be non-nil")
- }
- if opts.EnableLegacyMode {
- return nil, fmt.Errorf("NewTLSClientConfigFactory only supports S2Av2")
- }
- tokenManager, err := tokenmanager.NewSingleTokenAccessTokenManager()
- if err != nil {
- // The only possible error is: access token not set in the environment,
- // which is okay in environments other than serverless.
- grpclog.Infof("Access token manager not initialized: %v", err)
- return &s2aTLSClientConfigFactory{
- s2av2Address: opts.S2AAddress,
- transportCreds: opts.TransportCreds,
- tokenManager: nil,
- verificationMode: getVerificationMode(opts.VerificationMode),
- serverAuthorizationPolicy: opts.serverAuthorizationPolicy,
- }, nil
- }
- return &s2aTLSClientConfigFactory{
- s2av2Address: opts.S2AAddress,
- transportCreds: opts.TransportCreds,
- tokenManager: tokenManager,
- verificationMode: getVerificationMode(opts.VerificationMode),
- serverAuthorizationPolicy: opts.serverAuthorizationPolicy,
- }, nil
-}
-
-type s2aTLSClientConfigFactory struct {
- s2av2Address string
- transportCreds credentials.TransportCredentials
- tokenManager tokenmanager.AccessTokenManager
- verificationMode s2av2pb.ValidatePeerCertificateChainReq_VerificationMode
- serverAuthorizationPolicy []byte
-}
-
-func (f *s2aTLSClientConfigFactory) Build(
- ctx context.Context, opts *TLSClientConfigOptions) (*tls.Config, error) {
- serverName := ""
- if opts != nil && opts.ServerName != "" {
- serverName = opts.ServerName
- }
- return v2.NewClientTLSConfig(ctx, f.s2av2Address, f.transportCreds, f.tokenManager, f.verificationMode, serverName, f.serverAuthorizationPolicy)
-}
-
-func getVerificationMode(verificationMode VerificationModeType) s2av2pb.ValidatePeerCertificateChainReq_VerificationMode {
- switch verificationMode {
- case ConnectToGoogle:
- return s2av2pb.ValidatePeerCertificateChainReq_CONNECT_TO_GOOGLE
- case Spiffe:
- return s2av2pb.ValidatePeerCertificateChainReq_SPIFFE
- default:
- return s2av2pb.ValidatePeerCertificateChainReq_UNSPECIFIED
- }
-}
-
-// NewS2ADialTLSContextFunc returns a dialer which establishes an MTLS connection using S2A.
-// Example use with http.RoundTripper:
-//
-// dialTLSContext := s2a.NewS2aDialTLSContextFunc(&s2a.ClientOptions{
-// S2AAddress: s2aAddress, // required
-// })
-// transport := http.DefaultTransport
-// transport.DialTLSContext = dialTLSContext
-func NewS2ADialTLSContextFunc(opts *ClientOptions) func(ctx context.Context, network, addr string) (net.Conn, error) {
-
- return func(ctx context.Context, network, addr string) (net.Conn, error) {
-
- fallback := func(err error) (net.Conn, error) {
- if opts.FallbackOpts != nil && opts.FallbackOpts.FallbackDialer != nil &&
- opts.FallbackOpts.FallbackDialer.Dialer != nil && opts.FallbackOpts.FallbackDialer.ServerAddr != "" {
- fbDialer := opts.FallbackOpts.FallbackDialer
- grpclog.Infof("fall back to dial: %s", fbDialer.ServerAddr)
- fbConn, fbErr := fbDialer.Dialer.DialContext(ctx, network, fbDialer.ServerAddr)
- if fbErr != nil {
- return nil, fmt.Errorf("error fallback to %s: %v; S2A error: %w", fbDialer.ServerAddr, fbErr, err)
- }
- return fbConn, nil
- }
- return nil, err
- }
-
- factory, err := NewTLSClientConfigFactory(opts)
- if err != nil {
- grpclog.Infof("error creating S2A client config factory: %v", err)
- return fallback(err)
- }
-
- serverName, _, err := net.SplitHostPort(addr)
- if err != nil {
- serverName = addr
- }
- timeoutCtx, cancel := context.WithTimeout(ctx, v2.GetS2ATimeout())
- defer cancel()
-
- var s2aTLSConfig *tls.Config
- retry.Run(timeoutCtx,
- func() error {
- s2aTLSConfig, err = factory.Build(timeoutCtx, &TLSClientConfigOptions{
- ServerName: serverName,
- })
- return err
- })
- if err != nil {
- grpclog.Infof("error building S2A TLS config: %v", err)
- return fallback(err)
- }
-
- s2aDialer := &tls.Dialer{
- Config: s2aTLSConfig,
- }
- var c net.Conn
- retry.Run(timeoutCtx,
- func() error {
- c, err = s2aDialer.DialContext(timeoutCtx, network, addr)
- return err
- })
- if err != nil {
- grpclog.Infof("error dialing with S2A to %s: %v", addr, err)
- return fallback(err)
- }
- grpclog.Infof("success dialing MTLS to %s with S2A", addr)
- return c, nil
- }
-}
diff --git a/vendor/github.com/google/s2a-go/s2a_options.go b/vendor/github.com/google/s2a-go/s2a_options.go
deleted file mode 100644
index fcdbc162..00000000
--- a/vendor/github.com/google/s2a-go/s2a_options.go
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- *
- * Copyright 2021 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package s2a
-
-import (
- "context"
- "crypto/tls"
- "errors"
- "sync"
-
- "github.com/google/s2a-go/fallback"
- "github.com/google/s2a-go/stream"
- "google.golang.org/grpc/credentials"
-
- s2apb "github.com/google/s2a-go/internal/proto/common_go_proto"
-)
-
-// Identity is the interface for S2A identities.
-type Identity interface {
- // Name returns the name of the identity.
- Name() string
-}
-
-type spiffeID struct {
- spiffeID string
-}
-
-func (s *spiffeID) Name() string { return s.spiffeID }
-
-// NewSpiffeID creates a SPIFFE ID from id.
-func NewSpiffeID(id string) Identity {
- return &spiffeID{spiffeID: id}
-}
-
-type hostname struct {
- hostname string
-}
-
-func (h *hostname) Name() string { return h.hostname }
-
-// NewHostname creates a hostname from name.
-func NewHostname(name string) Identity {
- return &hostname{hostname: name}
-}
-
-type uid struct {
- uid string
-}
-
-func (h *uid) Name() string { return h.uid }
-
-// NewUID creates a UID from name.
-func NewUID(name string) Identity {
- return &uid{uid: name}
-}
-
-// VerificationModeType specifies the mode that S2A must use to verify the peer
-// certificate chain.
-type VerificationModeType int
-
-// Three types of verification modes.
-const (
- Unspecified = iota
- ConnectToGoogle
- Spiffe
-)
-
-// ClientOptions contains the client-side options used to establish a secure
-// channel using the S2A handshaker service.
-type ClientOptions struct {
- // TargetIdentities contains a list of allowed server identities. One of the
- // target identities should match the peer identity in the handshake
- // result; otherwise, the handshake fails.
- TargetIdentities []Identity
- // LocalIdentity is the local identity of the client application. If none is
- // provided, then the S2A will choose the default identity, if one exists.
- LocalIdentity Identity
- // S2AAddress is the address of the S2A.
- S2AAddress string
- // Optional transport credentials.
- // If set, this will be used for the gRPC connection to the S2A server.
- TransportCreds credentials.TransportCredentials
- // EnsureProcessSessionTickets waits for all session tickets to be sent to
- // S2A before a process completes.
- //
- // This functionality is crucial for processes that complete very soon after
- // using S2A to establish a TLS connection, but it can be ignored for longer
- // lived processes.
- //
- // Usage example:
- // func main() {
- // var ensureProcessSessionTickets sync.WaitGroup
- // clientOpts := &s2a.ClientOptions{
- // EnsureProcessSessionTickets: &ensureProcessSessionTickets,
- // // Set other members.
- // }
- // creds, _ := s2a.NewClientCreds(clientOpts)
- // conn, _ := grpc.Dial(serverAddr, grpc.WithTransportCredentials(creds))
- // defer conn.Close()
- //
- // // Make RPC call.
- //
- // // The process terminates right after the RPC call ends.
- // // ensureProcessSessionTickets can be used to ensure resumption
- // // tickets are fully processed. If the process is long-lived, using
- // // ensureProcessSessionTickets is not necessary.
- // ensureProcessSessionTickets.Wait()
- // }
- EnsureProcessSessionTickets *sync.WaitGroup
- // If true, enables the use of legacy S2Av1.
- EnableLegacyMode bool
- // VerificationMode specifies the mode that S2A must use to verify the
- // peer certificate chain.
- VerificationMode VerificationModeType
-
- // Optional fallback after dialing with S2A fails.
- FallbackOpts *FallbackOptions
-
- // Generates an S2AStream interface for talking to the S2A server.
- getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)
-
- // Serialized user specified policy for server authorization.
- serverAuthorizationPolicy []byte
-}
-
-// FallbackOptions prescribes the fallback logic that should be taken if the application fails to connect with S2A.
-type FallbackOptions struct {
- // FallbackClientHandshakeFunc is used to specify fallback behavior when calling s2a.NewClientCreds().
- // It will be called by ClientHandshake function, after handshake with S2A fails.
- // s2a.NewClientCreds() ignores the other FallbackDialer field.
- FallbackClientHandshakeFunc fallback.ClientHandshake
-
- // FallbackDialer is used to specify fallback behavior when calling s2a.NewS2aDialTLSContextFunc().
- // It passes in a custom fallback dialer and server address to use after dialing with S2A fails.
- // s2a.NewS2aDialTLSContextFunc() ignores the other FallbackClientHandshakeFunc field.
- FallbackDialer *FallbackDialer
-}
-
-// FallbackDialer contains a fallback tls.Dialer and a server address to connect to.
-type FallbackDialer struct {
- // Dialer specifies a fallback tls.Dialer.
- Dialer *tls.Dialer
- // ServerAddr is used by Dialer to establish fallback connection.
- ServerAddr string
-}
-
-// DefaultClientOptions returns the default client options.
-func DefaultClientOptions(s2aAddress string) *ClientOptions {
- return &ClientOptions{
- S2AAddress: s2aAddress,
- VerificationMode: ConnectToGoogle,
- }
-}
-
-// ServerOptions contains the server-side options used to establish a secure
-// channel using the S2A handshaker service.
-type ServerOptions struct {
- // LocalIdentities is the list of local identities that may be assumed by
- // the server. If no local identity is specified, then the S2A chooses a
- // default local identity, if one exists.
- LocalIdentities []Identity
- // S2AAddress is the address of the S2A.
- S2AAddress string
- // Optional transport credentials.
- // If set, this will be used for the gRPC connection to the S2A server.
- TransportCreds credentials.TransportCredentials
- // If true, enables the use of legacy S2Av1.
- EnableLegacyMode bool
- // VerificationMode specifies the mode that S2A must use to verify the
- // peer certificate chain.
- VerificationMode VerificationModeType
-
- // Generates an S2AStream interface for talking to the S2A server.
- getS2AStream func(ctx context.Context, s2av2Address string) (stream.S2AStream, error)
-}
-
-// DefaultServerOptions returns the default server options.
-func DefaultServerOptions(s2aAddress string) *ServerOptions {
- return &ServerOptions{
- S2AAddress: s2aAddress,
- VerificationMode: ConnectToGoogle,
- }
-}
-
-func toProtoIdentity(identity Identity) (*s2apb.Identity, error) {
- if identity == nil {
- return nil, nil
- }
- switch id := identity.(type) {
- case *spiffeID:
- return &s2apb.Identity{IdentityOneof: &s2apb.Identity_SpiffeId{SpiffeId: id.Name()}}, nil
- case *hostname:
- return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Hostname{Hostname: id.Name()}}, nil
- case *uid:
- return &s2apb.Identity{IdentityOneof: &s2apb.Identity_Uid{Uid: id.Name()}}, nil
- default:
- return nil, errors.New("unrecognized identity type")
- }
-}
diff --git a/vendor/github.com/google/s2a-go/s2a_utils.go b/vendor/github.com/google/s2a-go/s2a_utils.go
deleted file mode 100644
index d649cc46..00000000
--- a/vendor/github.com/google/s2a-go/s2a_utils.go
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- *
- * Copyright 2021 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package s2a
-
-import (
- "context"
- "errors"
-
- commonpb "github.com/google/s2a-go/internal/proto/common_go_proto"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/peer"
-)
-
-// AuthInfo exposes security information from the S2A to the application.
-type AuthInfo interface {
- // AuthType returns the authentication type.
- AuthType() string
- // ApplicationProtocol returns the application protocol, e.g. "grpc".
- ApplicationProtocol() string
- // TLSVersion returns the TLS version negotiated during the handshake.
- TLSVersion() commonpb.TLSVersion
- // Ciphersuite returns the ciphersuite negotiated during the handshake.
- Ciphersuite() commonpb.Ciphersuite
- // PeerIdentity returns the authenticated identity of the peer.
- PeerIdentity() *commonpb.Identity
- // LocalIdentity returns the local identity of the application used during
- // session setup.
- LocalIdentity() *commonpb.Identity
- // PeerCertFingerprint returns the SHA256 hash of the peer certificate used in
- // the S2A handshake.
- PeerCertFingerprint() []byte
- // LocalCertFingerprint returns the SHA256 hash of the local certificate used
- // in the S2A handshake.
- LocalCertFingerprint() []byte
- // IsHandshakeResumed returns true if a cached session was used to resume
- // the handshake.
- IsHandshakeResumed() bool
- // SecurityLevel returns the security level of the connection.
- SecurityLevel() credentials.SecurityLevel
-}
-
-// AuthInfoFromPeer extracts the authinfo.S2AAuthInfo object from the given
-// peer, if it exists. This API should be used by gRPC clients after
-// obtaining a peer object using the grpc.Peer() CallOption.
-func AuthInfoFromPeer(p *peer.Peer) (AuthInfo, error) {
- s2aAuthInfo, ok := p.AuthInfo.(AuthInfo)
- if !ok {
- return nil, errors.New("no S2AAuthInfo found in Peer")
- }
- return s2aAuthInfo, nil
-}
-
-// AuthInfoFromContext extracts the authinfo.S2AAuthInfo object from the given
-// context, if it exists. This API should be used by gRPC server RPC handlers
-// to get information about the peer. On the client-side, use the grpc.Peer()
-// CallOption and the AuthInfoFromPeer function.
-func AuthInfoFromContext(ctx context.Context) (AuthInfo, error) {
- p, ok := peer.FromContext(ctx)
- if !ok {
- return nil, errors.New("no Peer found in Context")
- }
- return AuthInfoFromPeer(p)
-}
diff --git a/vendor/github.com/google/s2a-go/stream/s2a_stream.go b/vendor/github.com/google/s2a-go/stream/s2a_stream.go
deleted file mode 100644
index 584bf32b..00000000
--- a/vendor/github.com/google/s2a-go/stream/s2a_stream.go
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- *
- * Copyright 2023 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * https://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package stream provides an interface for bidirectional streaming to the S2A server.
-package stream
-
-import (
- s2av2pb "github.com/google/s2a-go/internal/proto/v2/s2a_go_proto"
-)
-
-// S2AStream defines the operation for communicating with the S2A server over a bidirectional stream.
-type S2AStream interface {
- // Send sends the message to the S2A server.
- Send(*s2av2pb.SessionReq) error
- // Recv receives the message from the S2A server.
- Recv() (*s2av2pb.SessionResp, error)
- // Closes the channel to the S2A server.
- CloseSend() error
-}
diff --git a/vendor/github.com/google/s2a-go/testdata/client_cert.pem b/vendor/github.com/google/s2a-go/testdata/client_cert.pem
deleted file mode 100644
index 493a5a26..00000000
--- a/vendor/github.com/google/s2a-go/testdata/client_cert.pem
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIID8TCCAtmgAwIBAgIUKXNlBRVe6UepjQUijIFPZBd/4qYwDQYJKoZIhvcNAQEL
-BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2
-YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE
-AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN
-MjIwNTMxMjAwMzE1WhcNNDIwNTI2MjAwMzE1WjCBhzELMAkGA1UEBhMCVVMxCzAJ
-BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx
-ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ
-KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAOOFuIucH7XXfohGxKd3uR/ihUA/LdduR9I8kfpUEbq5BOt8xZe5/Yn9
-a1ozEHVW6cOAbHbnwAR8tkSgZ/t42QIA2k77HWU1Jh2xiEIsJivo3imm4/kZWuR0
-OqPh7MhzxpR/hvNwpI5mJsAVBWFMa5KtecFZLnyZtwHylrRN1QXzuLrOxuKFufK3
-RKbTABScn5RbZL976H/jgfSeXrbt242NrIoBnVe6fRbekbq2DQ6zFArbQMUgHjHK
-P0UqBgdr1QmHfi9KytFyx9BTP3gXWnWIu+bY7/v7qKJMHFwGETo+dCLWYevJL316
-HnLfhApDMfP8U+Yv/y1N/YvgaSOSlEcCAwEAAaNTMFEwHQYDVR0OBBYEFKhAU4nu
-0h/lrnggbIGvx4ej0WklMB8GA1UdIwQYMBaAFKhAU4nu0h/lrnggbIGvx4ej0Wkl
-MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAE/6NghzQ5fu6yR6
-EHKbj/YMrFdT7aGn5n2sAf7wJ33LIhiFHkpWBsVlm7rDtZtwhe891ZK/P60anlg9
-/P0Ua53tSRVRmCvTnEbXWOVMN4is6MsR7BlmzUxl4AtIn7jbeifEwRL7B4xDYmdA
-QrQnsqoz45dLgS5xK4WDqXATP09Q91xQDuhud/b+A4jrvgwFASmL7rMIZbp4f1JQ
-nlnl/9VoTBQBvJiWkDUtQDMpRLtauddEkv4AGz75p5IspXWD6cOemuh2iQec11xD
-X20rs2WZbAcAiUa3nmy8OKYw435vmpj8gp39WYbX/Yx9TymrFFbVY92wYn+quTco
-pKklVz0=
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/testdata/client_key.pem b/vendor/github.com/google/s2a-go/testdata/client_key.pem
deleted file mode 100644
index 55a7f10c..00000000
--- a/vendor/github.com/google/s2a-go/testdata/client_key.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEogIBAAKCAQEA44W4i5wftdd+iEbEp3e5H+KFQD8t125H0jyR+lQRurkE63zF
-l7n9if1rWjMQdVbpw4BsdufABHy2RKBn+3jZAgDaTvsdZTUmHbGIQiwmK+jeKabj
-+Rla5HQ6o+HsyHPGlH+G83CkjmYmwBUFYUxrkq15wVkufJm3AfKWtE3VBfO4us7G
-4oW58rdEptMAFJyflFtkv3vof+OB9J5etu3bjY2sigGdV7p9Ft6RurYNDrMUCttA
-xSAeMco/RSoGB2vVCYd+L0rK0XLH0FM/eBdadYi75tjv+/uookwcXAYROj50ItZh
-68kvfXoect+ECkMx8/xT5i//LU39i+BpI5KURwIDAQABAoIBABgyjo/6iLzUMFbZ
-/+w3pW6orrdIgN2akvTfED9pVYFgUA+jc3hRhY95bkNnjuaL2cy7Cc4Tk65mfRQL
-Y0OxdJLr+EvSFSxAXM9npDA1ddHRsF8JqtFBSxNk8R+g1Yf0GDiO35Fgd3/ViWWA
-VtQkRoSRApP3oiQKTRZd8H04keFR+PvmDk/Lq11l3Kc24A1PevKIPX1oI990ggw9
-9i4uSV+cnuMxmcI9xxJtgwdDFdjr39l2arLOHr4s6LGoV2IOdXHNlv5xRqWUZ0FH
-MDHowkLgwDrdSTnNeaVNkce14Gqx+bd4hNaLCdKXMpedBTEmrut3f3hdV1kKjaKt
-aqRYr8ECgYEA/YDGZY2jvFoHHBywlqmEMFrrCvQGH51m5R1Ntpkzr+Rh3YCmrpvq
-xgwJXING0PUw3dz+xrH5lJICrfNE5Kt3fPu1rAEy+13mYsNowghtUq2Rtu0Hsjjx
-2E3Bf8vEB6RNBMmGkUpTTIAroGF5tpJoRvfnWax+k4pFdrKYFtyZdNcCgYEA5cNv
-EPltvOobjTXlUmtVP3n27KZN2aXexTcagLzRxE9CV4cYySENl3KuOMmccaZpIl6z
-aHk6BT4X+M0LqElNUczrInfVqI+SGAFLGy7W6CJaqSr6cpyFUP/fosKpm6wKGgLq
-udHfpvz5rckhKd8kJxFLvhGOK9yN5qpzih0gfhECgYAJfwRvk3G5wYmYpP58dlcs
-VIuPenqsPoI3PPTHTU/hW+XKnWIhElgmGRdUrto9Q6IT/Y5RtSMLTLjq+Tzwb/fm
-56rziYv2XJsfwgAvnI8z1Kqrto9ePsHYf3krJ1/thVsZPc9bq/QY3ohD1sLvcuaT
-GgBBnLOVJU3a12/ZE2RwOwKBgF0csWMAoj8/5IB6if+3ral2xOGsl7oPZVMo/J2V
-Z7EVqb4M6rd/pKFugTpUQgkwtkSOekhpcGD1hAN5HTNK2YG/+L5UMAsKe9sskwJm
-HgOfAHy0BSDzW3ey6i9skg2bT9Cww+0gJ3Hl7U1HSCBO5LjMYpSZSrNtwzfqdb5Q
-BX3xAoGARZdR28Ej3+/+0+fz47Yu2h4z0EI/EbrudLOWY936jIeAVwHckI3+BuqH
-qR4poj1gfbnMxNuI9UzIXzjEmGewx9kDZ7IYnvloZKqoVQODO5GlKF2ja6IcMNlh
-GCNdD6PSAS6HcmalmWo9sj+1YMkrl+GJikKZqVBHrHNwMGAG67w=
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem
deleted file mode 100644
index 60c4cf06..00000000
--- a/vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem
+++ /dev/null
@@ -1,19 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDCDCCAfACFFlYsYCFit01ZpYmfjxpo7/6wMEbMA0GCSqGSIb3DQEBCwUAMEgx
-CzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEPMA0GA1UECgwGR29vZ2xlMRswGQYD
-VQQDDBJ0ZXN0LXMyYS1tdGxzLXJvb3QwHhcNMjMwODIyMTY0NTE4WhcNNDMwODIy
-MTY0NTE4WjA5MQswCQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExHTAbBgNVBAMMFHRl
-c3QtczJhLW10bHMtY2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
-AQEAqrQQMyxNtmdCB+uY3szgRsfPrKC+TV9Fusnd8PfaCVuGTGcSBKM018nV2TDn
-3IYFQ1HgLpGwGwOFDBb3y0o9i2/l2VJySriX1GSNX6nDmVasQlO1wuOLCP7/LRmO
-7b6Kise5W0IFhYaptKyWnekn2pS0tAjimqpfn2w0U6FDGtQUqg/trQQmGtTSJHjb
-A+OFd0EFC18KGP8Q+jOMaMkJRmpeEiAPyHPDoMhqQNT26RApv9j2Uzo4SuXzHH6T
-cAdm1+zG+EXY/UZKX9oDkSbwIJvN+gCmNyORLalJ12gsGYOCjMd8K0mlXBqrmmbO
-VHVbUm9062lhE7x59AA8DK4DoQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCPOvtL
-dq2hxFHlIy0YUK8jp/DtwJZPwzx1id5FtWwd0CxBS1StIgmkHMxtkJGz1iyQLplI
-je+Msd4sTsb5zZi/8kGKehi8Wj4lghp4oP30cpob41OvM68M9RC/wSOVk9igSww+
-l3zof6wKRIswsi5VHrL16ruIVVoDlyFbKr8yk+cp9OPOV8hNNN7ewY9xC8OgnTt8
-YtdaLe6uTplKBLW+j3GtshigRhyfkGJyPFYL4LAeDJCHlC1qmBnkyP0ijMp6vneM
-E8TLavnMTMcpihWTWpyKeRkO6HDRsP4AofQAp7VAiAdSOplga+w2qgrVICV+m8MK
-BTq2PBvc59T6OFLq
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem b/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem
deleted file mode 100644
index 9d112d1e..00000000
--- a/vendor/github.com/google/s2a-go/testdata/mds_client_key.pem
+++ /dev/null
@@ -1,28 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCqtBAzLE22Z0IH
-65jezOBGx8+soL5NX0W6yd3w99oJW4ZMZxIEozTXydXZMOfchgVDUeAukbAbA4UM
-FvfLSj2Lb+XZUnJKuJfUZI1fqcOZVqxCU7XC44sI/v8tGY7tvoqKx7lbQgWFhqm0
-rJad6SfalLS0COKaql+fbDRToUMa1BSqD+2tBCYa1NIkeNsD44V3QQULXwoY/xD6
-M4xoyQlGal4SIA/Ic8OgyGpA1PbpECm/2PZTOjhK5fMcfpNwB2bX7Mb4Rdj9Rkpf
-2gORJvAgm836AKY3I5EtqUnXaCwZg4KMx3wrSaVcGquaZs5UdVtSb3TraWETvHn0
-ADwMrgOhAgMBAAECggEAUccupZ1ZY4OHTi0PkNk8rpwFwTFGyeFVEf2ofkr24RnA
-NnUAXEllxOUUNlcoFOz9s3kTeavg3qgqgpa0QmdAIb9LMXg+ec6CKkW7trMpGho8
-LxBUWNfSoU4sKEqAvyPT0lWJVo9D/up6/avbAi6TIbOw+Djzel4ZrlHTpabxc3WT
-EilXzn4q54b3MzxCQeQjcnzTieW4Q5semG2kLiXFToHIY2di01P/O8awUjgrD+uW
-/Cb6H49MnHm9VPkqea1iwZeMQd6Gh5FrC7RezsBjdB1JBcfsv6PFt2ySInjB8SF+
-XR5Gr3Cc5sh9s0LfprZ9Dq0rlSWmwasPMI1COK6SswKBgQDczgeWd3erQ1JX9LEI
-wollawqC9y7uJhEsw1hrPqA3uqZYiLUc7Nmi4laZ12mcGoXNDS3R3XmD58qGmGaU
-lxEVTb8KDVWBgw450VoBKzSMQnCP6zn4nZxTYxeqMKjDGf6TRB6TZc843qsG3eRC
-k91yxrCQ/0HV6PT48C+lieDzLwKBgQDF6aNKiyrswr457undBnM1H8q/Y6xC5ZlK
-UtiQdhuyBnicvz0U8WPxBY/8gha0OXWuSnBqq/z77iFVNv/zT6p9K7kM7nBGd8cB
-8KO6FNbyaHWFrhCI5zNzRTH4oha0hfvUOoti09vqavCtWD4L+D/63ba1wNLKPO9o
-4gWbCnUCLwKBgQC/vus372csgrnvR761LLrEJ8BpGt7WUJh5luoht7DKtHvgRleB
-Vu1oVcV+s2Iy/ZVUDC3OIdZ0hcWKPK5YOxfKuEk+IXYvke+4peTTPwHTC59UW6Fs
-FPK8N0FFuhvT0a8RlAY5WiAp8rPysp6WcnHMSl7qi8BQUozp4Sp/RsziYQKBgBXv
-r4mzoy5a53rEYGd/L4XT4EUWZyGDEVqLlDVu4eL5lKTLDZokp08vrqXuRVX0iHap
-CYzJQ2EpI8iuL/BoBB2bmwcz5n3pCMXORld5t9lmeqA2it6hwbIlGUTVsm6P6zm6
-w3hQwy9YaxTLkxUAjxbfPEEo/jQsTNzzMGve3NlBAoGAbgJExpDyMDnaD2Vi5eyr
-63b54BsqeLHqxJmADifyRCj7G1SJMm3zMKkNNOS0vsXgoiId973STFf1XQiojiv8
-Slbxyv5rczcY0n3LOuQYcM5OzsjzpNFZsT2dDnMfNRUF3rx3Geu/FuJ9scF1b00r
-fVMrcL3jSf/W1Xh4TgtyoU8=
------END PRIVATE KEY-----
diff --git a/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem
deleted file mode 100644
index 44e436f6..00000000
--- a/vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem
+++ /dev/null
@@ -1,21 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDcTCCAlmgAwIBAgIUDUkgI+2FZtuUHyUUi0ZBH7JvN00wDQYJKoZIhvcNAQEL
-BQAwSDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQ8wDQYDVQQKDAZHb29nbGUx
-GzAZBgNVBAMMEnRlc3QtczJhLW10bHMtcm9vdDAeFw0yMzA4MjEyMTI5MTVaFw00
-MzA4MjEyMTI5MTVaMEgxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEPMA0GA1UE
-CgwGR29vZ2xlMRswGQYDVQQDDBJ0ZXN0LXMyYS1tdGxzLXJvb3QwggEiMA0GCSqG
-SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCbFEQfpvla27bATedrN4BAWsI9GSwSnJLW
-QWzXcnAk6cKxQBAhnaKHRxHY8ttLhNTtxQeub894CLzJvHE/0xDhuMzjtCCCZ7i2
-r08tKZ1KcEzPJCPNlxlzAXPA45XU3LRlbGvju/PBPhm6n1hCEKTNI/KETJ5DEaYg
-Cf2LcXVsl/zW20MwDZ+e2w/9a2a6n6DdpW1ekOR550hXAUOIxvmXRBeYeGLFvp1n
-rQgZBhRaxP03UB+PQD2oMi/4mfsS96uGCXdzzX8qV46O8m132HUbnA/wagIwboEe
-d7Bx237dERDyHw5GFnll7orgA0FOtoEufXdeQxWVvTjO0+PVPgsvAgMBAAGjUzBR
-MB0GA1UdDgQWBBRyMtg/yutV8hw8vOq0i8x0eBQi7DAfBgNVHSMEGDAWgBRyMtg/
-yutV8hw8vOq0i8x0eBQi7DAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUA
-A4IBAQArN/gdqWMxd5Rvq2eJMTp6I4RepJOT7Go4sMsRsy1caJqqcoS2EvREDZMN
-XNEBcyQBB5kYd6TCcZGoLnEtWYXQ4jjEiXG1g7/+rWxyqw0ZYuP7FWzuHg3Uor/x
-fApbEKwptP5ywVc+33h4qreGcqXkVCCn+sAcstGgrqubdGZW2T5gazUMyammOOuN
-9IWL1PbvXmgEKD+80NUIrk09zanYyrElGdU/zw/kUbZ3Jf6WUBtJGhTzRQ1qZeKa
-VnpCbLoG3vObEB8mxDUAlIzwAtfvw4U32BVIZA8xrocz6OOoAnSW1bTlo3EOIo/G
-MTV7jmY9TBPtfhRuO/cG650+F+cw
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem b/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem
deleted file mode 100644
index 68c60613..00000000
--- a/vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem
+++ /dev/null
@@ -1,21 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDbjCCAlagAwIBAgIUbexZ5sZl86Al9dsI2PkOgtqKnkgwDQYJKoZIhvcNAQEL
-BQAwSDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQ8wDQYDVQQKDAZHb29nbGUx
-GzAZBgNVBAMMEnRlc3QtczJhLW10bHMtcm9vdDAeFw0yMzA4MjIwMDMyMDRaFw00
-MzA4MjIwMDMyMDRaMDkxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTEdMBsGA1UE
-AwwUdGVzdC1zMmEtbXRscy1zZXJ2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw
-ggEKAoIBAQCMEzybsGPqfh92GLwy43mt8kQDF3ztr8y06RwU1hVnY7QqYK4obpvh
-HkJVnTz9gwNBF3n5nUalqRzactlf2PCydN9oSYNCO8svVmo7vw1CleKAKFAiV5Qn
-H76QlqD15oJreh7nSM8R4qj5KukIHvt0cN0gD6CJQzIURDtsKJwkW3yQjYyT/FAK
-GYtFrB6buDn3Eg3Hsw6z7uj7CzLBsSl7BIGrQILbpbI9nFNT3rUTUhXZKY/3UtJA
-Ob66AjTmMbD16RGYZR4JsPx6CstheifJ6YSI79r5KgD37zX0jMXFWimvb2SmZmFe
-LoohtC8K7uTyjm/dROx6nHXdDt5TQYXHAgMBAAGjXzBdMBsGA1UdEQQUMBKHEAAA
-AAAAAAAAAAAAAAAAAAAwHQYDVR0OBBYEFI3i2+tIk6YYn0MIxC0q93jk1VsUMB8G
-A1UdIwQYMBaAFHIy2D/K61XyHDy86rSLzHR4FCLsMA0GCSqGSIb3DQEBCwUAA4IB
-AQAUhk+s/lrIAULBbU7E22C8f93AzTxE1mhyHGNlfPPJP3t1Dl+h4X4WkFpkz5gT
-EcNXB//Vvoq99HbEK5/92sxsIPexKdJBdcggeHXIgLDkOrEZEb0Nnh9eaAuU2QDn
-JW44hMB+aF6mEaJvOHE6DRkQw3hwFYFisFKKHtlQ3TyOhw5CHGzSExPZusdSFNIe
-2E7V/0QzGPJEFnEFUNe9N8nTH2P385Paoi+5+Iizlp/nztVXfzv0Cj/i+qGgtDUs
-HB+gBU2wxMw8eYyuNzACH70wqGR1Parj8/JoyYhx0S4+Gjzy3JH3CcAMaxyfH/dI
-4Wcvfz/isxgmH1UqIt3oc6ad
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem b/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem
deleted file mode 100644
index b14ad0f7..00000000
--- a/vendor/github.com/google/s2a-go/testdata/mds_server_key.pem
+++ /dev/null
@@ -1,28 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCMEzybsGPqfh92
-GLwy43mt8kQDF3ztr8y06RwU1hVnY7QqYK4obpvhHkJVnTz9gwNBF3n5nUalqRza
-ctlf2PCydN9oSYNCO8svVmo7vw1CleKAKFAiV5QnH76QlqD15oJreh7nSM8R4qj5
-KukIHvt0cN0gD6CJQzIURDtsKJwkW3yQjYyT/FAKGYtFrB6buDn3Eg3Hsw6z7uj7
-CzLBsSl7BIGrQILbpbI9nFNT3rUTUhXZKY/3UtJAOb66AjTmMbD16RGYZR4JsPx6
-CstheifJ6YSI79r5KgD37zX0jMXFWimvb2SmZmFeLoohtC8K7uTyjm/dROx6nHXd
-Dt5TQYXHAgMBAAECggEAIB5zGdIG/yh/Z1GBqfuOFaxFGx5iJ5BVlLAVH9P9IXFz
-yPnVRXEjbinFlSMSbqEBeIX9EpcVMXxHIPIP1RIGEy2IYr3kiqXyT771ahDDZh6/
-Spqz0UQatSPqyvW3H9uE0Uc12dvQm23JSCUmPRX5m7gbhDQBIChXzdzdcU4Yi59V
-4xmJUvbsAcLw5CBM6kwV+1NGVH9+3mUdhrr9M6B6+sVB/xnaqMGEDfQGiwL8U7EY
-QOuc46KXu3Pd/qCdVLn60IrdjSzDJKeC5UZZ+ejNAo+DfbtOovBj3qu3OCUg4XVy
-0CDBJ1sTdLvUfF4Gb+crjPsd+qBbXcjVfqdadwhsoQKBgQDBF1Pys/NitW8okJwp
-2fiDIASP3TiI+MthWHGyuoZGPvmXQ3H6iuLSm8c/iYI2WPTf53Xff1VcFm1GmQms
-GCsYM8Ax94zCeO6Ei1sYYxwcBloEZfOeV37MPA4pjJF4Lt+n5nveNxP+lrsjksJz
-wToSEgWPDT1b/xcdt4/5j9J85wKBgQC5tiLx+33mwH4DoaFRmSl0+VuSNYFw6DTQ
-SQ+kWqWGH4NENc9wf4Dj2VUZQhpXNhXVSxj+aP2d/ck1NrTJAWqYEXCDtFQOGSa2
-cGPRr+Fhy5NIEaEvR7IXcMBZzx3koYmWVBHricyrXs5FvHrT3N14mGDUG8n24U3f
-R799bau0IQKBgQC97UM+lHCPJCWNggiJRgSifcje9VtZp1btjoBvq/bNe74nYkjn
-htsrC91Fiu1Qpdlfr50K1IXSyaB886VG6JLjAGxI+dUzqJ38M9LLvxj0G+9JKjsi
-AbAQFfZcOg8QZxLJZPVsE0MQhZTXndC06VhEVAOxvPUg214Sde8hK61/+wKBgCRw
-O10VhnePT2pw/VEgZ0T/ZFtEylgYB7zSiRIrgwzVBBGPKVueePC8BPmGwdpYz2Hh
-cU8B1Ll6QU+Co2hJMdwSl+wPpup5PuJPHRbYlrV0lzpt0x2OyL/WrLcyb2Ab3f40
-EqwPhqwdVwXR3JvTW1U9OMqFhVQ+kuP7lPQMX8NhAoGBAJOgZ7Tokipc4Mi68Olw
-SCaOPvjjy4sW2rTRuKyjc1wTAzy7SJ3vXHfGkkN99nTLJFwAyJhWUpnRdwAXGi+x
-gyOa95ImsEfRSwEjbluWfF8/P0IU8GR+ZTqT4NnNCOsi8T/xst4Szd1ECJNnnZDe
-1ChfPP1AH+/75MJCvu6wQBQv
------END PRIVATE KEY-----
diff --git a/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem b/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem
deleted file mode 100644
index ad1bad59..00000000
--- a/vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem
+++ /dev/null
@@ -1,19 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDITCCAgkCFBS8mLoytMpMWBwpAtnRaq3eIKnsMA0GCSqGSIb3DQEBCwUAME0x
-CzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UECgwEVGVzdDEiMCAGA1UE
-AwwZdGVzdC1zMmEtbXRscy1zZWxmLXNpZ25lZDAeFw0yMzA4MjIyMTE2MDFaFw00
-MzA4MjIyMTE2MDFaME0xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UE
-CgwEVGVzdDEiMCAGA1UEAwwZdGVzdC1zMmEtbXRscy1zZWxmLXNpZ25lZDCCASIw
-DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKFFPsYasKZeCFLEXl3RpE/ZOXFe
-2lhutIalSpZvCmso+mQGoZ4cHK7At+kDjBi5CrnXkYcw7quQAhHgU0frhWdj7tsW
-HUUtq7T8eaGWKBnVD9fl+MjtAl1BmhXwV9qRBbj4EesSKGDSGpKf66dOtzw83JbB
-cU7XlPAH1c1zo2GXC1himcZ+SVGHVrOjn4NmeFs8g94/Dke8dWkHwv5YTMVugFK4
-5KxKgSOKkr4ka7PCBzgxCnW4wYSZNRHcxrqkiArO2HAQq0ACr7u+fVDYH//9mP2Z
-ADo/zch7O5yhkiNbjXJIRrptDWEuVYMRloYDhT773h7bV/Q0Wo0NQGtasJ8CAwEA
-ATANBgkqhkiG9w0BAQsFAAOCAQEAPjbH0TMyegF/MDvglkc0sXr6DqlmTxDCZZmG
-lYPZ5Xy062+rxIHghMARbvO4BxepiG37KsP2agvOldm4TtU8nQ8LyswmSIFm4BQ+
-XQWwdsWyYyd8l0d5sXAdaN6AXwy50fvqCepmEqyreMY6dtLzlwo9gVCBFB7QuAPt
-Nc14phpEUZt/KPNuY6cUlB7bz3tmnFbwxUrWj1p0KBEYsr7+KEVZxR+z0wtlU7S9
-ZBrmUvx0fq5Ef7JWtHW0w4ofg1op742sdYl+53C26GZ76ts4MmqVz2/94DScgRaU
-gT0GLVuuCZXRDVeTXqTb4mditRCfzFPe9cCegYhGhSqBs8yh5A==
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem b/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem
deleted file mode 100644
index bcf08e4f..00000000
--- a/vendor/github.com/google/s2a-go/testdata/self_signed_key.pem
+++ /dev/null
@@ -1,28 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQChRT7GGrCmXghS
-xF5d0aRP2TlxXtpYbrSGpUqWbwprKPpkBqGeHByuwLfpA4wYuQq515GHMO6rkAIR
-4FNH64VnY+7bFh1FLau0/HmhligZ1Q/X5fjI7QJdQZoV8FfakQW4+BHrEihg0hqS
-n+unTrc8PNyWwXFO15TwB9XNc6NhlwtYYpnGfklRh1azo5+DZnhbPIPePw5HvHVp
-B8L+WEzFboBSuOSsSoEjipK+JGuzwgc4MQp1uMGEmTUR3Ma6pIgKzthwEKtAAq+7
-vn1Q2B///Zj9mQA6P83IezucoZIjW41ySEa6bQ1hLlWDEZaGA4U++94e21f0NFqN
-DUBrWrCfAgMBAAECggEAR8e8YwyqJ8KezcgdgIC5M9kp2i4v3UCZFX0or8CI0J2S
-pUbWVLuKgLXCpfIwPyjNf15Vpei/spkMcsx4BQDthdFTFSzIpmvni0z9DlD5VFYj
-ESOJElV7wepbHPy2/c+izmuL/ic81aturGiFyRgeMq+cN3WuaztFTXkPTrzzsZGF
-p/Mx3gqm7Hoc3d2xlv+8L5GjCtEJPlQgZJV+s3ennBjOAd8CC7d9qJetE3Er46pn
-r5jedV3bQRZYBzmooYNHjbAs26++wYac/jTE0/U6nKS17eWq4BQZUtlMXUw5N81B
-7LKn7C03rj2KCn+Nf5uin9ALmoy888LXCDdvL/NZkQKBgQDduv1Heu+tOZuNYUdQ
-Hswmd8sVNAAWGZxdxixHMv58zrgbLFXSX6K89X2l5Sj9XON8TH46MuSFdjSwwWw5
-fBrhVEhA5srcqpvVWIBE05yqPpt0s1NQktMWJKELWlG8jOhVKwM5OYDpdxtwehpz
-1g70XJz+nF/LTV8RdTK+OWDDpQKBgQC6MhdbGHUz/56dY3gZpE5TXnN2hkNbZCgk
-emr6z85VHhQflZbedhCzB9PUnZnCKWOGQHQdxRTtRfd46LVboZqCdYO1ZNQv6toP
-ysS7dTpZZFy7CpQaW0Y6/jS65jW6xIDKR1W40vgltZ3sfpG37JaowpzWdw2WuOnw
-Bg0rcJAf8wKBgQCqE+p/z97UwuF8eufWnyj9QNo382E1koOMspv4KTdnyLETtthF
-vDH6O1wbykG8xmmASLRyM+NyNA+KnXNETNvZh2q8zctBpGRQK8iIAsGjHM7ln0AD
-B/x+ea5GJQuZU4RK/+lDFca6TjBwAFkWDVX/PqL18kDQkxKfM4SuwRhmOQKBgDGh
-eoJIsa0LnP787Z2AI3Srf4F/ZmLs/ppCm1OBotEjdF+64v0nYWonUvqgi8SqfaHi
-elEZIGvis4ViGj1zhRjzNAlc+AZRxpBhDzGcnNIJI4Kj3jhsTfsZmXqcNIQ1LtM8
-Uogyi/yZPaA1WKg7Aym2vlGYaGHdplXZdxc2KOSrAoGABRkD9l2OVcwK7RyNgFxo
-mjxx0tfUdDBhHIi2igih1FiHpeP9E+4/kE/K7PnU9DoDrL1jW1MTpXaYV4seOylk
-k9z/9QfcRa9ePD2N4FqbHWSYp5n3aLoIcGq/9jyjTwayZbbIhWO+vNuHE9wIvecZ
-8x3gNkxJRb4NaLIoNzAhCoo=
------END PRIVATE KEY-----
diff --git a/vendor/github.com/google/s2a-go/testdata/server_cert.pem b/vendor/github.com/google/s2a-go/testdata/server_cert.pem
deleted file mode 100644
index 0f98322c..00000000
--- a/vendor/github.com/google/s2a-go/testdata/server_cert.pem
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIID8TCCAtmgAwIBAgIUKCoDuLtiZXvhsBY2RoDm0ugizJ8wDQYJKoZIhvcNAQEL
-BQAwgYcxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTESMBAGA1UEBwwJU3Vubnl2
-YWxlMRAwDgYDVQQKDAdDb21wYW55MREwDwYDVQQLDAhEaXZpc2lvbjEWMBQGA1UE
-AwwNczJhX3Rlc3RfY2VydDEaMBgGCSqGSIb3DQEJARYLeHl6QHh5ei5jb20wHhcN
-MjIwNTMxMjAwODI1WhcNNDIwNTI2MjAwODI1WjCBhzELMAkGA1UEBhMCVVMxCzAJ
-BgNVBAgMAkNBMRIwEAYDVQQHDAlTdW5ueXZhbGUxEDAOBgNVBAoMB0NvbXBhbnkx
-ETAPBgNVBAsMCERpdmlzaW9uMRYwFAYDVQQDDA1zMmFfdGVzdF9jZXJ0MRowGAYJ
-KoZIhvcNAQkBFgt4eXpAeHl6LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
-AQoCggEBAKK1++PXQ+M3hjYH/v0K4UEYl5ljzpNM1i52eQM+gFooojT87PDSaphT
-fs0PXy/PTAjHBEvPhWpOpmQXfJNYzjwcCvg66hbqkv++/VTZiFLAsHagzkEz+FRJ
-qT5Eq7G5FLyw1izX1uxyPN7tAEWEEg7eqsiaXD3Cq8+TYN9cjirPeF7RZF8yFCYE
-xqvbo+Yc6RL6xw19iXVTfctRgQe581KQuIY5/LXo3dWDEilFdsADAe8XAEcO64es
-Ow0g1UvXLnpXSE151kXBFb3sKH/ZjCecDYMCIMEb4sWLSblkSxJ5sNSmXIG4wtr2
-Qnii7CXZgnVYraQE/Jyh+NMQANuoSdMCAwEAAaNTMFEwHQYDVR0OBBYEFAyQQQuM
-ab+YUQqjK8dVVOoHVFmXMB8GA1UdIwQYMBaAFAyQQQuMab+YUQqjK8dVVOoHVFmX
-MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADj0vQ6ykWhicoqR
-e6VZMwlEJV7/DSvWWKBd9MUjfKye0A4565ya5lmnzP3DiD3nqGe3miqmLsXKDs+X
-POqlPXTWIamP7D4MJ32XtSLwZB4ru+I+Ao/P/VngPepoRPQoBnzHe7jww0rokqxl
-AZERjlbTUwUAy/BPWPSzSJZ2j0tcs6ZLDNyYzpK4ao8R9/1VmQ92Tcp3feJs1QTg
-odRQc3om/AkWOwsll+oyX0UbJeHkFHiLanUPXbdh+/BkSvZJ8ynL+feSDdaurPe+
-PSfnqLtQft9/neecGRdEaQzzzSFVQUVQzTdK1Q7hA7b55b2HvIa3ktDiks+sJsYN
-Dhm6uZM=
------END CERTIFICATE-----
diff --git a/vendor/github.com/google/s2a-go/testdata/server_key.pem b/vendor/github.com/google/s2a-go/testdata/server_key.pem
deleted file mode 100644
index 81afea78..00000000
--- a/vendor/github.com/google/s2a-go/testdata/server_key.pem
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEAorX749dD4zeGNgf+/QrhQRiXmWPOk0zWLnZ5Az6AWiiiNPzs
-8NJqmFN+zQ9fL89MCMcES8+Fak6mZBd8k1jOPBwK+DrqFuqS/779VNmIUsCwdqDO
-QTP4VEmpPkSrsbkUvLDWLNfW7HI83u0ARYQSDt6qyJpcPcKrz5Ng31yOKs94XtFk
-XzIUJgTGq9uj5hzpEvrHDX2JdVN9y1GBB7nzUpC4hjn8tejd1YMSKUV2wAMB7xcA
-Rw7rh6w7DSDVS9cueldITXnWRcEVvewof9mMJ5wNgwIgwRvixYtJuWRLEnmw1KZc
-gbjC2vZCeKLsJdmCdVitpAT8nKH40xAA26hJ0wIDAQABAoIBACaNR+lsD8G+XiZf
-LqN1+HkcAo9tfnyYMAdCOtnx7SdviT9Uzi8hK/B7mAeuJLeHPlS2EuaDfPD7QaFl
-jza6S+MiIdc+3kgfvESsVAnOoOY6kZUJ9NSuI6CU82y1iJjLaYZrv9NQMLRFPPb0
-4KOX709mosB1EnXvshW0rbc+jtDFhrm1SxMt+k9TuzmMxjbOeW4LOLXPgU8X1T3Q
-Xy0hMZZtcgBs9wFIo8yCtmOixax9pnFE8rRltgDxTodn9LLdz1FieyntNgDksZ0P
-nt4kV7Mqly7ELaea+Foaj244mKsesic2e3GhAlMRLun/VSunSf7mOCxfpITB8dp1
-drDhOYECgYEA19151dVxRcviuovN6Dar+QszMTnU8pDJ8BjLFjXjP/hNBBwMTHDE
-duMuWk2qnwZqMooI/shxrF/ufmTgS0CFrh2+ANBZu27vWConJNXcyNtdigI4wt50
-L0Y2qcZn2mg67qFXHwoR3QNwrwnPwEjRXA09at9CSRZzcwDQ0ETXhYsCgYEAwPaG
-06QdK8Zyly7TTzZJwxzv9uGiqzodmGtX6NEKjgij2JaCxHpukqZBJoqa0jKeK1cm
-eNVkOvT5ff9TMzarSHQLr3pZen2/oVLb5gaFkbcJt/klv9Fd+ZRilHY3i6QwS6pD
-uMiPOWS4DrLHDRVoVlAZTDjT1RVwwTs+P2NhJdkCgYEAsriXysbxBYyMp05gqEW7
-lHIFbFgpSrs9th+Q5U6wW6JEgYaHWDJ1NslY80MiZI93FWjbkbZ7BvBWESeL3EIL
-a+EMErht0pVCbIhZ6FF4foPAqia0wAJVx14mm+G80kNBp5jE/NnleEsE3KcO7nBb
-hg8gLn+x7bk81JZ0TDrzBYkCgYEAuQKluv47SeF3tSScTfKLPpvcKCWmxe1uutkQ
-7JShPhVioyOMNb39jnYBOWbjkm4d4QgqRuiytSR0oi3QI+Ziy5EYMyNn713qAk9j
-r2TJZDDPDKnBW+zt4YI4EohWMXk3JRUW4XDKggjjwJQA7bZ812TtHHvP/xoThfG7
-eSNb3eECgYBw6ssgCtMrdvQiEmjKVX/9yI38mvC2kSGyzbrQnGUfgqRGomRpeZuD
-B5E3kysA4td5pT5lvcLgSW0TbOz+YbiriXjwOihPIelCvc9gE2eOUI71/byUWPFz
-7u5F/xQ4NaGr5suLF+lBC6h7pSbM4El9lIHQAQadpuEdzHqrw+hs3g==
------END RSA PRIVATE KEY-----
diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md
deleted file mode 100644
index 7ec5ac7e..00000000
--- a/vendor/github.com/google/uuid/CHANGELOG.md
+++ /dev/null
@@ -1,41 +0,0 @@
-# Changelog
-
-## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16)
-
-
-### Features
-
-* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))
-
-
-### Bug Fixes
-
-* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
-* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))
-
-## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
-
-
-### Features
-
-* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
-
-## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
-
-
-### Features
-
-* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4))
-
-### Fixes
-
-* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)
-
-## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)
-
-
-### Bug Fixes
-
-* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))
-
-## Changelog
diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md
deleted file mode 100644
index a502fdc5..00000000
--- a/vendor/github.com/google/uuid/CONTRIBUTING.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# How to contribute
-
-We definitely welcome patches and contribution to this project!
-
-### Tips
-
-Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).
-
-Always try to include a test case! If it is not possible or not necessary,
-please explain why in the pull request description.
-
-### Releasing
-
-Commits that would precipitate a SemVer change, as described in the Conventional
-Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
-to create a release candidate pull request. Once submitted, `release-please`
-will create a release.
-
-For tips on how to work with `release-please`, see its documentation.
-
-### Legal requirements
-
-In order to protect both you and ourselves, you will need to sign the
-[Contributor License Agreement](https://cla.developers.google.com/clas).
-
-You may have already signed it for other Google projects.
diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS
deleted file mode 100644
index b4bb97f6..00000000
--- a/vendor/github.com/google/uuid/CONTRIBUTORS
+++ /dev/null
@@ -1,9 +0,0 @@
-Paul Borman
-bmatsuo
-shawnps
-theory
-jboverfelt
-dsymonds
-cd1
-wallclockbuilder
-dansouza
diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE
deleted file mode 100644
index 5dc68268..00000000
--- a/vendor/github.com/google/uuid/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009,2014 Google Inc. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md
deleted file mode 100644
index 3e9a6188..00000000
--- a/vendor/github.com/google/uuid/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# uuid
-The uuid package generates and inspects UUIDs based on
-[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
-and DCE 1.1: Authentication and Security Services.
-
-This package is based on the github.com/pborman/uuid package (previously named
-code.google.com/p/go-uuid). It differs from these earlier packages in that
-a UUID is a 16 byte array rather than a byte slice. One loss due to this
-change is the ability to represent an invalid UUID (vs a NIL UUID).
-
-###### Install
-```sh
-go get github.com/google/uuid
-```
-
-###### Documentation
-[](https://pkg.go.dev/github.com/google/uuid)
-
-Full `go doc` style documentation for the package can be viewed online without
-installing this package by using the GoDoc site here:
-http://pkg.go.dev/github.com/google/uuid
diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go
deleted file mode 100644
index fa820b9d..00000000
--- a/vendor/github.com/google/uuid/dce.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- "encoding/binary"
- "fmt"
- "os"
-)
-
-// A Domain represents a Version 2 domain
-type Domain byte
-
-// Domain constants for DCE Security (Version 2) UUIDs.
-const (
- Person = Domain(0)
- Group = Domain(1)
- Org = Domain(2)
-)
-
-// NewDCESecurity returns a DCE Security (Version 2) UUID.
-//
-// The domain should be one of Person, Group or Org.
-// On a POSIX system the id should be the users UID for the Person
-// domain and the users GID for the Group. The meaning of id for
-// the domain Org or on non-POSIX systems is site defined.
-//
-// For a given domain/id pair the same token may be returned for up to
-// 7 minutes and 10 seconds.
-func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
- uuid, err := NewUUID()
- if err == nil {
- uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
- uuid[9] = byte(domain)
- binary.BigEndian.PutUint32(uuid[0:], id)
- }
- return uuid, err
-}
-
-// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
-// domain with the id returned by os.Getuid.
-//
-// NewDCESecurity(Person, uint32(os.Getuid()))
-func NewDCEPerson() (UUID, error) {
- return NewDCESecurity(Person, uint32(os.Getuid()))
-}
-
-// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
-// domain with the id returned by os.Getgid.
-//
-// NewDCESecurity(Group, uint32(os.Getgid()))
-func NewDCEGroup() (UUID, error) {
- return NewDCESecurity(Group, uint32(os.Getgid()))
-}
-
-// Domain returns the domain for a Version 2 UUID. Domains are only defined
-// for Version 2 UUIDs.
-func (uuid UUID) Domain() Domain {
- return Domain(uuid[9])
-}
-
-// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
-// UUIDs.
-func (uuid UUID) ID() uint32 {
- return binary.BigEndian.Uint32(uuid[0:4])
-}
-
-func (d Domain) String() string {
- switch d {
- case Person:
- return "Person"
- case Group:
- return "Group"
- case Org:
- return "Org"
- }
- return fmt.Sprintf("Domain%d", int(d))
-}
diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go
deleted file mode 100644
index 5b8a4b9a..00000000
--- a/vendor/github.com/google/uuid/doc.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package uuid generates and inspects UUIDs.
-//
-// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
-// Services.
-//
-// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to
-// maps or compared directly.
-package uuid
diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go
deleted file mode 100644
index dc60082d..00000000
--- a/vendor/github.com/google/uuid/hash.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- "crypto/md5"
- "crypto/sha1"
- "hash"
-)
-
-// Well known namespace IDs and UUIDs
-var (
- NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
- NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
- NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
- NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
- Nil UUID // empty UUID, all zeros
-
- // The Max UUID is special form of UUID that is specified to have all 128 bits set to 1.
- Max = UUID{
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
- }
-)
-
-// NewHash returns a new UUID derived from the hash of space concatenated with
-// data generated by h. The hash should be at least 16 byte in length. The
-// first 16 bytes of the hash are used to form the UUID. The version of the
-// UUID will be the lower 4 bits of version. NewHash is used to implement
-// NewMD5 and NewSHA1.
-func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
- h.Reset()
- h.Write(space[:]) //nolint:errcheck
- h.Write(data) //nolint:errcheck
- s := h.Sum(nil)
- var uuid UUID
- copy(uuid[:], s)
- uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
- uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
- return uuid
-}
-
-// NewMD5 returns a new MD5 (Version 3) UUID based on the
-// supplied name space and data. It is the same as calling:
-//
-// NewHash(md5.New(), space, data, 3)
-func NewMD5(space UUID, data []byte) UUID {
- return NewHash(md5.New(), space, data, 3)
-}
-
-// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
-// supplied name space and data. It is the same as calling:
-//
-// NewHash(sha1.New(), space, data, 5)
-func NewSHA1(space UUID, data []byte) UUID {
- return NewHash(sha1.New(), space, data, 5)
-}
diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go
deleted file mode 100644
index 14bd3407..00000000
--- a/vendor/github.com/google/uuid/marshal.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import "fmt"
-
-// MarshalText implements encoding.TextMarshaler.
-func (uuid UUID) MarshalText() ([]byte, error) {
- var js [36]byte
- encodeHex(js[:], uuid)
- return js[:], nil
-}
-
-// UnmarshalText implements encoding.TextUnmarshaler.
-func (uuid *UUID) UnmarshalText(data []byte) error {
- id, err := ParseBytes(data)
- if err != nil {
- return err
- }
- *uuid = id
- return nil
-}
-
-// MarshalBinary implements encoding.BinaryMarshaler.
-func (uuid UUID) MarshalBinary() ([]byte, error) {
- return uuid[:], nil
-}
-
-// UnmarshalBinary implements encoding.BinaryUnmarshaler.
-func (uuid *UUID) UnmarshalBinary(data []byte) error {
- if len(data) != 16 {
- return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
- }
- copy(uuid[:], data)
- return nil
-}
diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go
deleted file mode 100644
index d651a2b0..00000000
--- a/vendor/github.com/google/uuid/node.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- "sync"
-)
-
-var (
- nodeMu sync.Mutex
- ifname string // name of interface being used
- nodeID [6]byte // hardware for version 1 UUIDs
- zeroID [6]byte // nodeID with only 0's
-)
-
-// NodeInterface returns the name of the interface from which the NodeID was
-// derived. The interface "user" is returned if the NodeID was set by
-// SetNodeID.
-func NodeInterface() string {
- defer nodeMu.Unlock()
- nodeMu.Lock()
- return ifname
-}
-
-// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
-// If name is "" then the first usable interface found will be used or a random
-// Node ID will be generated. If a named interface cannot be found then false
-// is returned.
-//
-// SetNodeInterface never fails when name is "".
-func SetNodeInterface(name string) bool {
- defer nodeMu.Unlock()
- nodeMu.Lock()
- return setNodeInterface(name)
-}
-
-func setNodeInterface(name string) bool {
- iname, addr := getHardwareInterface(name) // null implementation for js
- if iname != "" && addr != nil {
- ifname = iname
- copy(nodeID[:], addr)
- return true
- }
-
- // We found no interfaces with a valid hardware address. If name
- // does not specify a specific interface generate a random Node ID
- // (section 4.1.6)
- if name == "" {
- ifname = "random"
- randomBits(nodeID[:])
- return true
- }
- return false
-}
-
-// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
-// if not already set.
-func NodeID() []byte {
- defer nodeMu.Unlock()
- nodeMu.Lock()
- if nodeID == zeroID {
- setNodeInterface("")
- }
- nid := nodeID
- return nid[:]
-}
-
-// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
-// of id are used. If id is less than 6 bytes then false is returned and the
-// Node ID is not set.
-func SetNodeID(id []byte) bool {
- if len(id) < 6 {
- return false
- }
- defer nodeMu.Unlock()
- nodeMu.Lock()
- copy(nodeID[:], id)
- ifname = "user"
- return true
-}
-
-// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
-// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
-func (uuid UUID) NodeID() []byte {
- var node [6]byte
- copy(node[:], uuid[10:])
- return node[:]
-}
diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go
deleted file mode 100644
index b2a0bc87..00000000
--- a/vendor/github.com/google/uuid/node_js.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2017 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build js
-
-package uuid
-
-// getHardwareInterface returns nil values for the JS version of the code.
-// This removes the "net" dependency, because it is not used in the browser.
-// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
-func getHardwareInterface(name string) (string, []byte) { return "", nil }
diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go
deleted file mode 100644
index 0cbbcddb..00000000
--- a/vendor/github.com/google/uuid/node_net.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2017 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !js
-
-package uuid
-
-import "net"
-
-var interfaces []net.Interface // cached list of interfaces
-
-// getHardwareInterface returns the name and hardware address of interface name.
-// If name is "" then the name and hardware address of one of the system's
-// interfaces is returned. If no interfaces are found (name does not exist or
-// there are no interfaces) then "", nil is returned.
-//
-// Only addresses of at least 6 bytes are returned.
-func getHardwareInterface(name string) (string, []byte) {
- if interfaces == nil {
- var err error
- interfaces, err = net.Interfaces()
- if err != nil {
- return "", nil
- }
- }
- for _, ifs := range interfaces {
- if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
- return ifs.Name, ifs.HardwareAddr
- }
- }
- return "", nil
-}
diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go
deleted file mode 100644
index d7fcbf28..00000000
--- a/vendor/github.com/google/uuid/null.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2021 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- "bytes"
- "database/sql/driver"
- "encoding/json"
- "fmt"
-)
-
-var jsonNull = []byte("null")
-
-// NullUUID represents a UUID that may be null.
-// NullUUID implements the SQL driver.Scanner interface so
-// it can be used as a scan destination:
-//
-// var u uuid.NullUUID
-// err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
-// ...
-// if u.Valid {
-// // use u.UUID
-// } else {
-// // NULL value
-// }
-//
-type NullUUID struct {
- UUID UUID
- Valid bool // Valid is true if UUID is not NULL
-}
-
-// Scan implements the SQL driver.Scanner interface.
-func (nu *NullUUID) Scan(value interface{}) error {
- if value == nil {
- nu.UUID, nu.Valid = Nil, false
- return nil
- }
-
- err := nu.UUID.Scan(value)
- if err != nil {
- nu.Valid = false
- return err
- }
-
- nu.Valid = true
- return nil
-}
-
-// Value implements the driver Valuer interface.
-func (nu NullUUID) Value() (driver.Value, error) {
- if !nu.Valid {
- return nil, nil
- }
- // Delegate to UUID Value function
- return nu.UUID.Value()
-}
-
-// MarshalBinary implements encoding.BinaryMarshaler.
-func (nu NullUUID) MarshalBinary() ([]byte, error) {
- if nu.Valid {
- return nu.UUID[:], nil
- }
-
- return []byte(nil), nil
-}
-
-// UnmarshalBinary implements encoding.BinaryUnmarshaler.
-func (nu *NullUUID) UnmarshalBinary(data []byte) error {
- if len(data) != 16 {
- return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
- }
- copy(nu.UUID[:], data)
- nu.Valid = true
- return nil
-}
-
-// MarshalText implements encoding.TextMarshaler.
-func (nu NullUUID) MarshalText() ([]byte, error) {
- if nu.Valid {
- return nu.UUID.MarshalText()
- }
-
- return jsonNull, nil
-}
-
-// UnmarshalText implements encoding.TextUnmarshaler.
-func (nu *NullUUID) UnmarshalText(data []byte) error {
- id, err := ParseBytes(data)
- if err != nil {
- nu.Valid = false
- return err
- }
- nu.UUID = id
- nu.Valid = true
- return nil
-}
-
-// MarshalJSON implements json.Marshaler.
-func (nu NullUUID) MarshalJSON() ([]byte, error) {
- if nu.Valid {
- return json.Marshal(nu.UUID)
- }
-
- return jsonNull, nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (nu *NullUUID) UnmarshalJSON(data []byte) error {
- if bytes.Equal(data, jsonNull) {
- *nu = NullUUID{}
- return nil // valid null UUID
- }
- err := json.Unmarshal(data, &nu.UUID)
- nu.Valid = err == nil
- return err
-}
diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go
deleted file mode 100644
index 2e02ec06..00000000
--- a/vendor/github.com/google/uuid/sql.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- "database/sql/driver"
- "fmt"
-)
-
-// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
-// Currently, database types that map to string and []byte are supported. Please
-// consult database-specific driver documentation for matching types.
-func (uuid *UUID) Scan(src interface{}) error {
- switch src := src.(type) {
- case nil:
- return nil
-
- case string:
- // if an empty UUID comes from a table, we return a null UUID
- if src == "" {
- return nil
- }
-
- // see Parse for required string format
- u, err := Parse(src)
- if err != nil {
- return fmt.Errorf("Scan: %v", err)
- }
-
- *uuid = u
-
- case []byte:
- // if an empty UUID comes from a table, we return a null UUID
- if len(src) == 0 {
- return nil
- }
-
- // assumes a simple slice of bytes if 16 bytes
- // otherwise attempts to parse
- if len(src) != 16 {
- return uuid.Scan(string(src))
- }
- copy((*uuid)[:], src)
-
- default:
- return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
- }
-
- return nil
-}
-
-// Value implements sql.Valuer so that UUIDs can be written to databases
-// transparently. Currently, UUIDs map to strings. Please consult
-// database-specific driver documentation for matching types.
-func (uuid UUID) Value() (driver.Value, error) {
- return uuid.String(), nil
-}
diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go
deleted file mode 100644
index c3511292..00000000
--- a/vendor/github.com/google/uuid/time.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- "encoding/binary"
- "sync"
- "time"
-)
-
-// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
-// 1582.
-type Time int64
-
-const (
- lillian = 2299160 // Julian day of 15 Oct 1582
- unix = 2440587 // Julian day of 1 Jan 1970
- epoch = unix - lillian // Days between epochs
- g1582 = epoch * 86400 // seconds between epochs
- g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs
-)
-
-var (
- timeMu sync.Mutex
- lasttime uint64 // last time we returned
- clockSeq uint16 // clock sequence for this run
-
- timeNow = time.Now // for testing
-)
-
-// UnixTime converts t the number of seconds and nanoseconds using the Unix
-// epoch of 1 Jan 1970.
-func (t Time) UnixTime() (sec, nsec int64) {
- sec = int64(t - g1582ns100)
- nsec = (sec % 10000000) * 100
- sec /= 10000000
- return sec, nsec
-}
-
-// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
-// clock sequence as well as adjusting the clock sequence as needed. An error
-// is returned if the current time cannot be determined.
-func GetTime() (Time, uint16, error) {
- defer timeMu.Unlock()
- timeMu.Lock()
- return getTime()
-}
-
-func getTime() (Time, uint16, error) {
- t := timeNow()
-
- // If we don't have a clock sequence already, set one.
- if clockSeq == 0 {
- setClockSequence(-1)
- }
- now := uint64(t.UnixNano()/100) + g1582ns100
-
- // If time has gone backwards with this clock sequence then we
- // increment the clock sequence
- if now <= lasttime {
- clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
- }
- lasttime = now
- return Time(now), clockSeq, nil
-}
-
-// ClockSequence returns the current clock sequence, generating one if not
-// already set. The clock sequence is only used for Version 1 UUIDs.
-//
-// The uuid package does not use global static storage for the clock sequence or
-// the last time a UUID was generated. Unless SetClockSequence is used, a new
-// random clock sequence is generated the first time a clock sequence is
-// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
-func ClockSequence() int {
- defer timeMu.Unlock()
- timeMu.Lock()
- return clockSequence()
-}
-
-func clockSequence() int {
- if clockSeq == 0 {
- setClockSequence(-1)
- }
- return int(clockSeq & 0x3fff)
-}
-
-// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
-// -1 causes a new sequence to be generated.
-func SetClockSequence(seq int) {
- defer timeMu.Unlock()
- timeMu.Lock()
- setClockSequence(seq)
-}
-
-func setClockSequence(seq int) {
- if seq == -1 {
- var b [2]byte
- randomBits(b[:]) // clock sequence
- seq = int(b[0])<<8 | int(b[1])
- }
- oldSeq := clockSeq
- clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
- if oldSeq != clockSeq {
- lasttime = 0
- }
-}
-
-// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
-// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs.
-func (uuid UUID) Time() Time {
- var t Time
- switch uuid.Version() {
- case 6:
- time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
- t = Time(time)
- case 7:
- time := binary.BigEndian.Uint64(uuid[:8])
- t = Time((time>>16)*10000 + g1582ns100)
- default: // forward compatible
- time := int64(binary.BigEndian.Uint32(uuid[0:4]))
- time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
- time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
- t = Time(time)
- }
- return t
-}
-
-// ClockSequence returns the clock sequence encoded in uuid.
-// The clock sequence is only well defined for version 1 and 2 UUIDs.
-func (uuid UUID) ClockSequence() int {
- return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
-}
diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go
deleted file mode 100644
index 5ea6c737..00000000
--- a/vendor/github.com/google/uuid/util.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- "io"
-)
-
-// randomBits completely fills slice b with random data.
-func randomBits(b []byte) {
- if _, err := io.ReadFull(rander, b); err != nil {
- panic(err.Error()) // rand should never fail
- }
-}
-
-// xvalues returns the value of a byte as a hexadecimal digit or 255.
-var xvalues = [256]byte{
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
- 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
-}
-
-// xtob converts hex characters x1 and x2 into a byte.
-func xtob(x1, x2 byte) (byte, bool) {
- b1 := xvalues[x1]
- b2 := xvalues[x2]
- return (b1 << 4) | b2, b1 != 255 && b2 != 255
-}
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go
deleted file mode 100644
index 5232b486..00000000
--- a/vendor/github.com/google/uuid/uuid.go
+++ /dev/null
@@ -1,365 +0,0 @@
-// Copyright 2018 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- "bytes"
- "crypto/rand"
- "encoding/hex"
- "errors"
- "fmt"
- "io"
- "strings"
- "sync"
-)
-
-// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
-// 4122.
-type UUID [16]byte
-
-// A Version represents a UUID's version.
-type Version byte
-
-// A Variant represents a UUID's variant.
-type Variant byte
-
-// Constants returned by Variant.
-const (
- Invalid = Variant(iota) // Invalid UUID
- RFC4122 // The variant specified in RFC4122
- Reserved // Reserved, NCS backward compatibility.
- Microsoft // Reserved, Microsoft Corporation backward compatibility.
- Future // Reserved for future definition.
-)
-
-const randPoolSize = 16 * 16
-
-var (
- rander = rand.Reader // random function
- poolEnabled = false
- poolMu sync.Mutex
- poolPos = randPoolSize // protected with poolMu
- pool [randPoolSize]byte // protected with poolMu
-)
-
-type invalidLengthError struct{ len int }
-
-func (err invalidLengthError) Error() string {
- return fmt.Sprintf("invalid UUID length: %d", err.len)
-}
-
-// IsInvalidLengthError is matcher function for custom error invalidLengthError
-func IsInvalidLengthError(err error) bool {
- _, ok := err.(invalidLengthError)
- return ok
-}
-
-// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both
-// the standard UUID forms defined in RFC 4122
-// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
-// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition,
-// Parse accepts non-standard strings such as the raw hex encoding
-// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings,
-// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are
-// examined in the latter case. Parse should not be used to validate strings as
-// it parses non-standard encodings as indicated above.
-func Parse(s string) (UUID, error) {
- var uuid UUID
- switch len(s) {
- // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- case 36:
-
- // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- case 36 + 9:
- if !strings.EqualFold(s[:9], "urn:uuid:") {
- return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
- }
- s = s[9:]
-
- // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
- case 36 + 2:
- s = s[1:]
-
- // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- case 32:
- var ok bool
- for i := range uuid {
- uuid[i], ok = xtob(s[i*2], s[i*2+1])
- if !ok {
- return uuid, errors.New("invalid UUID format")
- }
- }
- return uuid, nil
- default:
- return uuid, invalidLengthError{len(s)}
- }
- // s is now at least 36 bytes long
- // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
- return uuid, errors.New("invalid UUID format")
- }
- for i, x := range [16]int{
- 0, 2, 4, 6,
- 9, 11,
- 14, 16,
- 19, 21,
- 24, 26, 28, 30, 32, 34,
- } {
- v, ok := xtob(s[x], s[x+1])
- if !ok {
- return uuid, errors.New("invalid UUID format")
- }
- uuid[i] = v
- }
- return uuid, nil
-}
-
-// ParseBytes is like Parse, except it parses a byte slice instead of a string.
-func ParseBytes(b []byte) (UUID, error) {
- var uuid UUID
- switch len(b) {
- case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) {
- return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
- }
- b = b[9:]
- case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
- b = b[1:]
- case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- var ok bool
- for i := 0; i < 32; i += 2 {
- uuid[i/2], ok = xtob(b[i], b[i+1])
- if !ok {
- return uuid, errors.New("invalid UUID format")
- }
- }
- return uuid, nil
- default:
- return uuid, invalidLengthError{len(b)}
- }
- // s is now at least 36 bytes long
- // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
- return uuid, errors.New("invalid UUID format")
- }
- for i, x := range [16]int{
- 0, 2, 4, 6,
- 9, 11,
- 14, 16,
- 19, 21,
- 24, 26, 28, 30, 32, 34,
- } {
- v, ok := xtob(b[x], b[x+1])
- if !ok {
- return uuid, errors.New("invalid UUID format")
- }
- uuid[i] = v
- }
- return uuid, nil
-}
-
-// MustParse is like Parse but panics if the string cannot be parsed.
-// It simplifies safe initialization of global variables holding compiled UUIDs.
-func MustParse(s string) UUID {
- uuid, err := Parse(s)
- if err != nil {
- panic(`uuid: Parse(` + s + `): ` + err.Error())
- }
- return uuid
-}
-
-// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
-// does not have a length of 16. The bytes are copied from the slice.
-func FromBytes(b []byte) (uuid UUID, err error) {
- err = uuid.UnmarshalBinary(b)
- return uuid, err
-}
-
-// Must returns uuid if err is nil and panics otherwise.
-func Must(uuid UUID, err error) UUID {
- if err != nil {
- panic(err)
- }
- return uuid
-}
-
-// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
-// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
-// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
-// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
-// It returns an error if the format is invalid, otherwise nil.
-func Validate(s string) error {
- switch len(s) {
- // Standard UUID format
- case 36:
-
- // UUID with "urn:uuid:" prefix
- case 36 + 9:
- if !strings.EqualFold(s[:9], "urn:uuid:") {
- return fmt.Errorf("invalid urn prefix: %q", s[:9])
- }
- s = s[9:]
-
- // UUID enclosed in braces
- case 36 + 2:
- if s[0] != '{' || s[len(s)-1] != '}' {
- return fmt.Errorf("invalid bracketed UUID format")
- }
- s = s[1 : len(s)-1]
-
- // UUID without hyphens
- case 32:
- for i := 0; i < len(s); i += 2 {
- _, ok := xtob(s[i], s[i+1])
- if !ok {
- return errors.New("invalid UUID format")
- }
- }
-
- default:
- return invalidLengthError{len(s)}
- }
-
- // Check for standard UUID format
- if len(s) == 36 {
- if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
- return errors.New("invalid UUID format")
- }
- for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
- if _, ok := xtob(s[x], s[x+1]); !ok {
- return errors.New("invalid UUID format")
- }
- }
- }
-
- return nil
-}
-
-// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
-// , or "" if uuid is invalid.
-func (uuid UUID) String() string {
- var buf [36]byte
- encodeHex(buf[:], uuid)
- return string(buf[:])
-}
-
-// URN returns the RFC 2141 URN form of uuid,
-// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
-func (uuid UUID) URN() string {
- var buf [36 + 9]byte
- copy(buf[:], "urn:uuid:")
- encodeHex(buf[9:], uuid)
- return string(buf[:])
-}
-
-func encodeHex(dst []byte, uuid UUID) {
- hex.Encode(dst, uuid[:4])
- dst[8] = '-'
- hex.Encode(dst[9:13], uuid[4:6])
- dst[13] = '-'
- hex.Encode(dst[14:18], uuid[6:8])
- dst[18] = '-'
- hex.Encode(dst[19:23], uuid[8:10])
- dst[23] = '-'
- hex.Encode(dst[24:], uuid[10:])
-}
-
-// Variant returns the variant encoded in uuid.
-func (uuid UUID) Variant() Variant {
- switch {
- case (uuid[8] & 0xc0) == 0x80:
- return RFC4122
- case (uuid[8] & 0xe0) == 0xc0:
- return Microsoft
- case (uuid[8] & 0xe0) == 0xe0:
- return Future
- default:
- return Reserved
- }
-}
-
-// Version returns the version of uuid.
-func (uuid UUID) Version() Version {
- return Version(uuid[6] >> 4)
-}
-
-func (v Version) String() string {
- if v > 15 {
- return fmt.Sprintf("BAD_VERSION_%d", v)
- }
- return fmt.Sprintf("VERSION_%d", v)
-}
-
-func (v Variant) String() string {
- switch v {
- case RFC4122:
- return "RFC4122"
- case Reserved:
- return "Reserved"
- case Microsoft:
- return "Microsoft"
- case Future:
- return "Future"
- case Invalid:
- return "Invalid"
- }
- return fmt.Sprintf("BadVariant%d", int(v))
-}
-
-// SetRand sets the random number generator to r, which implements io.Reader.
-// If r.Read returns an error when the package requests random data then
-// a panic will be issued.
-//
-// Calling SetRand with nil sets the random number generator to the default
-// generator.
-func SetRand(r io.Reader) {
- if r == nil {
- rander = rand.Reader
- return
- }
- rander = r
-}
-
-// EnableRandPool enables internal randomness pool used for Random
-// (Version 4) UUID generation. The pool contains random bytes read from
-// the random number generator on demand in batches. Enabling the pool
-// may improve the UUID generation throughput significantly.
-//
-// Since the pool is stored on the Go heap, this feature may be a bad fit
-// for security sensitive applications.
-//
-// Both EnableRandPool and DisableRandPool are not thread-safe and should
-// only be called when there is no possibility that New or any other
-// UUID Version 4 generation function will be called concurrently.
-func EnableRandPool() {
- poolEnabled = true
-}
-
-// DisableRandPool disables the randomness pool if it was previously
-// enabled with EnableRandPool.
-//
-// Both EnableRandPool and DisableRandPool are not thread-safe and should
-// only be called when there is no possibility that New or any other
-// UUID Version 4 generation function will be called concurrently.
-func DisableRandPool() {
- poolEnabled = false
- defer poolMu.Unlock()
- poolMu.Lock()
- poolPos = randPoolSize
-}
-
-// UUIDs is a slice of UUID types.
-type UUIDs []UUID
-
-// Strings returns a string slice containing the string form of each UUID in uuids.
-func (uuids UUIDs) Strings() []string {
- var uuidStrs = make([]string, len(uuids))
- for i, uuid := range uuids {
- uuidStrs[i] = uuid.String()
- }
- return uuidStrs
-}
diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go
deleted file mode 100644
index 46310962..00000000
--- a/vendor/github.com/google/uuid/version1.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- "encoding/binary"
-)
-
-// NewUUID returns a Version 1 UUID based on the current NodeID and clock
-// sequence, and the current time. If the NodeID has not been set by SetNodeID
-// or SetNodeInterface then it will be set automatically. If the NodeID cannot
-// be set NewUUID returns nil. If clock sequence has not been set by
-// SetClockSequence then it will be set automatically. If GetTime fails to
-// return the current NewUUID returns nil and an error.
-//
-// In most cases, New should be used.
-func NewUUID() (UUID, error) {
- var uuid UUID
- now, seq, err := GetTime()
- if err != nil {
- return uuid, err
- }
-
- timeLow := uint32(now & 0xffffffff)
- timeMid := uint16((now >> 32) & 0xffff)
- timeHi := uint16((now >> 48) & 0x0fff)
- timeHi |= 0x1000 // Version 1
-
- binary.BigEndian.PutUint32(uuid[0:], timeLow)
- binary.BigEndian.PutUint16(uuid[4:], timeMid)
- binary.BigEndian.PutUint16(uuid[6:], timeHi)
- binary.BigEndian.PutUint16(uuid[8:], seq)
-
- nodeMu.Lock()
- if nodeID == zeroID {
- setNodeInterface("")
- }
- copy(uuid[10:], nodeID[:])
- nodeMu.Unlock()
-
- return uuid, nil
-}
diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go
deleted file mode 100644
index 7697802e..00000000
--- a/vendor/github.com/google/uuid/version4.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2016 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import "io"
-
-// New creates a new random UUID or panics. New is equivalent to
-// the expression
-//
-// uuid.Must(uuid.NewRandom())
-func New() UUID {
- return Must(NewRandom())
-}
-
-// NewString creates a new random UUID and returns it as a string or panics.
-// NewString is equivalent to the expression
-//
-// uuid.New().String()
-func NewString() string {
- return Must(NewRandom()).String()
-}
-
-// NewRandom returns a Random (Version 4) UUID.
-//
-// The strength of the UUIDs is based on the strength of the crypto/rand
-// package.
-//
-// Uses the randomness pool if it was enabled with EnableRandPool.
-//
-// A note about uniqueness derived from the UUID Wikipedia entry:
-//
-// Randomly generated UUIDs have 122 random bits. One's annual risk of being
-// hit by a meteorite is estimated to be one chance in 17 billion, that
-// means the probability is about 0.00000000006 (6 × 10−11),
-// equivalent to the odds of creating a few tens of trillions of UUIDs in a
-// year and having one duplicate.
-func NewRandom() (UUID, error) {
- if !poolEnabled {
- return NewRandomFromReader(rander)
- }
- return newRandomFromPool()
-}
-
-// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
-func NewRandomFromReader(r io.Reader) (UUID, error) {
- var uuid UUID
- _, err := io.ReadFull(r, uuid[:])
- if err != nil {
- return Nil, err
- }
- uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
- uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
- return uuid, nil
-}
-
-func newRandomFromPool() (UUID, error) {
- var uuid UUID
- poolMu.Lock()
- if poolPos == randPoolSize {
- _, err := io.ReadFull(rander, pool[:])
- if err != nil {
- poolMu.Unlock()
- return Nil, err
- }
- poolPos = 0
- }
- copy(uuid[:], pool[poolPos:(poolPos+16)])
- poolPos += 16
- poolMu.Unlock()
-
- uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
- uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
- return uuid, nil
-}
diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go
deleted file mode 100644
index 339a959a..00000000
--- a/vendor/github.com/google/uuid/version6.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2023 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import "encoding/binary"
-
-// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
-// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
-// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
-//
-// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
-//
-// NewV6 returns a Version 6 UUID based on the current NodeID and clock
-// sequence, and the current time. If the NodeID has not been set by SetNodeID
-// or SetNodeInterface then it will be set automatically. If the NodeID cannot
-// be set NewV6 set NodeID is random bits automatically . If clock sequence has not been set by
-// SetClockSequence then it will be set automatically. If GetTime fails to
-// return the current NewV6 returns Nil and an error.
-func NewV6() (UUID, error) {
- var uuid UUID
- now, seq, err := GetTime()
- if err != nil {
- return uuid, err
- }
-
- /*
- 0 1 2 3
- 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | time_high |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | time_mid | time_low_and_version |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- |clk_seq_hi_res | clk_seq_low | node (0-1) |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | node (2-5) |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- */
-
- binary.BigEndian.PutUint64(uuid[0:], uint64(now))
- binary.BigEndian.PutUint16(uuid[8:], seq)
-
- uuid[6] = 0x60 | (uuid[6] & 0x0F)
- uuid[8] = 0x80 | (uuid[8] & 0x3F)
-
- nodeMu.Lock()
- if nodeID == zeroID {
- setNodeInterface("")
- }
- copy(uuid[10:], nodeID[:])
- nodeMu.Unlock()
-
- return uuid, nil
-}
diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go
deleted file mode 100644
index 3167b643..00000000
--- a/vendor/github.com/google/uuid/version7.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2023 Google Inc. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package uuid
-
-import (
- "io"
-)
-
-// UUID version 7 features a time-ordered value field derived from the widely
-// implemented and well known Unix Epoch timestamp source,
-// the number of milliseconds seconds since midnight 1 Jan 1970 UTC, leap seconds excluded.
-// As well as improved entropy characteristics over versions 1 or 6.
-//
-// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
-//
-// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
-//
-// NewV7 returns a Version 7 UUID based on the current time(Unix Epoch).
-// Uses the randomness pool if it was enabled with EnableRandPool.
-// On error, NewV7 returns Nil and an error
-func NewV7() (UUID, error) {
- uuid, err := NewRandom()
- if err != nil {
- return uuid, err
- }
- makeV7(uuid[:])
- return uuid, nil
-}
-
-// NewV7FromReader returns a Version 7 UUID based on the current time(Unix Epoch).
-// it use NewRandomFromReader fill random bits.
-// On error, NewV7FromReader returns Nil and an error.
-func NewV7FromReader(r io.Reader) (UUID, error) {
- uuid, err := NewRandomFromReader(r)
- if err != nil {
- return uuid, err
- }
-
- makeV7(uuid[:])
- return uuid, nil
-}
-
-// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6])
-// uuid[8] already has the right version number (Variant is 10)
-// see function NewV7 and NewV7FromReader
-func makeV7(uuid []byte) {
- /*
- 0 1 2 3
- 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | unix_ts_ms |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | unix_ts_ms | ver | rand_a (12 bit seq) |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- |var| rand_b |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- | rand_b |
- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- */
- _ = uuid[15] // bounds check
-
- t, s := getV7Time()
-
- uuid[0] = byte(t >> 40)
- uuid[1] = byte(t >> 32)
- uuid[2] = byte(t >> 24)
- uuid[3] = byte(t >> 16)
- uuid[4] = byte(t >> 8)
- uuid[5] = byte(t)
-
- uuid[6] = 0x70 | (0x0F & byte(s>>8))
- uuid[7] = byte(s)
-}
-
-// lastV7time is the last time we returned stored as:
-//
-// 52 bits of time in milliseconds since epoch
-// 12 bits of (fractional nanoseconds) >> 8
-var lastV7time int64
-
-const nanoPerMilli = 1000000
-
-// getV7Time returns the time in milliseconds and nanoseconds / 256.
-// The returned (milli << 12 + seq) is guarenteed to be greater than
-// (milli << 12 + seq) returned by any previous call to getV7Time.
-func getV7Time() (milli, seq int64) {
- timeMu.Lock()
- defer timeMu.Unlock()
-
- nano := timeNow().UnixNano()
- milli = nano / nanoPerMilli
- // Sequence number is between 0 and 3906 (nanoPerMilli>>8)
- seq = (nano - milli*nanoPerMilli) >> 8
- now := milli<<12 + seq
- if now <= lastV7time {
- now = lastV7time + 1
- milli = now >> 12
- seq = now & 0xfff
- }
- lastV7time = now
- return milli, seq
-}
diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE b/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE
deleted file mode 100644
index d6456956..00000000
--- a/vendor/github.com/googleapis/enterprise-certificate-proxy/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go
deleted file mode 100644
index ea5beb5a..00000000
--- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright 2022 Google LLC.
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package client is a cross-platform client for the signer binary (a.k.a."EnterpriseCertSigner").
-//
-// The signer binary is OS-specific, but exposes a standard set of APIs for the client to use.
-package client
-
-import (
- "crypto"
- "crypto/ecdsa"
- "crypto/rsa"
- "crypto/x509"
- "encoding/gob"
- "errors"
- "fmt"
- "io"
- "net/rpc"
- "os"
- "os/exec"
-
- "github.com/googleapis/enterprise-certificate-proxy/client/util"
-)
-
-const signAPI = "EnterpriseCertSigner.Sign"
-const certificateChainAPI = "EnterpriseCertSigner.CertificateChain"
-const publicKeyAPI = "EnterpriseCertSigner.Public"
-const encryptAPI = "EnterpriseCertSigner.Encrypt"
-const decryptAPI = "EnterpriseCertSigner.Decrypt"
-
-// A Connection wraps a pair of unidirectional streams as an io.ReadWriteCloser.
-type Connection struct {
- io.ReadCloser
- io.WriteCloser
-}
-
-// Close closes c's underlying ReadCloser and WriteCloser.
-func (c *Connection) Close() error {
- rerr := c.ReadCloser.Close()
- werr := c.WriteCloser.Close()
- if rerr != nil {
- return rerr
- }
- return werr
-}
-
-func init() {
- gob.Register(crypto.SHA256)
- gob.Register(crypto.SHA384)
- gob.Register(crypto.SHA512)
- gob.Register(&rsa.PSSOptions{})
- gob.Register(&rsa.OAEPOptions{})
-}
-
-// SignArgs contains arguments for a Sign API call.
-type SignArgs struct {
- Digest []byte // The content to sign.
- Opts crypto.SignerOpts // Options for signing. Must implement HashFunc().
-}
-
-// EncryptArgs contains arguments for an Encrypt API call.
-type EncryptArgs struct {
- Plaintext []byte // The plaintext to encrypt.
- Opts any // Options for encryption. Ex: an instance of crypto.Hash.
-}
-
-// DecryptArgs contains arguments to for a Decrypt API call.
-type DecryptArgs struct {
- Ciphertext []byte // The ciphertext to decrypt.
- Opts crypto.DecrypterOpts // Options for decryption. Ex: an instance of *rsa.OAEPOptions.
-}
-
-// Key implements credential.Credential by holding the executed signer subprocess.
-type Key struct {
- cmd *exec.Cmd // Pointer to the signer subprocess.
- client *rpc.Client // Pointer to the rpc client that communicates with the signer subprocess.
- publicKey crypto.PublicKey // Public key of loaded certificate.
- chain [][]byte // Certificate chain of loaded certificate.
-}
-
-// CertificateChain returns the credential as a raw X509 cert chain. This contains the public key.
-func (k *Key) CertificateChain() [][]byte {
- return k.chain
-}
-
-// Close closes the RPC connection and kills the signer subprocess.
-// Call this to free up resources when the Key object is no longer needed.
-func (k *Key) Close() error {
- if err := k.cmd.Process.Kill(); err != nil {
- return fmt.Errorf("failed to kill signer process: %w", err)
- }
- // Wait for cmd to exit and release resources. Since the process is forcefully killed, this
- // will return a non-nil error (varies by OS), which we will ignore.
- _ = k.cmd.Wait()
- // The Pipes connecting the RPC client should have been closed when the signer subprocess was killed.
- // Calling `k.client.Close()` before `k.cmd.Process.Kill()` or `k.cmd.Wait()` _will_ cause a segfault.
- if err := k.client.Close(); err.Error() != "close |0: file already closed" {
- return fmt.Errorf("failed to close RPC connection: %w", err)
- }
- return nil
-}
-
-// Public returns the public key for this Key.
-func (k *Key) Public() crypto.PublicKey {
- return k.publicKey
-}
-
-// Sign signs a message digest, using the specified signer opts. Implements crypto.Signer interface.
-func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed []byte, err error) {
- if opts != nil && opts.HashFunc() != 0 && len(digest) != opts.HashFunc().Size() {
- return nil, fmt.Errorf("Digest length of %v bytes does not match Hash function size of %v bytes", len(digest), opts.HashFunc().Size())
- }
- err = k.client.Call(signAPI, SignArgs{Digest: digest, Opts: opts}, &signed)
- return
-}
-
-// Encrypt encrypts a plaintext msg into ciphertext, using the specified encrypt opts.
-func (k *Key) Encrypt(_ io.Reader, msg []byte, opts any) (ciphertext []byte, err error) {
- err = k.client.Call(encryptAPI, EncryptArgs{Plaintext: msg, Opts: opts}, &ciphertext)
- return
-}
-
-// Decrypt decrypts a ciphertext msg into plaintext, using the specified decrypter opts. Implements crypto.Decrypter interface.
-func (k *Key) Decrypt(_ io.Reader, msg []byte, opts crypto.DecrypterOpts) (plaintext []byte, err error) {
- err = k.client.Call(decryptAPI, DecryptArgs{Ciphertext: msg, Opts: opts}, &plaintext)
- return
-}
-
-// ErrCredUnavailable is a sentinel error that indicates ECP Cred is unavailable,
-// possibly due to missing config or missing binary path.
-var ErrCredUnavailable = errors.New("Cred is unavailable")
-
-// Cred spawns a signer subprocess that listens on stdin/stdout to perform certificate
-// related operations, including signing messages with the private key.
-//
-// The signer binary path is read from the specified configFilePath, if provided.
-// Otherwise, use the default config file path.
-//
-// The config file also specifies which certificate the signer should use.
-func Cred(configFilePath string) (*Key, error) {
- if configFilePath == "" {
- envFilePath := util.GetConfigFilePathFromEnv()
- if envFilePath != "" {
- configFilePath = envFilePath
- } else {
- configFilePath = util.GetDefaultConfigFilePath()
- }
- }
- enterpriseCertSignerPath, err := util.LoadSignerBinaryPath(configFilePath)
- if err != nil {
- if errors.Is(err, util.ErrConfigUnavailable) {
- return nil, ErrCredUnavailable
- }
- return nil, err
- }
- k := &Key{
- cmd: exec.Command(enterpriseCertSignerPath, configFilePath),
- }
-
- // Redirect errors from subprocess to parent process.
- k.cmd.Stderr = os.Stderr
-
- // RPC client will communicate with subprocess over stdin/stdout.
- kin, err := k.cmd.StdinPipe()
- if err != nil {
- return nil, err
- }
- kout, err := k.cmd.StdoutPipe()
- if err != nil {
- return nil, err
- }
- k.client = rpc.NewClient(&Connection{kout, kin})
-
- if err := k.cmd.Start(); err != nil {
- return nil, fmt.Errorf("starting enterprise cert signer subprocess: %w", err)
- }
-
- if err := k.client.Call(certificateChainAPI, struct{}{}, &k.chain); err != nil {
- return nil, fmt.Errorf("failed to retrieve certificate chain: %w", err)
- }
-
- var publicKeyBytes []byte
- if err := k.client.Call(publicKeyAPI, struct{}{}, &publicKeyBytes); err != nil {
- return nil, fmt.Errorf("failed to retrieve public key: %w", err)
- }
-
- publicKey, err := x509.ParsePKIXPublicKey(publicKeyBytes)
- if err != nil {
- return nil, fmt.Errorf("failed to parse public key: %w", err)
- }
-
- var ok bool
- k.publicKey, ok = publicKey.(crypto.PublicKey)
- if !ok {
- return nil, fmt.Errorf("invalid public key type: %T", publicKey)
- }
-
- switch pub := k.publicKey.(type) {
- case *rsa.PublicKey:
- if pub.Size() < 256 {
- return nil, fmt.Errorf("RSA modulus size is less than 2048 bits: %v", pub.Size()*8)
- }
- case *ecdsa.PublicKey:
- default:
- return nil, fmt.Errorf("unsupported public key type: %v", pub)
- }
-
- return k, nil
-}
diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go
deleted file mode 100644
index f374a7f5..00000000
--- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2022 Google LLC.
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package util provides helper functions for the client.
-package util
-
-import (
- "encoding/json"
- "errors"
- "io"
- "os"
- "os/user"
- "path/filepath"
- "runtime"
- "strings"
-)
-
-const configFileName = "certificate_config.json"
-
-// EnterpriseCertificateConfig contains parameters for initializing signer.
-type EnterpriseCertificateConfig struct {
- Libs Libs `json:"libs"`
-}
-
-// Libs specifies the locations of helper libraries.
-type Libs struct {
- ECP string `json:"ecp"`
-}
-
-// ErrConfigUnavailable is a sentinel error that indicates ECP config is unavailable,
-// possibly due to entire config missing or missing binary path.
-var ErrConfigUnavailable = errors.New("Config is unavailable")
-
-// LoadSignerBinaryPath retrieves the path of the signer binary from the config file.
-func LoadSignerBinaryPath(configFilePath string) (path string, err error) {
- jsonFile, err := os.Open(configFilePath)
- if err != nil {
- if errors.Is(err, os.ErrNotExist) {
- return "", ErrConfigUnavailable
- }
- return "", err
- }
-
- byteValue, err := io.ReadAll(jsonFile)
- if err != nil {
- return "", err
- }
- var config EnterpriseCertificateConfig
- err = json.Unmarshal(byteValue, &config)
- if err != nil {
- return "", err
- }
- signerBinaryPath := config.Libs.ECP
- if signerBinaryPath == "" {
- return "", ErrConfigUnavailable
- }
-
- signerBinaryPath = strings.ReplaceAll(signerBinaryPath, "~", guessHomeDir())
- signerBinaryPath = strings.ReplaceAll(signerBinaryPath, "$HOME", guessHomeDir())
- return signerBinaryPath, nil
-}
-
-func guessHomeDir() string {
- // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470
- if v := os.Getenv("HOME"); v != "" {
- return v
- }
- // Else, fall back to user.Current:
- if u, err := user.Current(); err == nil {
- return u.HomeDir
- }
- return ""
-}
-
-func getDefaultConfigFileDirectory() (directory string) {
- if runtime.GOOS == "windows" {
- return filepath.Join(os.Getenv("APPDATA"), "gcloud")
- }
- return filepath.Join(guessHomeDir(), ".config/gcloud")
-}
-
-// GetDefaultConfigFilePath returns the default path of the enterprise certificate config file created by gCloud.
-func GetDefaultConfigFilePath() (path string) {
- return filepath.Join(getDefaultConfigFileDirectory(), configFileName)
-}
-
-// GetConfigFilePathFromEnv returns the path associated with environment variable GOOGLE_API_CERTIFICATE_CONFIG
-func GetConfigFilePathFromEnv() (path string) {
- return os.Getenv("GOOGLE_API_CERTIFICATE_CONFIG")
-}
diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json
deleted file mode 100644
index 433693a6..00000000
--- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json
+++ /dev/null
@@ -1,3 +0,0 @@
-{
- "v2": "2.12.5"
-}
diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md
deleted file mode 100644
index b64522df..00000000
--- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md
+++ /dev/null
@@ -1,142 +0,0 @@
-# Changelog
-
-## [2.12.5](https://github.com/googleapis/gax-go/compare/v2.12.4...v2.12.5) (2024-06-18)
-
-
-### Bug Fixes
-
-* **v2/apierror:** fix (*APIError).Error() for unwrapped Status ([#351](https://github.com/googleapis/gax-go/issues/351)) ([22c16e7](https://github.com/googleapis/gax-go/commit/22c16e7bff5402bdc4c25063771cdd01c650b500)), refs [#350](https://github.com/googleapis/gax-go/issues/350)
-
-## [2.12.4](https://github.com/googleapis/gax-go/compare/v2.12.3...v2.12.4) (2024-05-03)
-
-
-### Bug Fixes
-
-* provide unmarshal options for streams ([#343](https://github.com/googleapis/gax-go/issues/343)) ([ddf9a90](https://github.com/googleapis/gax-go/commit/ddf9a90bf180295d49875e15cb80b2136a49dbaf))
-
-## [2.12.3](https://github.com/googleapis/gax-go/compare/v2.12.2...v2.12.3) (2024-03-14)
-
-
-### Bug Fixes
-
-* bump protobuf dep to v1.33 ([#333](https://github.com/googleapis/gax-go/issues/333)) ([2892b22](https://github.com/googleapis/gax-go/commit/2892b22c1ae8a70dec3448d82e634643fe6c1be2))
-
-## [2.12.2](https://github.com/googleapis/gax-go/compare/v2.12.1...v2.12.2) (2024-02-23)
-
-
-### Bug Fixes
-
-* **v2/callctx:** fix SetHeader race by cloning header map ([#326](https://github.com/googleapis/gax-go/issues/326)) ([534311f](https://github.com/googleapis/gax-go/commit/534311f0f163d101f30657736c0e6f860e9c39dc))
-
-## [2.12.1](https://github.com/googleapis/gax-go/compare/v2.12.0...v2.12.1) (2024-02-13)
-
-
-### Bug Fixes
-
-* add XGoogFieldMaskHeader constant ([#321](https://github.com/googleapis/gax-go/issues/321)) ([666ee08](https://github.com/googleapis/gax-go/commit/666ee08931041b7fed56bed7132649785b2d3dfe))
-
-## [2.12.0](https://github.com/googleapis/gax-go/compare/v2.11.0...v2.12.0) (2023-06-26)
-
-
-### Features
-
-* **v2/callctx:** add new callctx package ([#291](https://github.com/googleapis/gax-go/issues/291)) ([11503ed](https://github.com/googleapis/gax-go/commit/11503ed98df4ae1bbdedf91ff64d47e63f187d68))
-* **v2:** add BuildHeaders and InsertMetadataIntoOutgoingContext to header ([#290](https://github.com/googleapis/gax-go/issues/290)) ([6a4b89f](https://github.com/googleapis/gax-go/commit/6a4b89f5551a40262e7c3caf2e1bdc7321b76ea1))
-
-## [2.11.0](https://github.com/googleapis/gax-go/compare/v2.10.0...v2.11.0) (2023-06-13)
-
-
-### Features
-
-* **v2:** add GoVersion package variable ([#283](https://github.com/googleapis/gax-go/issues/283)) ([26553cc](https://github.com/googleapis/gax-go/commit/26553ccadb4016b189881f52e6c253b68bb3e3d5))
-
-
-### Bug Fixes
-
-* **v2:** handle space in non-devel go version ([#288](https://github.com/googleapis/gax-go/issues/288)) ([fd7bca0](https://github.com/googleapis/gax-go/commit/fd7bca029a1c5e63def8f0a5fd1ec3f725d92f75))
-
-## [2.10.0](https://github.com/googleapis/gax-go/compare/v2.9.1...v2.10.0) (2023-05-30)
-
-
-### Features
-
-* update dependencies ([#280](https://github.com/googleapis/gax-go/issues/280)) ([4514281](https://github.com/googleapis/gax-go/commit/4514281058590f3637c36bfd49baa65c4d3cfb21))
-
-## [2.9.1](https://github.com/googleapis/gax-go/compare/v2.9.0...v2.9.1) (2023-05-23)
-
-
-### Bug Fixes
-
-* **v2:** drop cloud lro test dep ([#276](https://github.com/googleapis/gax-go/issues/276)) ([c67eeba](https://github.com/googleapis/gax-go/commit/c67eeba0f10a3294b1d93c1b8fbe40211a55ae5f)), refs [#270](https://github.com/googleapis/gax-go/issues/270)
-
-## [2.9.0](https://github.com/googleapis/gax-go/compare/v2.8.0...v2.9.0) (2023-05-22)
-
-
-### Features
-
-* **apierror:** add method to return HTTP status code conditionally ([#274](https://github.com/googleapis/gax-go/issues/274)) ([5874431](https://github.com/googleapis/gax-go/commit/587443169acd10f7f86d1989dc8aaf189e645e98)), refs [#229](https://github.com/googleapis/gax-go/issues/229)
-
-
-### Documentation
-
-* add ref to usage with clients ([#272](https://github.com/googleapis/gax-go/issues/272)) ([ea4d72d](https://github.com/googleapis/gax-go/commit/ea4d72d514beba4de450868b5fb028601a29164e)), refs [#228](https://github.com/googleapis/gax-go/issues/228)
-
-## [2.8.0](https://github.com/googleapis/gax-go/compare/v2.7.1...v2.8.0) (2023-03-15)
-
-
-### Features
-
-* **v2:** add WithTimeout option ([#259](https://github.com/googleapis/gax-go/issues/259)) ([9a8da43](https://github.com/googleapis/gax-go/commit/9a8da43693002448b1e8758023699387481866d1))
-
-## [2.7.1](https://github.com/googleapis/gax-go/compare/v2.7.0...v2.7.1) (2023-03-06)
-
-
-### Bug Fixes
-
-* **v2/apierror:** return Unknown GRPCStatus when err source is HTTP ([#260](https://github.com/googleapis/gax-go/issues/260)) ([043b734](https://github.com/googleapis/gax-go/commit/043b73437a240a91229207fb3ee52a9935a36f23)), refs [#254](https://github.com/googleapis/gax-go/issues/254)
-
-## [2.7.0](https://github.com/googleapis/gax-go/compare/v2.6.0...v2.7.0) (2022-11-02)
-
-
-### Features
-
-* update google.golang.org/api to latest ([#240](https://github.com/googleapis/gax-go/issues/240)) ([f690a02](https://github.com/googleapis/gax-go/commit/f690a02c806a2903bdee943ede3a58e3a331ebd6))
-* **v2/apierror:** add apierror.FromWrappingError ([#238](https://github.com/googleapis/gax-go/issues/238)) ([9dbd96d](https://github.com/googleapis/gax-go/commit/9dbd96d59b9d54ceb7c025513aa8c1a9d727382f))
-
-## [2.6.0](https://github.com/googleapis/gax-go/compare/v2.5.1...v2.6.0) (2022-10-13)
-
-
-### Features
-
-* **v2:** copy DetermineContentType functionality ([#230](https://github.com/googleapis/gax-go/issues/230)) ([2c52a70](https://github.com/googleapis/gax-go/commit/2c52a70bae965397f740ed27d46aabe89ff249b3))
-
-## [2.5.1](https://github.com/googleapis/gax-go/compare/v2.5.0...v2.5.1) (2022-08-04)
-
-
-### Bug Fixes
-
-* **v2:** resolve bad genproto pseudoversion in go.mod ([#218](https://github.com/googleapis/gax-go/issues/218)) ([1379b27](https://github.com/googleapis/gax-go/commit/1379b27e9846d959f7e1163b9ef298b3c92c8d23))
-
-## [2.5.0](https://github.com/googleapis/gax-go/compare/v2.4.0...v2.5.0) (2022-08-04)
-
-
-### Features
-
-* add ExtractProtoMessage to apierror ([#213](https://github.com/googleapis/gax-go/issues/213)) ([a6ce70c](https://github.com/googleapis/gax-go/commit/a6ce70c725c890533a9de6272d3b5ba2e336d6bb))
-
-## [2.4.0](https://github.com/googleapis/gax-go/compare/v2.3.0...v2.4.0) (2022-05-09)
-
-
-### Features
-
-* **v2:** add OnHTTPCodes CallOption ([#188](https://github.com/googleapis/gax-go/issues/188)) ([ba7c534](https://github.com/googleapis/gax-go/commit/ba7c5348363ab6c33e1cee3c03c0be68a46ca07c))
-
-
-### Bug Fixes
-
-* **v2/apierror:** use errors.As in FromError ([#189](https://github.com/googleapis/gax-go/issues/189)) ([f30f05b](https://github.com/googleapis/gax-go/commit/f30f05be583828f4c09cca4091333ea88ff8d79e))
-
-
-### Miscellaneous Chores
-
-* **v2:** bump release-please processing ([#192](https://github.com/googleapis/gax-go/issues/192)) ([56172f9](https://github.com/googleapis/gax-go/commit/56172f971d1141d7687edaac053ad3470af76719))
diff --git a/vendor/github.com/googleapis/gax-go/v2/LICENSE b/vendor/github.com/googleapis/gax-go/v2/LICENSE
deleted file mode 100644
index 6d16b657..00000000
--- a/vendor/github.com/googleapis/gax-go/v2/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright 2016, Google Inc.
-All rights reserved.
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go
deleted file mode 100644
index 7de60773..00000000
--- a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go
+++ /dev/null
@@ -1,363 +0,0 @@
-// Copyright 2021, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Package apierror implements a wrapper error for parsing error details from
-// API calls. Both HTTP & gRPC status errors are supported.
-//
-// For examples of how to use [APIError] with client libraries please reference
-// [Inspecting errors](https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors)
-// in the client library documentation.
-package apierror
-
-import (
- "errors"
- "fmt"
- "strings"
-
- jsonerror "github.com/googleapis/gax-go/v2/apierror/internal/proto"
- "google.golang.org/api/googleapi"
- "google.golang.org/genproto/googleapis/rpc/errdetails"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
- "google.golang.org/protobuf/encoding/protojson"
- "google.golang.org/protobuf/proto"
-)
-
-// ErrDetails holds the google/rpc/error_details.proto messages.
-type ErrDetails struct {
- ErrorInfo *errdetails.ErrorInfo
- BadRequest *errdetails.BadRequest
- PreconditionFailure *errdetails.PreconditionFailure
- QuotaFailure *errdetails.QuotaFailure
- RetryInfo *errdetails.RetryInfo
- ResourceInfo *errdetails.ResourceInfo
- RequestInfo *errdetails.RequestInfo
- DebugInfo *errdetails.DebugInfo
- Help *errdetails.Help
- LocalizedMessage *errdetails.LocalizedMessage
-
- // Unknown stores unidentifiable error details.
- Unknown []interface{}
-}
-
-// ErrMessageNotFound is used to signal ExtractProtoMessage found no matching messages.
-var ErrMessageNotFound = errors.New("message not found")
-
-// ExtractProtoMessage provides a mechanism for extracting protobuf messages from the
-// Unknown error details. If ExtractProtoMessage finds an unknown message of the same type,
-// the content of the message is copied to the provided message.
-//
-// ExtractProtoMessage will return ErrMessageNotFound if there are no message matching the
-// protocol buffer type of the provided message.
-func (e ErrDetails) ExtractProtoMessage(v proto.Message) error {
- if v == nil {
- return ErrMessageNotFound
- }
- for _, elem := range e.Unknown {
- if elemProto, ok := elem.(proto.Message); ok {
- if v.ProtoReflect().Type() == elemProto.ProtoReflect().Type() {
- proto.Merge(v, elemProto)
- return nil
- }
- }
- }
- return ErrMessageNotFound
-}
-
-func (e ErrDetails) String() string {
- var d strings.Builder
- if e.ErrorInfo != nil {
- d.WriteString(fmt.Sprintf("error details: name = ErrorInfo reason = %s domain = %s metadata = %s\n",
- e.ErrorInfo.GetReason(), e.ErrorInfo.GetDomain(), e.ErrorInfo.GetMetadata()))
- }
-
- if e.BadRequest != nil {
- v := e.BadRequest.GetFieldViolations()
- var f []string
- var desc []string
- for _, x := range v {
- f = append(f, x.GetField())
- desc = append(desc, x.GetDescription())
- }
- d.WriteString(fmt.Sprintf("error details: name = BadRequest field = %s desc = %s\n",
- strings.Join(f, " "), strings.Join(desc, " ")))
- }
-
- if e.PreconditionFailure != nil {
- v := e.PreconditionFailure.GetViolations()
- var t []string
- var s []string
- var desc []string
- for _, x := range v {
- t = append(t, x.GetType())
- s = append(s, x.GetSubject())
- desc = append(desc, x.GetDescription())
- }
- d.WriteString(fmt.Sprintf("error details: name = PreconditionFailure type = %s subj = %s desc = %s\n", strings.Join(t, " "),
- strings.Join(s, " "), strings.Join(desc, " ")))
- }
-
- if e.QuotaFailure != nil {
- v := e.QuotaFailure.GetViolations()
- var s []string
- var desc []string
- for _, x := range v {
- s = append(s, x.GetSubject())
- desc = append(desc, x.GetDescription())
- }
- d.WriteString(fmt.Sprintf("error details: name = QuotaFailure subj = %s desc = %s\n",
- strings.Join(s, " "), strings.Join(desc, " ")))
- }
-
- if e.RequestInfo != nil {
- d.WriteString(fmt.Sprintf("error details: name = RequestInfo id = %s data = %s\n",
- e.RequestInfo.GetRequestId(), e.RequestInfo.GetServingData()))
- }
-
- if e.ResourceInfo != nil {
- d.WriteString(fmt.Sprintf("error details: name = ResourceInfo type = %s resourcename = %s owner = %s desc = %s\n",
- e.ResourceInfo.GetResourceType(), e.ResourceInfo.GetResourceName(),
- e.ResourceInfo.GetOwner(), e.ResourceInfo.GetDescription()))
-
- }
- if e.RetryInfo != nil {
- d.WriteString(fmt.Sprintf("error details: retry in %s\n", e.RetryInfo.GetRetryDelay().AsDuration()))
-
- }
- if e.Unknown != nil {
- var s []string
- for _, x := range e.Unknown {
- s = append(s, fmt.Sprintf("%v", x))
- }
- d.WriteString(fmt.Sprintf("error details: name = Unknown desc = %s\n", strings.Join(s, " ")))
- }
-
- if e.DebugInfo != nil {
- d.WriteString(fmt.Sprintf("error details: name = DebugInfo detail = %s stack = %s\n", e.DebugInfo.GetDetail(),
- strings.Join(e.DebugInfo.GetStackEntries(), " ")))
- }
- if e.Help != nil {
- var desc []string
- var url []string
- for _, x := range e.Help.Links {
- desc = append(desc, x.GetDescription())
- url = append(url, x.GetUrl())
- }
- d.WriteString(fmt.Sprintf("error details: name = Help desc = %s url = %s\n",
- strings.Join(desc, " "), strings.Join(url, " ")))
- }
- if e.LocalizedMessage != nil {
- d.WriteString(fmt.Sprintf("error details: name = LocalizedMessage locale = %s msg = %s\n",
- e.LocalizedMessage.GetLocale(), e.LocalizedMessage.GetMessage()))
- }
-
- return d.String()
-}
-
-// APIError wraps either a gRPC Status error or a HTTP googleapi.Error. It
-// implements error and Status interfaces.
-type APIError struct {
- err error
- status *status.Status
- httpErr *googleapi.Error
- details ErrDetails
-}
-
-// Details presents the error details of the APIError.
-func (a *APIError) Details() ErrDetails {
- return a.details
-}
-
-// Unwrap extracts the original error.
-func (a *APIError) Unwrap() error {
- return a.err
-}
-
-// Error returns a readable representation of the APIError.
-func (a *APIError) Error() string {
- var msg string
- if a.httpErr != nil {
- // Truncate the googleapi.Error message because it dumps the Details in
- // an ugly way.
- msg = fmt.Sprintf("googleapi: Error %d: %s", a.httpErr.Code, a.httpErr.Message)
- } else if a.status != nil && a.err != nil {
- msg = a.err.Error()
- } else if a.status != nil {
- msg = a.status.Message()
- }
- return strings.TrimSpace(fmt.Sprintf("%s\n%s", msg, a.details))
-}
-
-// GRPCStatus extracts the underlying gRPC Status error.
-// This method is necessary to fulfill the interface
-// described in https://pkg.go.dev/google.golang.org/grpc/status#FromError.
-func (a *APIError) GRPCStatus() *status.Status {
- return a.status
-}
-
-// Reason returns the reason in an ErrorInfo.
-// If ErrorInfo is nil, it returns an empty string.
-func (a *APIError) Reason() string {
- return a.details.ErrorInfo.GetReason()
-}
-
-// Domain returns the domain in an ErrorInfo.
-// If ErrorInfo is nil, it returns an empty string.
-func (a *APIError) Domain() string {
- return a.details.ErrorInfo.GetDomain()
-}
-
-// Metadata returns the metadata in an ErrorInfo.
-// If ErrorInfo is nil, it returns nil.
-func (a *APIError) Metadata() map[string]string {
- return a.details.ErrorInfo.GetMetadata()
-
-}
-
-// setDetailsFromError parses a Status error or a googleapi.Error
-// and sets status and details or httpErr and details, respectively.
-// It returns false if neither Status nor googleapi.Error can be parsed.
-// When err is a googleapi.Error, the status of the returned error will
-// be set to an Unknown error, rather than nil, since a nil code is
-// interpreted as OK in the gRPC status package.
-func (a *APIError) setDetailsFromError(err error) bool {
- st, isStatus := status.FromError(err)
- var herr *googleapi.Error
- isHTTPErr := errors.As(err, &herr)
-
- switch {
- case isStatus:
- a.status = st
- a.details = parseDetails(st.Details())
- case isHTTPErr:
- a.httpErr = herr
- a.details = parseHTTPDetails(herr)
- a.status = status.New(codes.Unknown, herr.Message)
- default:
- return false
- }
- return true
-}
-
-// FromError parses a Status error or a googleapi.Error and builds an
-// APIError, wrapping the provided error in the new APIError. It
-// returns false if neither Status nor googleapi.Error can be parsed.
-func FromError(err error) (*APIError, bool) {
- return ParseError(err, true)
-}
-
-// ParseError parses a Status error or a googleapi.Error and builds an
-// APIError. If wrap is true, it wraps the error in the new APIError.
-// It returns false if neither Status nor googleapi.Error can be parsed.
-func ParseError(err error, wrap bool) (*APIError, bool) {
- if err == nil {
- return nil, false
- }
- ae := APIError{}
- if wrap {
- ae = APIError{err: err}
- }
- if !ae.setDetailsFromError(err) {
- return nil, false
- }
- return &ae, true
-}
-
-// parseDetails accepts a slice of interface{} that should be backed by some
-// sort of proto.Message that can be cast to the google/rpc/error_details.proto
-// types.
-//
-// This is for internal use only.
-func parseDetails(details []interface{}) ErrDetails {
- var ed ErrDetails
- for _, d := range details {
- switch d := d.(type) {
- case *errdetails.ErrorInfo:
- ed.ErrorInfo = d
- case *errdetails.BadRequest:
- ed.BadRequest = d
- case *errdetails.PreconditionFailure:
- ed.PreconditionFailure = d
- case *errdetails.QuotaFailure:
- ed.QuotaFailure = d
- case *errdetails.RetryInfo:
- ed.RetryInfo = d
- case *errdetails.ResourceInfo:
- ed.ResourceInfo = d
- case *errdetails.RequestInfo:
- ed.RequestInfo = d
- case *errdetails.DebugInfo:
- ed.DebugInfo = d
- case *errdetails.Help:
- ed.Help = d
- case *errdetails.LocalizedMessage:
- ed.LocalizedMessage = d
- default:
- ed.Unknown = append(ed.Unknown, d)
- }
- }
-
- return ed
-}
-
-// parseHTTPDetails will convert the given googleapi.Error into the protobuf
-// representation then parse the Any values that contain the error details.
-//
-// This is for internal use only.
-func parseHTTPDetails(gae *googleapi.Error) ErrDetails {
- e := &jsonerror.Error{}
- if err := protojson.Unmarshal([]byte(gae.Body), e); err != nil {
- // If the error body does not conform to the error schema, ignore it
- // altogther. See https://cloud.google.com/apis/design/errors#http_mapping.
- return ErrDetails{}
- }
-
- // Coerce the Any messages into proto.Message then parse the details.
- details := []interface{}{}
- for _, any := range e.GetError().GetDetails() {
- m, err := any.UnmarshalNew()
- if err != nil {
- // Ignore malformed Any values.
- continue
- }
- details = append(details, m)
- }
-
- return parseDetails(details)
-}
-
-// HTTPCode returns the underlying HTTP response status code. This method returns
-// `-1` if the underlying error is a [google.golang.org/grpc/status.Status]. To
-// check gRPC error codes use [google.golang.org/grpc/status.Code].
-func (a *APIError) HTTPCode() int {
- if a.httpErr == nil {
- return -1
- }
- return a.httpErr.Code
-}
diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md
deleted file mode 100644
index 9ff0caea..00000000
--- a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/README.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# HTTP JSON Error Schema
-
-The `error.proto` represents the HTTP-JSON schema used by Google APIs to convey
-error payloads as described by https://cloud.google.com/apis/design/errors#http_mapping.
-This package is for internal parsing logic only and should not be used in any
-other context.
-
-## Regeneration
-
-To regenerate the protobuf Go code you will need the following:
-
-* A local copy of [googleapis], the absolute path to which should be exported to
-the environment variable `GOOGLEAPIS`
-* The protobuf compiler [protoc]
-* The Go [protobuf plugin]
-* The [goimports] tool
-
-From this directory run the following command:
-```sh
-protoc -I $GOOGLEAPIS -I. --go_out=. --go_opt=module=github.com/googleapis/gax-go/v2/apierror/internal/proto error.proto
-goimports -w .
-```
-
-Note: the `module` plugin option ensures the generated code is placed in this
-directory, and not in several nested directories defined by `go_package` option.
-
-[googleapis]: https://github.com/googleapis/googleapis
-[protoc]: https://github.com/protocolbuffers/protobuf#protocol-compiler-installation
-[protobuf plugin]: https://developers.google.com/protocol-buffers/docs/reference/go-generated
-[goimports]: https://pkg.go.dev/golang.org/x/tools/cmd/goimports
\ No newline at end of file
diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go
deleted file mode 100644
index e4b03f16..00000000
--- a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go
+++ /dev/null
@@ -1,256 +0,0 @@
-// Copyright 2022 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.28.0
-// protoc v3.17.3
-// source: custom_error.proto
-
-package jsonerror
-
-import (
- reflect "reflect"
- sync "sync"
-
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// Error code for `CustomError`.
-type CustomError_CustomErrorCode int32
-
-const (
- // Default error.
- CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED CustomError_CustomErrorCode = 0
- // Too many foo.
- CustomError_TOO_MANY_FOO CustomError_CustomErrorCode = 1
- // Not enough foo.
- CustomError_NOT_ENOUGH_FOO CustomError_CustomErrorCode = 2
- // Catastrophic error.
- CustomError_UNIVERSE_WAS_DESTROYED CustomError_CustomErrorCode = 3
-)
-
-// Enum value maps for CustomError_CustomErrorCode.
-var (
- CustomError_CustomErrorCode_name = map[int32]string{
- 0: "CUSTOM_ERROR_CODE_UNSPECIFIED",
- 1: "TOO_MANY_FOO",
- 2: "NOT_ENOUGH_FOO",
- 3: "UNIVERSE_WAS_DESTROYED",
- }
- CustomError_CustomErrorCode_value = map[string]int32{
- "CUSTOM_ERROR_CODE_UNSPECIFIED": 0,
- "TOO_MANY_FOO": 1,
- "NOT_ENOUGH_FOO": 2,
- "UNIVERSE_WAS_DESTROYED": 3,
- }
-)
-
-func (x CustomError_CustomErrorCode) Enum() *CustomError_CustomErrorCode {
- p := new(CustomError_CustomErrorCode)
- *p = x
- return p
-}
-
-func (x CustomError_CustomErrorCode) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (CustomError_CustomErrorCode) Descriptor() protoreflect.EnumDescriptor {
- return file_custom_error_proto_enumTypes[0].Descriptor()
-}
-
-func (CustomError_CustomErrorCode) Type() protoreflect.EnumType {
- return &file_custom_error_proto_enumTypes[0]
-}
-
-func (x CustomError_CustomErrorCode) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use CustomError_CustomErrorCode.Descriptor instead.
-func (CustomError_CustomErrorCode) EnumDescriptor() ([]byte, []int) {
- return file_custom_error_proto_rawDescGZIP(), []int{0, 0}
-}
-
-// CustomError is an example of a custom error message which may be included
-// in an rpc status. It is not meant to reflect a standard error.
-type CustomError struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Error code specific to the custom API being invoked.
- Code CustomError_CustomErrorCode `protobuf:"varint,1,opt,name=code,proto3,enum=error.CustomError_CustomErrorCode" json:"code,omitempty"`
- // Name of the failed entity.
- Entity string `protobuf:"bytes,2,opt,name=entity,proto3" json:"entity,omitempty"`
- // Message that describes the error.
- ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
-}
-
-func (x *CustomError) Reset() {
- *x = CustomError{}
- if protoimpl.UnsafeEnabled {
- mi := &file_custom_error_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CustomError) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CustomError) ProtoMessage() {}
-
-func (x *CustomError) ProtoReflect() protoreflect.Message {
- mi := &file_custom_error_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CustomError.ProtoReflect.Descriptor instead.
-func (*CustomError) Descriptor() ([]byte, []int) {
- return file_custom_error_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *CustomError) GetCode() CustomError_CustomErrorCode {
- if x != nil {
- return x.Code
- }
- return CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED
-}
-
-func (x *CustomError) GetEntity() string {
- if x != nil {
- return x.Entity
- }
- return ""
-}
-
-func (x *CustomError) GetErrorMessage() string {
- if x != nil {
- return x.ErrorMessage
- }
- return ""
-}
-
-var File_custom_error_proto protoreflect.FileDescriptor
-
-var file_custom_error_proto_rawDesc = []byte{
- 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xfa, 0x01, 0x0a, 0x0b,
- 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x04, 0x63,
- 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x72, 0x72, 0x6f,
- 0x72, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x43, 0x75,
- 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63,
- 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x65,
- 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
- 0x22, 0x76, 0x0a, 0x0f, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43,
- 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x45, 0x52,
- 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
- 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x4f, 0x4f, 0x5f, 0x4d, 0x41,
- 0x4e, 0x59, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4e, 0x4f, 0x54, 0x5f,
- 0x45, 0x4e, 0x4f, 0x55, 0x47, 0x48, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16,
- 0x55, 0x4e, 0x49, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x57, 0x41, 0x53, 0x5f, 0x44, 0x45, 0x53,
- 0x54, 0x52, 0x4f, 0x59, 0x45, 0x44, 0x10, 0x03, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68,
- 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2f, 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65,
- 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_custom_error_proto_rawDescOnce sync.Once
- file_custom_error_proto_rawDescData = file_custom_error_proto_rawDesc
-)
-
-func file_custom_error_proto_rawDescGZIP() []byte {
- file_custom_error_proto_rawDescOnce.Do(func() {
- file_custom_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_custom_error_proto_rawDescData)
- })
- return file_custom_error_proto_rawDescData
-}
-
-var file_custom_error_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_custom_error_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_custom_error_proto_goTypes = []interface{}{
- (CustomError_CustomErrorCode)(0), // 0: error.CustomError.CustomErrorCode
- (*CustomError)(nil), // 1: error.CustomError
-}
-var file_custom_error_proto_depIdxs = []int32{
- 0, // 0: error.CustomError.code:type_name -> error.CustomError.CustomErrorCode
- 1, // [1:1] is the sub-list for method output_type
- 1, // [1:1] is the sub-list for method input_type
- 1, // [1:1] is the sub-list for extension type_name
- 1, // [1:1] is the sub-list for extension extendee
- 0, // [0:1] is the sub-list for field type_name
-}
-
-func init() { file_custom_error_proto_init() }
-func file_custom_error_proto_init() {
- if File_custom_error_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_custom_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CustomError); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_custom_error_proto_rawDesc,
- NumEnums: 1,
- NumMessages: 1,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_custom_error_proto_goTypes,
- DependencyIndexes: file_custom_error_proto_depIdxs,
- EnumInfos: file_custom_error_proto_enumTypes,
- MessageInfos: file_custom_error_proto_msgTypes,
- }.Build()
- File_custom_error_proto = out.File
- file_custom_error_proto_rawDesc = nil
- file_custom_error_proto_goTypes = nil
- file_custom_error_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto
deleted file mode 100644
index 21678ae6..00000000
--- a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2022 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package error;
-
-option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror";
-
-
-// CustomError is an example of a custom error message which may be included
-// in an rpc status. It is not meant to reflect a standard error.
-message CustomError {
-
- // Error code for `CustomError`.
- enum CustomErrorCode {
- // Default error.
- CUSTOM_ERROR_CODE_UNSPECIFIED = 0;
-
- // Too many foo.
- TOO_MANY_FOO = 1;
-
- // Not enough foo.
- NOT_ENOUGH_FOO = 2;
-
- // Catastrophic error.
- UNIVERSE_WAS_DESTROYED = 3;
-
- }
-
- // Error code specific to the custom API being invoked.
- CustomErrorCode code = 1;
-
- // Name of the failed entity.
- string entity = 2;
-
- // Message that describes the error.
- string error_message = 3;
-}
diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go
deleted file mode 100644
index 7dd9b837..00000000
--- a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.pb.go
+++ /dev/null
@@ -1,280 +0,0 @@
-// Copyright 2021 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.28.0
-// protoc v3.15.8
-// source: apierror/internal/proto/error.proto
-
-package jsonerror
-
-import (
- reflect "reflect"
- sync "sync"
-
- code "google.golang.org/genproto/googleapis/rpc/code"
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- anypb "google.golang.org/protobuf/types/known/anypb"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// The error format v2 for Google JSON REST APIs.
-// Copied from https://cloud.google.com/apis/design/errors#http_mapping.
-//
-// NOTE: This schema is not used for other wire protocols.
-type Error struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The actual error payload. The nested message structure is for backward
- // compatibility with Google API client libraries. It also makes the error
- // more readable to developers.
- Error *Error_Status `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
-}
-
-func (x *Error) Reset() {
- *x = Error{}
- if protoimpl.UnsafeEnabled {
- mi := &file_apierror_internal_proto_error_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Error) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Error) ProtoMessage() {}
-
-func (x *Error) ProtoReflect() protoreflect.Message {
- mi := &file_apierror_internal_proto_error_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Error.ProtoReflect.Descriptor instead.
-func (*Error) Descriptor() ([]byte, []int) {
- return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *Error) GetError() *Error_Status {
- if x != nil {
- return x.Error
- }
- return nil
-}
-
-// This message has the same semantics as `google.rpc.Status`. It uses HTTP
-// status code instead of gRPC status code. It has an extra field `status`
-// for backward compatibility with Google API Client Libraries.
-type Error_Status struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The HTTP status code that corresponds to `google.rpc.Status.code`.
- Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
- // This corresponds to `google.rpc.Status.message`.
- Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
- // This is the enum version for `google.rpc.Status.code`.
- Status code.Code `protobuf:"varint,4,opt,name=status,proto3,enum=google.rpc.Code" json:"status,omitempty"`
- // This corresponds to `google.rpc.Status.details`.
- Details []*anypb.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"`
-}
-
-func (x *Error_Status) Reset() {
- *x = Error_Status{}
- if protoimpl.UnsafeEnabled {
- mi := &file_apierror_internal_proto_error_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Error_Status) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Error_Status) ProtoMessage() {}
-
-func (x *Error_Status) ProtoReflect() protoreflect.Message {
- mi := &file_apierror_internal_proto_error_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Error_Status.ProtoReflect.Descriptor instead.
-func (*Error_Status) Descriptor() ([]byte, []int) {
- return file_apierror_internal_proto_error_proto_rawDescGZIP(), []int{0, 0}
-}
-
-func (x *Error_Status) GetCode() int32 {
- if x != nil {
- return x.Code
- }
- return 0
-}
-
-func (x *Error_Status) GetMessage() string {
- if x != nil {
- return x.Message
- }
- return ""
-}
-
-func (x *Error_Status) GetStatus() code.Code {
- if x != nil {
- return x.Status
- }
- return code.Code(0)
-}
-
-func (x *Error_Status) GetDetails() []*anypb.Any {
- if x != nil {
- return x.Details
- }
- return nil
-}
-
-var File_apierror_internal_proto_error_proto protoreflect.FileDescriptor
-
-var file_apierror_internal_proto_error_proto_rawDesc = []byte{
- 0x0a, 0x23, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72,
- 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x19, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e,
- 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
- 0x72, 0x70, 0x63, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5,
- 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x29, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f,
- 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e,
- 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x65, 0x72,
- 0x72, 0x6f, 0x72, 0x1a, 0x90, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12,
- 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f,
- 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a, 0x06,
- 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x06,
- 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c,
- 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64,
- 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f,
- 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, 0x72, 0x72,
- 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x33,
-}
-
-var (
- file_apierror_internal_proto_error_proto_rawDescOnce sync.Once
- file_apierror_internal_proto_error_proto_rawDescData = file_apierror_internal_proto_error_proto_rawDesc
-)
-
-func file_apierror_internal_proto_error_proto_rawDescGZIP() []byte {
- file_apierror_internal_proto_error_proto_rawDescOnce.Do(func() {
- file_apierror_internal_proto_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_apierror_internal_proto_error_proto_rawDescData)
- })
- return file_apierror_internal_proto_error_proto_rawDescData
-}
-
-var file_apierror_internal_proto_error_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
-var file_apierror_internal_proto_error_proto_goTypes = []interface{}{
- (*Error)(nil), // 0: error.Error
- (*Error_Status)(nil), // 1: error.Error.Status
- (code.Code)(0), // 2: google.rpc.Code
- (*anypb.Any)(nil), // 3: google.protobuf.Any
-}
-var file_apierror_internal_proto_error_proto_depIdxs = []int32{
- 1, // 0: error.Error.error:type_name -> error.Error.Status
- 2, // 1: error.Error.Status.status:type_name -> google.rpc.Code
- 3, // 2: error.Error.Status.details:type_name -> google.protobuf.Any
- 3, // [3:3] is the sub-list for method output_type
- 3, // [3:3] is the sub-list for method input_type
- 3, // [3:3] is the sub-list for extension type_name
- 3, // [3:3] is the sub-list for extension extendee
- 0, // [0:3] is the sub-list for field type_name
-}
-
-func init() { file_apierror_internal_proto_error_proto_init() }
-func file_apierror_internal_proto_error_proto_init() {
- if File_apierror_internal_proto_error_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_apierror_internal_proto_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Error); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_apierror_internal_proto_error_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Error_Status); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_apierror_internal_proto_error_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 2,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_apierror_internal_proto_error_proto_goTypes,
- DependencyIndexes: file_apierror_internal_proto_error_proto_depIdxs,
- MessageInfos: file_apierror_internal_proto_error_proto_msgTypes,
- }.Build()
- File_apierror_internal_proto_error_proto = out.File
- file_apierror_internal_proto_error_proto_rawDesc = nil
- file_apierror_internal_proto_error_proto_goTypes = nil
- file_apierror_internal_proto_error_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto
deleted file mode 100644
index 4b9b13ce..00000000
--- a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/error.proto
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2021 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package error;
-
-import "google/protobuf/any.proto";
-import "google/rpc/code.proto";
-
-option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror";
-
-// The error format v2 for Google JSON REST APIs.
-// Copied from https://cloud.google.com/apis/design/errors#http_mapping.
-//
-// NOTE: This schema is not used for other wire protocols.
-message Error {
- // This message has the same semantics as `google.rpc.Status`. It uses HTTP
- // status code instead of gRPC status code. It has an extra field `status`
- // for backward compatibility with Google API Client Libraries.
- message Status {
- // The HTTP status code that corresponds to `google.rpc.Status.code`.
- int32 code = 1;
- // This corresponds to `google.rpc.Status.message`.
- string message = 2;
- // This is the enum version for `google.rpc.Status.code`.
- google.rpc.Code status = 4;
- // This corresponds to `google.rpc.Status.details`.
- repeated google.protobuf.Any details = 5;
- }
- // The actual error payload. The nested message structure is for backward
- // compatibility with Google API client libraries. It also makes the error
- // more readable to developers.
- Status error = 1;
-}
diff --git a/vendor/github.com/googleapis/gax-go/v2/call_option.go b/vendor/github.com/googleapis/gax-go/v2/call_option.go
deleted file mode 100644
index c52e03f6..00000000
--- a/vendor/github.com/googleapis/gax-go/v2/call_option.go
+++ /dev/null
@@ -1,265 +0,0 @@
-// Copyright 2016, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package gax
-
-import (
- "errors"
- "math/rand"
- "time"
-
- "google.golang.org/api/googleapi"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-// CallOption is an option used by Invoke to control behaviors of RPC calls.
-// CallOption works by modifying relevant fields of CallSettings.
-type CallOption interface {
- // Resolve applies the option by modifying cs.
- Resolve(cs *CallSettings)
-}
-
-// Retryer is used by Invoke to determine retry behavior.
-type Retryer interface {
- // Retry reports whether a request should be retried and how long to pause before retrying
- // if the previous attempt returned with err. Invoke never calls Retry with nil error.
- Retry(err error) (pause time.Duration, shouldRetry bool)
-}
-
-type retryerOption func() Retryer
-
-func (o retryerOption) Resolve(s *CallSettings) {
- s.Retry = o
-}
-
-// WithRetry sets CallSettings.Retry to fn.
-func WithRetry(fn func() Retryer) CallOption {
- return retryerOption(fn)
-}
-
-// OnErrorFunc returns a Retryer that retries if and only if the previous attempt
-// returns an error that satisfies shouldRetry.
-//
-// Pause times between retries are specified by bo. bo is only used for its
-// parameters; each Retryer has its own copy.
-func OnErrorFunc(bo Backoff, shouldRetry func(err error) bool) Retryer {
- return &errorRetryer{
- shouldRetry: shouldRetry,
- backoff: bo,
- }
-}
-
-type errorRetryer struct {
- backoff Backoff
- shouldRetry func(err error) bool
-}
-
-func (r *errorRetryer) Retry(err error) (time.Duration, bool) {
- if r.shouldRetry(err) {
- return r.backoff.Pause(), true
- }
-
- return 0, false
-}
-
-// OnCodes returns a Retryer that retries if and only if
-// the previous attempt returns a GRPC error whose error code is stored in cc.
-// Pause times between retries are specified by bo.
-//
-// bo is only used for its parameters; each Retryer has its own copy.
-func OnCodes(cc []codes.Code, bo Backoff) Retryer {
- return &boRetryer{
- backoff: bo,
- codes: append([]codes.Code(nil), cc...),
- }
-}
-
-type boRetryer struct {
- backoff Backoff
- codes []codes.Code
-}
-
-func (r *boRetryer) Retry(err error) (time.Duration, bool) {
- st, ok := status.FromError(err)
- if !ok {
- return 0, false
- }
- c := st.Code()
- for _, rc := range r.codes {
- if c == rc {
- return r.backoff.Pause(), true
- }
- }
- return 0, false
-}
-
-// OnHTTPCodes returns a Retryer that retries if and only if
-// the previous attempt returns a googleapi.Error whose status code is stored in
-// cc. Pause times between retries are specified by bo.
-//
-// bo is only used for its parameters; each Retryer has its own copy.
-func OnHTTPCodes(bo Backoff, cc ...int) Retryer {
- codes := make(map[int]bool, len(cc))
- for _, c := range cc {
- codes[c] = true
- }
-
- return &httpRetryer{
- backoff: bo,
- codes: codes,
- }
-}
-
-type httpRetryer struct {
- backoff Backoff
- codes map[int]bool
-}
-
-func (r *httpRetryer) Retry(err error) (time.Duration, bool) {
- var gerr *googleapi.Error
- if !errors.As(err, &gerr) {
- return 0, false
- }
-
- if r.codes[gerr.Code] {
- return r.backoff.Pause(), true
- }
-
- return 0, false
-}
-
-// Backoff implements exponential backoff. The wait time between retries is a
-// random value between 0 and the "retry period" - the time between retries. The
-// retry period starts at Initial and increases by the factor of Multiplier
-// every retry, but is capped at Max.
-//
-// Note: MaxNumRetries / RPCDeadline is specifically not provided. These should
-// be built on top of Backoff.
-type Backoff struct {
- // Initial is the initial value of the retry period, defaults to 1 second.
- Initial time.Duration
-
- // Max is the maximum value of the retry period, defaults to 30 seconds.
- Max time.Duration
-
- // Multiplier is the factor by which the retry period increases.
- // It should be greater than 1 and defaults to 2.
- Multiplier float64
-
- // cur is the current retry period.
- cur time.Duration
-}
-
-// Pause returns the next time.Duration that the caller should use to backoff.
-func (bo *Backoff) Pause() time.Duration {
- if bo.Initial == 0 {
- bo.Initial = time.Second
- }
- if bo.cur == 0 {
- bo.cur = bo.Initial
- }
- if bo.Max == 0 {
- bo.Max = 30 * time.Second
- }
- if bo.Multiplier < 1 {
- bo.Multiplier = 2
- }
- // Select a duration between 1ns and the current max. It might seem
- // counterintuitive to have so much jitter, but
- // https://www.awsarchitectureblog.com/2015/03/backoff.html argues that
- // that is the best strategy.
- d := time.Duration(1 + rand.Int63n(int64(bo.cur)))
- bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier)
- if bo.cur > bo.Max {
- bo.cur = bo.Max
- }
- return d
-}
-
-type grpcOpt []grpc.CallOption
-
-func (o grpcOpt) Resolve(s *CallSettings) {
- s.GRPC = o
-}
-
-type pathOpt struct {
- p string
-}
-
-func (p pathOpt) Resolve(s *CallSettings) {
- s.Path = p.p
-}
-
-type timeoutOpt struct {
- t time.Duration
-}
-
-func (t timeoutOpt) Resolve(s *CallSettings) {
- s.timeout = t.t
-}
-
-// WithPath applies a Path override to the HTTP-based APICall.
-//
-// This is for internal use only.
-func WithPath(p string) CallOption {
- return &pathOpt{p: p}
-}
-
-// WithGRPCOptions allows passing gRPC call options during client creation.
-func WithGRPCOptions(opt ...grpc.CallOption) CallOption {
- return grpcOpt(append([]grpc.CallOption(nil), opt...))
-}
-
-// WithTimeout is a convenience option for setting a context.WithTimeout on the
-// singular context.Context used for **all** APICall attempts. Calculated from
-// the start of the first APICall attempt.
-// If the context.Context provided to Invoke already has a Deadline set, that
-// will always be respected over the deadline calculated using this option.
-func WithTimeout(t time.Duration) CallOption {
- return &timeoutOpt{t: t}
-}
-
-// CallSettings allow fine-grained control over how calls are made.
-type CallSettings struct {
- // Retry returns a Retryer to be used to control retry logic of a method call.
- // If Retry is nil or the returned Retryer is nil, the call will not be retried.
- Retry func() Retryer
-
- // CallOptions to be forwarded to GRPC.
- GRPC []grpc.CallOption
-
- // Path is an HTTP override for an APICall.
- Path string
-
- // Timeout defines the amount of time that Invoke has to complete.
- // Unexported so it cannot be changed by the code in an APICall.
- timeout time.Duration
-}
diff --git a/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go b/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go
deleted file mode 100644
index f5af5c99..00000000
--- a/vendor/github.com/googleapis/gax-go/v2/callctx/callctx.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2023, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Package callctx provides helpers for storing and retrieving values out of
-// [context.Context]. These values are used by our client libraries in various
-// ways across the stack.
-package callctx
-
-import (
- "context"
- "fmt"
-)
-
-const (
- // XGoogFieldMaskHeader is the canonical header key for the [System Parameter]
- // that specifies the response read mask. The value(s) for this header
- // must adhere to format described in [fieldmaskpb].
- //
- // [System Parameter]: https://cloud.google.com/apis/docs/system-parameters
- // [fieldmaskpb]: https://google.golang.org/protobuf/types/known/fieldmaskpb
- XGoogFieldMaskHeader = "x-goog-fieldmask"
-
- headerKey = contextKey("header")
-)
-
-// contextKey is a private type used to store/retrieve context values.
-type contextKey string
-
-// HeadersFromContext retrieves headers set from [SetHeaders]. These headers
-// can then be cast to http.Header or metadata.MD to send along on requests.
-func HeadersFromContext(ctx context.Context) map[string][]string {
- m, ok := ctx.Value(headerKey).(map[string][]string)
- if !ok {
- return nil
- }
- return m
-}
-
-// SetHeaders stores key value pairs in the returned context that can later
-// be retrieved by [HeadersFromContext]. Values stored in this manner will
-// automatically be retrieved by client libraries and sent as outgoing headers
-// on all requests. keyvals should have a corresponding value for every key
-// provided. If there is an odd number of keyvals this method will panic.
-func SetHeaders(ctx context.Context, keyvals ...string) context.Context {
- if len(keyvals)%2 != 0 {
- panic(fmt.Sprintf("callctx: an even number of key value pairs must be provided, got %d", len(keyvals)))
- }
- h, ok := ctx.Value(headerKey).(map[string][]string)
- if !ok {
- h = make(map[string][]string)
- } else {
- h = cloneHeaders(h)
- }
-
- for i := 0; i < len(keyvals); i = i + 2 {
- h[keyvals[i]] = append(h[keyvals[i]], keyvals[i+1])
- }
- return context.WithValue(ctx, headerKey, h)
-}
-
-// cloneHeaders makes a new key-value map while reusing the value slices.
-// As such, new values should be appended to the value slice, and modifying
-// indexed values is not thread safe.
-//
-// TODO: Replace this with maps.Clone when Go 1.21 is the minimum version.
-func cloneHeaders(h map[string][]string) map[string][]string {
- c := make(map[string][]string, len(h))
- for k, v := range h {
- vc := make([]string, len(v))
- copy(vc, v)
- c[k] = vc
- }
- return c
-}
diff --git a/vendor/github.com/googleapis/gax-go/v2/content_type.go b/vendor/github.com/googleapis/gax-go/v2/content_type.go
deleted file mode 100644
index 1b53d0a3..00000000
--- a/vendor/github.com/googleapis/gax-go/v2/content_type.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2022, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package gax
-
-import (
- "io"
- "io/ioutil"
- "net/http"
-)
-
-const sniffBuffSize = 512
-
-func newContentSniffer(r io.Reader) *contentSniffer {
- return &contentSniffer{r: r}
-}
-
-// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader.
-type contentSniffer struct {
- r io.Reader
- start []byte // buffer for the sniffed bytes.
- err error // set to any error encountered while reading bytes to be sniffed.
-
- ctype string // set on first sniff.
- sniffed bool // set to true on first sniff.
-}
-
-func (cs *contentSniffer) Read(p []byte) (n int, err error) {
- // Ensure that the content type is sniffed before any data is consumed from Reader.
- _, _ = cs.ContentType()
-
- if len(cs.start) > 0 {
- n := copy(p, cs.start)
- cs.start = cs.start[n:]
- return n, nil
- }
-
- // We may have read some bytes into start while sniffing, even if the read ended in an error.
- // We should first return those bytes, then the error.
- if cs.err != nil {
- return 0, cs.err
- }
-
- // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader.
- return cs.r.Read(p)
-}
-
-// ContentType returns the sniffed content type, and whether the content type was successfully sniffed.
-func (cs *contentSniffer) ContentType() (string, bool) {
- if cs.sniffed {
- return cs.ctype, cs.ctype != ""
- }
- cs.sniffed = true
- // If ReadAll hits EOF, it returns err==nil.
- cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize))
-
- // Don't try to detect the content type based on possibly incomplete data.
- if cs.err != nil {
- return "", false
- }
-
- cs.ctype = http.DetectContentType(cs.start)
- return cs.ctype, true
-}
-
-// DetermineContentType determines the content type of the supplied reader.
-// The content of media will be sniffed to determine the content type.
-// After calling DetectContentType the caller must not perform further reads on
-// media, but rather read from the Reader that is returned.
-func DetermineContentType(media io.Reader) (io.Reader, string) {
- // For backwards compatibility, allow clients to set content
- // type by providing a ContentTyper for media.
- // Note: This is an anonymous interface definition copied from googleapi.ContentTyper.
- if typer, ok := media.(interface {
- ContentType() string
- }); ok {
- return media, typer.ContentType()
- }
-
- sniffer := newContentSniffer(media)
- if ctype, ok := sniffer.ContentType(); ok {
- return sniffer, ctype
- }
- // If content type could not be sniffed, reads from sniffer will eventually fail with an error.
- return sniffer, ""
-}
diff --git a/vendor/github.com/googleapis/gax-go/v2/gax.go b/vendor/github.com/googleapis/gax-go/v2/gax.go
deleted file mode 100644
index 36cdfa33..00000000
--- a/vendor/github.com/googleapis/gax-go/v2/gax.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2016, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Package gax contains a set of modules which aid the development of APIs
-// for clients and servers based on gRPC and Google API conventions.
-//
-// Application code will rarely need to use this library directly.
-// However, code generated automatically from API definition files can use it
-// to simplify code generation and to provide more convenient and idiomatic API surfaces.
-package gax
-
-import "github.com/googleapis/gax-go/v2/internal"
-
-// Version specifies the gax-go version being used.
-const Version = internal.Version
diff --git a/vendor/github.com/googleapis/gax-go/v2/header.go b/vendor/github.com/googleapis/gax-go/v2/header.go
deleted file mode 100644
index f5273985..00000000
--- a/vendor/github.com/googleapis/gax-go/v2/header.go
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright 2018, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package gax
-
-import (
- "bytes"
- "context"
- "fmt"
- "net/http"
- "runtime"
- "strings"
- "unicode"
-
- "github.com/googleapis/gax-go/v2/callctx"
- "google.golang.org/grpc/metadata"
-)
-
-var (
- // GoVersion is a header-safe representation of the current runtime
- // environment's Go version. This is for GAX consumers that need to
- // report the Go runtime version in API calls.
- GoVersion string
- // version is a package internal global variable for testing purposes.
- version = runtime.Version
-)
-
-// versionUnknown is only used when the runtime version cannot be determined.
-const versionUnknown = "UNKNOWN"
-
-func init() {
- GoVersion = goVersion()
-}
-
-// goVersion returns a Go runtime version derived from the runtime environment
-// that is modified to be suitable for reporting in a header, meaning it has no
-// whitespace. If it is unable to determine the Go runtime version, it returns
-// versionUnknown.
-func goVersion() string {
- const develPrefix = "devel +"
-
- s := version()
- if strings.HasPrefix(s, develPrefix) {
- s = s[len(develPrefix):]
- if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
- s = s[:p]
- }
- return s
- } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
- s = s[:p]
- }
-
- notSemverRune := func(r rune) bool {
- return !strings.ContainsRune("0123456789.", r)
- }
-
- if strings.HasPrefix(s, "go1") {
- s = s[2:]
- var prerelease string
- if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
- s, prerelease = s[:p], s[p:]
- }
- if strings.HasSuffix(s, ".") {
- s += "0"
- } else if strings.Count(s, ".") < 2 {
- s += ".0"
- }
- if prerelease != "" {
- // Some release candidates already have a dash in them.
- if !strings.HasPrefix(prerelease, "-") {
- prerelease = "-" + prerelease
- }
- s += prerelease
- }
- return s
- }
- return "UNKNOWN"
-}
-
-// XGoogHeader is for use by the Google Cloud Libraries only. See package
-// [github.com/googleapis/gax-go/v2/callctx] for help setting/retrieving
-// request/response headers.
-//
-// XGoogHeader formats key-value pairs.
-// The resulting string is suitable for x-goog-api-client header.
-func XGoogHeader(keyval ...string) string {
- if len(keyval) == 0 {
- return ""
- }
- if len(keyval)%2 != 0 {
- panic("gax.Header: odd argument count")
- }
- var buf bytes.Buffer
- for i := 0; i < len(keyval); i += 2 {
- buf.WriteByte(' ')
- buf.WriteString(keyval[i])
- buf.WriteByte('/')
- buf.WriteString(keyval[i+1])
- }
- return buf.String()[1:]
-}
-
-// InsertMetadataIntoOutgoingContext is for use by the Google Cloud Libraries
-// only. See package [github.com/googleapis/gax-go/v2/callctx] for help
-// setting/retrieving request/response headers.
-//
-// InsertMetadataIntoOutgoingContext returns a new context that merges the
-// provided keyvals metadata pairs with any existing metadata/headers in the
-// provided context. keyvals should have a corresponding value for every key
-// provided. If there is an odd number of keyvals this method will panic.
-// Existing values for keys will not be overwritten, instead provided values
-// will be appended to the list of existing values.
-func InsertMetadataIntoOutgoingContext(ctx context.Context, keyvals ...string) context.Context {
- return metadata.NewOutgoingContext(ctx, insertMetadata(ctx, keyvals...))
-}
-
-// BuildHeaders is for use by the Google Cloud Libraries only. See package
-// [github.com/googleapis/gax-go/v2/callctx] for help setting/retrieving
-// request/response headers.
-//
-// BuildHeaders returns a new http.Header that merges the provided
-// keyvals header pairs with any existing metadata/headers in the provided
-// context. keyvals should have a corresponding value for every key provided.
-// If there is an odd number of keyvals this method will panic.
-// Existing values for keys will not be overwritten, instead provided values
-// will be appended to the list of existing values.
-func BuildHeaders(ctx context.Context, keyvals ...string) http.Header {
- return http.Header(insertMetadata(ctx, keyvals...))
-}
-
-func insertMetadata(ctx context.Context, keyvals ...string) metadata.MD {
- if len(keyvals)%2 != 0 {
- panic(fmt.Sprintf("gax: an even number of key value pairs must be provided, got %d", len(keyvals)))
- }
- out, ok := metadata.FromOutgoingContext(ctx)
- if !ok {
- out = metadata.MD(make(map[string][]string))
- }
- headers := callctx.HeadersFromContext(ctx)
-
- // x-goog-api-client is a special case that we want to make sure gets merged
- // into a single header.
- const xGoogHeader = "x-goog-api-client"
- var mergedXgoogHeader strings.Builder
-
- for k, vals := range headers {
- if k == xGoogHeader {
- // Merge all values for the x-goog-api-client header set on the ctx.
- for _, v := range vals {
- mergedXgoogHeader.WriteString(v)
- mergedXgoogHeader.WriteRune(' ')
- }
- continue
- }
- out[k] = append(out[k], vals...)
- }
- for i := 0; i < len(keyvals); i = i + 2 {
- out[keyvals[i]] = append(out[keyvals[i]], keyvals[i+1])
-
- if keyvals[i] == xGoogHeader {
- // Merge the x-goog-api-client header values set on the ctx with any
- // values passed in for it from the client.
- mergedXgoogHeader.WriteString(keyvals[i+1])
- mergedXgoogHeader.WriteRune(' ')
- }
- }
-
- // Add the x goog header back in, replacing the separate values that were set.
- if mergedXgoogHeader.Len() > 0 {
- out[xGoogHeader] = []string{mergedXgoogHeader.String()[:mergedXgoogHeader.Len()-1]}
- }
-
- return out
-}
diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go
deleted file mode 100644
index 4f780f46..00000000
--- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2022, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package internal
-
-// Version is the current tagged release of the library.
-const Version = "2.12.5"
diff --git a/vendor/github.com/googleapis/gax-go/v2/invoke.go b/vendor/github.com/googleapis/gax-go/v2/invoke.go
deleted file mode 100644
index 721d1af5..00000000
--- a/vendor/github.com/googleapis/gax-go/v2/invoke.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2016, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package gax
-
-import (
- "context"
- "strings"
- "time"
-
- "github.com/googleapis/gax-go/v2/apierror"
-)
-
-// APICall is a user defined call stub.
-type APICall func(context.Context, CallSettings) error
-
-// Invoke calls the given APICall, performing retries as specified by opts, if
-// any.
-func Invoke(ctx context.Context, call APICall, opts ...CallOption) error {
- var settings CallSettings
- for _, opt := range opts {
- opt.Resolve(&settings)
- }
- return invoke(ctx, call, settings, Sleep)
-}
-
-// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing.
-// If interrupted, Sleep returns ctx.Err().
-func Sleep(ctx context.Context, d time.Duration) error {
- t := time.NewTimer(d)
- select {
- case <-ctx.Done():
- t.Stop()
- return ctx.Err()
- case <-t.C:
- return nil
- }
-}
-
-type sleeper func(ctx context.Context, d time.Duration) error
-
-// invoke implements Invoke, taking an additional sleeper argument for testing.
-func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error {
- var retryer Retryer
-
- // Only use the value provided via WithTimeout if the context doesn't
- // already have a deadline. This is important for backwards compatibility if
- // the user already set a deadline on the context given to Invoke.
- if _, ok := ctx.Deadline(); !ok && settings.timeout != 0 {
- c, cc := context.WithTimeout(ctx, settings.timeout)
- defer cc()
- ctx = c
- }
-
- for {
- err := call(ctx, settings)
- if err == nil {
- return nil
- }
- // Never retry permanent certificate errors. (e.x. if ca-certificates
- // are not installed). We should only make very few, targeted
- // exceptions: many (other) status=Unavailable should be retried, such
- // as if there's a network hiccup, or the internet goes out for a
- // minute. This is also why here we are doing string parsing instead of
- // simply making Unavailable a non-retried code elsewhere.
- if strings.Contains(err.Error(), "x509: certificate signed by unknown authority") {
- return err
- }
- if apierr, ok := apierror.FromError(err); ok {
- err = apierr
- }
- if settings.Retry == nil {
- return err
- }
- if retryer == nil {
- if r := settings.Retry(); r != nil {
- retryer = r
- } else {
- return err
- }
- }
- if d, ok := retryer.Retry(err); !ok {
- return err
- } else if err = sp(ctx, d); err != nil {
- return err
- }
- }
-}
diff --git a/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go b/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go
deleted file mode 100644
index 9b690d40..00000000
--- a/vendor/github.com/googleapis/gax-go/v2/proto_json_stream.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2022, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package gax
-
-import (
- "encoding/json"
- "errors"
- "io"
-
- "google.golang.org/protobuf/encoding/protojson"
- "google.golang.org/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
-)
-
-var (
- arrayOpen = json.Delim('[')
- arrayClose = json.Delim(']')
- errBadOpening = errors.New("unexpected opening token, expected '['")
-)
-
-// ProtoJSONStream represents a wrapper for consuming a stream of protobuf
-// messages encoded using protobuf-JSON format. More information on this format
-// can be found at https://developers.google.com/protocol-buffers/docs/proto3#json.
-// The stream must appear as a comma-delimited, JSON array of obbjects with
-// opening and closing square braces.
-//
-// This is for internal use only.
-type ProtoJSONStream struct {
- first, closed bool
- reader io.ReadCloser
- stream *json.Decoder
- typ protoreflect.MessageType
-}
-
-// NewProtoJSONStreamReader accepts a stream of bytes via an io.ReadCloser that are
-// protobuf-JSON encoded protobuf messages of the given type. The ProtoJSONStream
-// must be closed when done.
-//
-// This is for internal use only.
-func NewProtoJSONStreamReader(rc io.ReadCloser, typ protoreflect.MessageType) *ProtoJSONStream {
- return &ProtoJSONStream{
- first: true,
- reader: rc,
- stream: json.NewDecoder(rc),
- typ: typ,
- }
-}
-
-// Recv decodes the next protobuf message in the stream or returns io.EOF if
-// the stream is done. It is not safe to call Recv on the same stream from
-// different goroutines, just like it is not safe to do so with a single gRPC
-// stream. Type-cast the protobuf message returned to the type provided at
-// ProtoJSONStream creation.
-// Calls to Recv after calling Close will produce io.EOF.
-func (s *ProtoJSONStream) Recv() (proto.Message, error) {
- if s.closed {
- return nil, io.EOF
- }
- if s.first {
- s.first = false
-
- // Consume the opening '[' so Decode gets one object at a time.
- if t, err := s.stream.Token(); err != nil {
- return nil, err
- } else if t != arrayOpen {
- return nil, errBadOpening
- }
- }
-
- // Capture the next block of data for the item (a JSON object) in the stream.
- var raw json.RawMessage
- if err := s.stream.Decode(&raw); err != nil {
- e := err
- // To avoid checking the first token of each stream, just attempt to
- // Decode the next blob and if that fails, double check if it is just
- // the closing token ']'. If it is the closing, return io.EOF. If it
- // isn't, return the original error.
- if t, _ := s.stream.Token(); t == arrayClose {
- e = io.EOF
- }
- return nil, e
- }
-
- // Initialize a new instance of the protobuf message to unmarshal the
- // raw data into.
- m := s.typ.New().Interface()
- unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true}
- err := unm.Unmarshal(raw, m)
-
- return m, err
-}
-
-// Close closes the stream so that resources are cleaned up.
-func (s *ProtoJSONStream) Close() error {
- // Dereference the *json.Decoder so that the memory is gc'd.
- s.stream = nil
- s.closed = true
-
- return s.reader.Close()
-}
diff --git a/vendor/github.com/googleapis/gax-go/v2/release-please-config.json b/vendor/github.com/googleapis/gax-go/v2/release-please-config.json
deleted file mode 100644
index 61ee266a..00000000
--- a/vendor/github.com/googleapis/gax-go/v2/release-please-config.json
+++ /dev/null
@@ -1,10 +0,0 @@
-{
- "release-type": "go-yoshi",
- "separate-pull-requests": true,
- "include-component-in-tag": false,
- "packages": {
- "v2": {
- "component": "v2"
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE
deleted file mode 100644
index c33dcc7c..00000000
--- a/vendor/github.com/hashicorp/errwrap/LICENSE
+++ /dev/null
@@ -1,354 +0,0 @@
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. “Contributor”
-
- means each individual or legal entity that creates, contributes to the
- creation of, or owns Covered Software.
-
-1.2. “Contributor Version”
-
- means the combination of the Contributions of others (if any) used by a
- Contributor and that particular Contributor’s Contribution.
-
-1.3. “Contribution”
-
- means Covered Software of a particular Contributor.
-
-1.4. “Covered Software”
-
- means Source Code Form to which the initial Contributor has attached the
- notice in Exhibit A, the Executable Form of such Source Code Form, and
- Modifications of such Source Code Form, in each case including portions
- thereof.
-
-1.5. “Incompatible With Secondary Licenses”
- means
-
- a. that the initial Contributor has attached the notice described in
- Exhibit B to the Covered Software; or
-
- b. that the Covered Software was made available under the terms of version
- 1.1 or earlier of the License, but not also under the terms of a
- Secondary License.
-
-1.6. “Executable Form”
-
- means any form of the work other than Source Code Form.
-
-1.7. “Larger Work”
-
- means a work that combines Covered Software with other material, in a separate
- file or files, that is not Covered Software.
-
-1.8. “License”
-
- means this document.
-
-1.9. “Licensable”
-
- means having the right to grant, to the maximum extent possible, whether at the
- time of the initial grant or subsequently, any and all of the rights conveyed by
- this License.
-
-1.10. “Modifications”
-
- means any of the following:
-
- a. any file in Source Code Form that results from an addition to, deletion
- from, or modification of the contents of Covered Software; or
-
- b. any new file in Source Code Form that contains any Covered Software.
-
-1.11. “Patent Claims” of a Contributor
-
- means any patent claim(s), including without limitation, method, process,
- and apparatus claims, in any patent Licensable by such Contributor that
- would be infringed, but for the grant of the License, by the making,
- using, selling, offering for sale, having made, import, or transfer of
- either its Contributions or its Contributor Version.
-
-1.12. “Secondary License”
-
- means either the GNU General Public License, Version 2.0, the GNU Lesser
- General Public License, Version 2.1, the GNU Affero General Public
- License, Version 3.0, or any later versions of those licenses.
-
-1.13. “Source Code Form”
-
- means the form of the work preferred for making modifications.
-
-1.14. “You” (or “Your”)
-
- means an individual or a legal entity exercising rights under this
- License. For legal entities, “You” includes any entity that controls, is
- controlled by, or is under common control with You. For purposes of this
- definition, “control” means (a) the power, direct or indirect, to cause
- the direction or management of such entity, whether by contract or
- otherwise, or (b) ownership of more than fifty percent (50%) of the
- outstanding shares or beneficial ownership of such entity.
-
-
-2. License Grants and Conditions
-
-2.1. Grants
-
- Each Contributor hereby grants You a world-wide, royalty-free,
- non-exclusive license:
-
- a. under intellectual property rights (other than patent or trademark)
- Licensable by such Contributor to use, reproduce, make available,
- modify, display, perform, distribute, and otherwise exploit its
- Contributions, either on an unmodified basis, with Modifications, or as
- part of a Larger Work; and
-
- b. under Patent Claims of such Contributor to make, use, sell, offer for
- sale, have made, import, and otherwise transfer either its Contributions
- or its Contributor Version.
-
-2.2. Effective Date
-
- The licenses granted in Section 2.1 with respect to any Contribution become
- effective for each Contribution on the date the Contributor first distributes
- such Contribution.
-
-2.3. Limitations on Grant Scope
-
- The licenses granted in this Section 2 are the only rights granted under this
- License. No additional rights or licenses will be implied from the distribution
- or licensing of Covered Software under this License. Notwithstanding Section
- 2.1(b) above, no patent license is granted by a Contributor:
-
- a. for any code that a Contributor has removed from Covered Software; or
-
- b. for infringements caused by: (i) Your and any other third party’s
- modifications of Covered Software, or (ii) the combination of its
- Contributions with other software (except as part of its Contributor
- Version); or
-
- c. under Patent Claims infringed by Covered Software in the absence of its
- Contributions.
-
- This License does not grant any rights in the trademarks, service marks, or
- logos of any Contributor (except as may be necessary to comply with the
- notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
- No Contributor makes additional grants as a result of Your choice to
- distribute the Covered Software under a subsequent version of this License
- (see Section 10.2) or under the terms of a Secondary License (if permitted
- under the terms of Section 3.3).
-
-2.5. Representation
-
- Each Contributor represents that the Contributor believes its Contributions
- are its original creation(s) or it has sufficient rights to grant the
- rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
- This License is not intended to limit any rights You have under applicable
- copyright doctrines of fair use, fair dealing, or other equivalents.
-
-2.7. Conditions
-
- Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
- Section 2.1.
-
-
-3. Responsibilities
-
-3.1. Distribution of Source Form
-
- All distribution of Covered Software in Source Code Form, including any
- Modifications that You create or to which You contribute, must be under the
- terms of this License. You must inform recipients that the Source Code Form
- of the Covered Software is governed by the terms of this License, and how
- they can obtain a copy of this License. You may not attempt to alter or
- restrict the recipients’ rights in the Source Code Form.
-
-3.2. Distribution of Executable Form
-
- If You distribute Covered Software in Executable Form then:
-
- a. such Covered Software must also be made available in Source Code Form,
- as described in Section 3.1, and You must inform recipients of the
- Executable Form how they can obtain a copy of such Source Code Form by
- reasonable means in a timely manner, at a charge no more than the cost
- of distribution to the recipient; and
-
- b. You may distribute such Executable Form under the terms of this License,
- or sublicense it under different terms, provided that the license for
- the Executable Form does not attempt to limit or alter the recipients’
- rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
- You may create and distribute a Larger Work under terms of Your choice,
- provided that You also comply with the requirements of this License for the
- Covered Software. If the Larger Work is a combination of Covered Software
- with a work governed by one or more Secondary Licenses, and the Covered
- Software is not Incompatible With Secondary Licenses, this License permits
- You to additionally distribute such Covered Software under the terms of
- such Secondary License(s), so that the recipient of the Larger Work may, at
- their option, further distribute the Covered Software under the terms of
- either this License or such Secondary License(s).
-
-3.4. Notices
-
- You may not remove or alter the substance of any license notices (including
- copyright notices, patent notices, disclaimers of warranty, or limitations
- of liability) contained within the Source Code Form of the Covered
- Software, except that You may alter any license notices to the extent
- required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
- You may choose to offer, and to charge a fee for, warranty, support,
- indemnity or liability obligations to one or more recipients of Covered
- Software. However, You may do so only on Your own behalf, and not on behalf
- of any Contributor. You must make it absolutely clear that any such
- warranty, support, indemnity, or liability obligation is offered by You
- alone, and You hereby agree to indemnify every Contributor for any
- liability incurred by such Contributor as a result of warranty, support,
- indemnity or liability terms You offer. You may include additional
- disclaimers of warranty and limitations of liability specific to any
- jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
-
- If it is impossible for You to comply with any of the terms of this License
- with respect to some or all of the Covered Software due to statute, judicial
- order, or regulation then You must: (a) comply with the terms of this License
- to the maximum extent possible; and (b) describe the limitations and the code
- they affect. Such description must be placed in a text file included with all
- distributions of the Covered Software under this License. Except to the
- extent prohibited by statute or regulation, such description must be
- sufficiently detailed for a recipient of ordinary skill to be able to
- understand it.
-
-5. Termination
-
-5.1. The rights granted under this License will terminate automatically if You
- fail to comply with any of its terms. However, if You become compliant,
- then the rights granted under this License from a particular Contributor
- are reinstated (a) provisionally, unless and until such Contributor
- explicitly and finally terminates Your grants, and (b) on an ongoing basis,
- if such Contributor fails to notify You of the non-compliance by some
- reasonable means prior to 60 days after You have come back into compliance.
- Moreover, Your grants from a particular Contributor are reinstated on an
- ongoing basis if such Contributor notifies You of the non-compliance by
- some reasonable means, this is the first time You have received notice of
- non-compliance with this License from such Contributor, and You become
- compliant prior to 30 days after Your receipt of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
- infringement claim (excluding declaratory judgment actions, counter-claims,
- and cross-claims) alleging that a Contributor Version directly or
- indirectly infringes any patent, then the rights granted to You by any and
- all Contributors for the Covered Software under Section 2.1 of this License
- shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
- license agreements (excluding distributors and resellers) which have been
- validly granted by You or Your distributors under this License prior to
- termination shall survive termination.
-
-6. Disclaimer of Warranty
-
- Covered Software is provided under this License on an “as is” basis, without
- warranty of any kind, either expressed, implied, or statutory, including,
- without limitation, warranties that the Covered Software is free of defects,
- merchantable, fit for a particular purpose or non-infringing. The entire
- risk as to the quality and performance of the Covered Software is with You.
- Should any Covered Software prove defective in any respect, You (not any
- Contributor) assume the cost of any necessary servicing, repair, or
- correction. This disclaimer of warranty constitutes an essential part of this
- License. No use of any Covered Software is authorized under this License
- except under this disclaimer.
-
-7. Limitation of Liability
-
- Under no circumstances and under no legal theory, whether tort (including
- negligence), contract, or otherwise, shall any Contributor, or anyone who
- distributes Covered Software as permitted above, be liable to You for any
- direct, indirect, special, incidental, or consequential damages of any
- character including, without limitation, damages for lost profits, loss of
- goodwill, work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses, even if such party shall have been
- informed of the possibility of such damages. This limitation of liability
- shall not apply to liability for death or personal injury resulting from such
- party’s negligence to the extent applicable law prohibits such limitation.
- Some jurisdictions do not allow the exclusion or limitation of incidental or
- consequential damages, so this exclusion and limitation may not apply to You.
-
-8. Litigation
-
- Any litigation relating to this License may be brought only in the courts of
- a jurisdiction where the defendant maintains its principal place of business
- and such litigation shall be governed by laws of that jurisdiction, without
- reference to its conflict-of-law provisions. Nothing in this Section shall
- prevent a party’s ability to bring cross-claims or counter-claims.
-
-9. Miscellaneous
-
- This License represents the complete agreement concerning the subject matter
- hereof. If any provision of this License is held to be unenforceable, such
- provision shall be reformed only to the extent necessary to make it
- enforceable. Any law or regulation which provides that the language of a
- contract shall be construed against the drafter shall not be used to construe
- this License against a Contributor.
-
-
-10. Versions of the License
-
-10.1. New Versions
-
- Mozilla Foundation is the license steward. Except as provided in Section
- 10.3, no one other than the license steward has the right to modify or
- publish new versions of this License. Each version will be given a
- distinguishing version number.
-
-10.2. Effect of New Versions
-
- You may distribute the Covered Software under the terms of the version of
- the License under which You originally received the Covered Software, or
- under the terms of any subsequent version published by the license
- steward.
-
-10.3. Modified Versions
-
- If you create software not governed by this License, and you want to
- create a new license for such software, you may create and use a modified
- version of this License if you rename the license and remove any
- references to the name of the license steward (except to note that such
- modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
- If You choose to distribute Source Code Form that is Incompatible With
- Secondary Licenses under the terms of this version of the License, the
- notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
-
- This Source Code Form is subject to the
- terms of the Mozilla Public License, v.
- 2.0. If a copy of the MPL was not
- distributed with this file, You can
- obtain one at
- http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file, then
-You may include the notice in a location (such as a LICENSE file in a relevant
-directory) where a recipient would be likely to look for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - “Incompatible With Secondary Licenses” Notice
-
- This Source Code Form is “Incompatible
- With Secondary Licenses”, as defined by
- the Mozilla Public License, v. 2.0.
-
diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md
deleted file mode 100644
index 444df08f..00000000
--- a/vendor/github.com/hashicorp/errwrap/README.md
+++ /dev/null
@@ -1,89 +0,0 @@
-# errwrap
-
-`errwrap` is a package for Go that formalizes the pattern of wrapping errors
-and checking if an error contains another error.
-
-There is a common pattern in Go of taking a returned `error` value and
-then wrapping it (such as with `fmt.Errorf`) before returning it. The problem
-with this pattern is that you completely lose the original `error` structure.
-
-Arguably the _correct_ approach is that you should make a custom structure
-implementing the `error` interface, and have the original error as a field
-on that structure, such [as this example](http://golang.org/pkg/os/#PathError).
-This is a good approach, but you have to know the entire chain of possible
-rewrapping that happens, when you might just care about one.
-
-`errwrap` formalizes this pattern (it doesn't matter what approach you use
-above) by giving a single interface for wrapping errors, checking if a specific
-error is wrapped, and extracting that error.
-
-## Installation and Docs
-
-Install using `go get github.com/hashicorp/errwrap`.
-
-Full documentation is available at
-http://godoc.org/github.com/hashicorp/errwrap
-
-## Usage
-
-#### Basic Usage
-
-Below is a very basic example of its usage:
-
-```go
-// A function that always returns an error, but wraps it, like a real
-// function might.
-func tryOpen() error {
- _, err := os.Open("/i/dont/exist")
- if err != nil {
- return errwrap.Wrapf("Doesn't exist: {{err}}", err)
- }
-
- return nil
-}
-
-func main() {
- err := tryOpen()
-
- // We can use the Contains helpers to check if an error contains
- // another error. It is safe to do this with a nil error, or with
- // an error that doesn't even use the errwrap package.
- if errwrap.Contains(err, "does not exist") {
- // Do something
- }
- if errwrap.ContainsType(err, new(os.PathError)) {
- // Do something
- }
-
- // Or we can use the associated `Get` functions to just extract
- // a specific error. This would return nil if that specific error doesn't
- // exist.
- perr := errwrap.GetType(err, new(os.PathError))
-}
-```
-
-#### Custom Types
-
-If you're already making custom types that properly wrap errors, then
-you can get all the functionality of `errwraps.Contains` and such by
-implementing the `Wrapper` interface with just one function. Example:
-
-```go
-type AppError {
- Code ErrorCode
- Err error
-}
-
-func (e *AppError) WrappedErrors() []error {
- return []error{e.Err}
-}
-```
-
-Now this works:
-
-```go
-err := &AppError{Err: fmt.Errorf("an error")}
-if errwrap.ContainsType(err, fmt.Errorf("")) {
- // This will work!
-}
-```
diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go
deleted file mode 100644
index 44e368e5..00000000
--- a/vendor/github.com/hashicorp/errwrap/errwrap.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Package errwrap implements methods to formalize error wrapping in Go.
-//
-// All of the top-level functions that take an `error` are built to be able
-// to take any error, not just wrapped errors. This allows you to use errwrap
-// without having to type-check and type-cast everywhere.
-package errwrap
-
-import (
- "errors"
- "reflect"
- "strings"
-)
-
-// WalkFunc is the callback called for Walk.
-type WalkFunc func(error)
-
-// Wrapper is an interface that can be implemented by custom types to
-// have all the Contains, Get, etc. functions in errwrap work.
-//
-// When Walk reaches a Wrapper, it will call the callback for every
-// wrapped error in addition to the wrapper itself. Since all the top-level
-// functions in errwrap use Walk, this means that all those functions work
-// with your custom type.
-type Wrapper interface {
- WrappedErrors() []error
-}
-
-// Wrap defines that outer wraps inner, returning an error type that
-// can be cleanly used with the other methods in this package, such as
-// Contains, GetAll, etc.
-//
-// This function won't modify the error message at all (the outer message
-// will be used).
-func Wrap(outer, inner error) error {
- return &wrappedError{
- Outer: outer,
- Inner: inner,
- }
-}
-
-// Wrapf wraps an error with a formatting message. This is similar to using
-// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap
-// errors, you should replace it with this.
-//
-// format is the format of the error message. The string '{{err}}' will
-// be replaced with the original error message.
-//
-// Deprecated: Use fmt.Errorf()
-func Wrapf(format string, err error) error {
- outerMsg := ""
- if err != nil {
- outerMsg = err.Error()
- }
-
- outer := errors.New(strings.Replace(
- format, "{{err}}", outerMsg, -1))
-
- return Wrap(outer, err)
-}
-
-// Contains checks if the given error contains an error with the
-// message msg. If err is not a wrapped error, this will always return
-// false unless the error itself happens to match this msg.
-func Contains(err error, msg string) bool {
- return len(GetAll(err, msg)) > 0
-}
-
-// ContainsType checks if the given error contains an error with
-// the same concrete type as v. If err is not a wrapped error, this will
-// check the err itself.
-func ContainsType(err error, v interface{}) bool {
- return len(GetAllType(err, v)) > 0
-}
-
-// Get is the same as GetAll but returns the deepest matching error.
-func Get(err error, msg string) error {
- es := GetAll(err, msg)
- if len(es) > 0 {
- return es[len(es)-1]
- }
-
- return nil
-}
-
-// GetType is the same as GetAllType but returns the deepest matching error.
-func GetType(err error, v interface{}) error {
- es := GetAllType(err, v)
- if len(es) > 0 {
- return es[len(es)-1]
- }
-
- return nil
-}
-
-// GetAll gets all the errors that might be wrapped in err with the
-// given message. The order of the errors is such that the outermost
-// matching error (the most recent wrap) is index zero, and so on.
-func GetAll(err error, msg string) []error {
- var result []error
-
- Walk(err, func(err error) {
- if err.Error() == msg {
- result = append(result, err)
- }
- })
-
- return result
-}
-
-// GetAllType gets all the errors that are the same type as v.
-//
-// The order of the return value is the same as described in GetAll.
-func GetAllType(err error, v interface{}) []error {
- var result []error
-
- var search string
- if v != nil {
- search = reflect.TypeOf(v).String()
- }
- Walk(err, func(err error) {
- var needle string
- if err != nil {
- needle = reflect.TypeOf(err).String()
- }
-
- if needle == search {
- result = append(result, err)
- }
- })
-
- return result
-}
-
-// Walk walks all the wrapped errors in err and calls the callback. If
-// err isn't a wrapped error, this will be called once for err. If err
-// is a wrapped error, the callback will be called for both the wrapper
-// that implements error as well as the wrapped error itself.
-func Walk(err error, cb WalkFunc) {
- if err == nil {
- return
- }
-
- switch e := err.(type) {
- case *wrappedError:
- cb(e.Outer)
- Walk(e.Inner, cb)
- case Wrapper:
- cb(err)
-
- for _, err := range e.WrappedErrors() {
- Walk(err, cb)
- }
- case interface{ Unwrap() error }:
- cb(err)
- Walk(e.Unwrap(), cb)
- default:
- cb(err)
- }
-}
-
-// wrappedError is an implementation of error that has both the
-// outer and inner errors.
-type wrappedError struct {
- Outer error
- Inner error
-}
-
-func (w *wrappedError) Error() string {
- return w.Outer.Error()
-}
-
-func (w *wrappedError) WrappedErrors() []error {
- return []error{w.Outer, w.Inner}
-}
-
-func (w *wrappedError) Unwrap() error {
- return w.Inner
-}
diff --git a/vendor/github.com/hashicorp/go-checkpoint/LICENSE b/vendor/github.com/hashicorp/go-checkpoint/LICENSE
deleted file mode 100644
index c33dcc7c..00000000
--- a/vendor/github.com/hashicorp/go-checkpoint/LICENSE
+++ /dev/null
@@ -1,354 +0,0 @@
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. “Contributor”
-
- means each individual or legal entity that creates, contributes to the
- creation of, or owns Covered Software.
-
-1.2. “Contributor Version”
-
- means the combination of the Contributions of others (if any) used by a
- Contributor and that particular Contributor’s Contribution.
-
-1.3. “Contribution”
-
- means Covered Software of a particular Contributor.
-
-1.4. “Covered Software”
-
- means Source Code Form to which the initial Contributor has attached the
- notice in Exhibit A, the Executable Form of such Source Code Form, and
- Modifications of such Source Code Form, in each case including portions
- thereof.
-
-1.5. “Incompatible With Secondary Licenses”
- means
-
- a. that the initial Contributor has attached the notice described in
- Exhibit B to the Covered Software; or
-
- b. that the Covered Software was made available under the terms of version
- 1.1 or earlier of the License, but not also under the terms of a
- Secondary License.
-
-1.6. “Executable Form”
-
- means any form of the work other than Source Code Form.
-
-1.7. “Larger Work”
-
- means a work that combines Covered Software with other material, in a separate
- file or files, that is not Covered Software.
-
-1.8. “License”
-
- means this document.
-
-1.9. “Licensable”
-
- means having the right to grant, to the maximum extent possible, whether at the
- time of the initial grant or subsequently, any and all of the rights conveyed by
- this License.
-
-1.10. “Modifications”
-
- means any of the following:
-
- a. any file in Source Code Form that results from an addition to, deletion
- from, or modification of the contents of Covered Software; or
-
- b. any new file in Source Code Form that contains any Covered Software.
-
-1.11. “Patent Claims” of a Contributor
-
- means any patent claim(s), including without limitation, method, process,
- and apparatus claims, in any patent Licensable by such Contributor that
- would be infringed, but for the grant of the License, by the making,
- using, selling, offering for sale, having made, import, or transfer of
- either its Contributions or its Contributor Version.
-
-1.12. “Secondary License”
-
- means either the GNU General Public License, Version 2.0, the GNU Lesser
- General Public License, Version 2.1, the GNU Affero General Public
- License, Version 3.0, or any later versions of those licenses.
-
-1.13. “Source Code Form”
-
- means the form of the work preferred for making modifications.
-
-1.14. “You” (or “Your”)
-
- means an individual or a legal entity exercising rights under this
- License. For legal entities, “You” includes any entity that controls, is
- controlled by, or is under common control with You. For purposes of this
- definition, “control” means (a) the power, direct or indirect, to cause
- the direction or management of such entity, whether by contract or
- otherwise, or (b) ownership of more than fifty percent (50%) of the
- outstanding shares or beneficial ownership of such entity.
-
-
-2. License Grants and Conditions
-
-2.1. Grants
-
- Each Contributor hereby grants You a world-wide, royalty-free,
- non-exclusive license:
-
- a. under intellectual property rights (other than patent or trademark)
- Licensable by such Contributor to use, reproduce, make available,
- modify, display, perform, distribute, and otherwise exploit its
- Contributions, either on an unmodified basis, with Modifications, or as
- part of a Larger Work; and
-
- b. under Patent Claims of such Contributor to make, use, sell, offer for
- sale, have made, import, and otherwise transfer either its Contributions
- or its Contributor Version.
-
-2.2. Effective Date
-
- The licenses granted in Section 2.1 with respect to any Contribution become
- effective for each Contribution on the date the Contributor first distributes
- such Contribution.
-
-2.3. Limitations on Grant Scope
-
- The licenses granted in this Section 2 are the only rights granted under this
- License. No additional rights or licenses will be implied from the distribution
- or licensing of Covered Software under this License. Notwithstanding Section
- 2.1(b) above, no patent license is granted by a Contributor:
-
- a. for any code that a Contributor has removed from Covered Software; or
-
- b. for infringements caused by: (i) Your and any other third party’s
- modifications of Covered Software, or (ii) the combination of its
- Contributions with other software (except as part of its Contributor
- Version); or
-
- c. under Patent Claims infringed by Covered Software in the absence of its
- Contributions.
-
- This License does not grant any rights in the trademarks, service marks, or
- logos of any Contributor (except as may be necessary to comply with the
- notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
- No Contributor makes additional grants as a result of Your choice to
- distribute the Covered Software under a subsequent version of this License
- (see Section 10.2) or under the terms of a Secondary License (if permitted
- under the terms of Section 3.3).
-
-2.5. Representation
-
- Each Contributor represents that the Contributor believes its Contributions
- are its original creation(s) or it has sufficient rights to grant the
- rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
- This License is not intended to limit any rights You have under applicable
- copyright doctrines of fair use, fair dealing, or other equivalents.
-
-2.7. Conditions
-
- Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
- Section 2.1.
-
-
-3. Responsibilities
-
-3.1. Distribution of Source Form
-
- All distribution of Covered Software in Source Code Form, including any
- Modifications that You create or to which You contribute, must be under the
- terms of this License. You must inform recipients that the Source Code Form
- of the Covered Software is governed by the terms of this License, and how
- they can obtain a copy of this License. You may not attempt to alter or
- restrict the recipients’ rights in the Source Code Form.
-
-3.2. Distribution of Executable Form
-
- If You distribute Covered Software in Executable Form then:
-
- a. such Covered Software must also be made available in Source Code Form,
- as described in Section 3.1, and You must inform recipients of the
- Executable Form how they can obtain a copy of such Source Code Form by
- reasonable means in a timely manner, at a charge no more than the cost
- of distribution to the recipient; and
-
- b. You may distribute such Executable Form under the terms of this License,
- or sublicense it under different terms, provided that the license for
- the Executable Form does not attempt to limit or alter the recipients’
- rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
- You may create and distribute a Larger Work under terms of Your choice,
- provided that You also comply with the requirements of this License for the
- Covered Software. If the Larger Work is a combination of Covered Software
- with a work governed by one or more Secondary Licenses, and the Covered
- Software is not Incompatible With Secondary Licenses, this License permits
- You to additionally distribute such Covered Software under the terms of
- such Secondary License(s), so that the recipient of the Larger Work may, at
- their option, further distribute the Covered Software under the terms of
- either this License or such Secondary License(s).
-
-3.4. Notices
-
- You may not remove or alter the substance of any license notices (including
- copyright notices, patent notices, disclaimers of warranty, or limitations
- of liability) contained within the Source Code Form of the Covered
- Software, except that You may alter any license notices to the extent
- required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
- You may choose to offer, and to charge a fee for, warranty, support,
- indemnity or liability obligations to one or more recipients of Covered
- Software. However, You may do so only on Your own behalf, and not on behalf
- of any Contributor. You must make it absolutely clear that any such
- warranty, support, indemnity, or liability obligation is offered by You
- alone, and You hereby agree to indemnify every Contributor for any
- liability incurred by such Contributor as a result of warranty, support,
- indemnity or liability terms You offer. You may include additional
- disclaimers of warranty and limitations of liability specific to any
- jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
-
- If it is impossible for You to comply with any of the terms of this License
- with respect to some or all of the Covered Software due to statute, judicial
- order, or regulation then You must: (a) comply with the terms of this License
- to the maximum extent possible; and (b) describe the limitations and the code
- they affect. Such description must be placed in a text file included with all
- distributions of the Covered Software under this License. Except to the
- extent prohibited by statute or regulation, such description must be
- sufficiently detailed for a recipient of ordinary skill to be able to
- understand it.
-
-5. Termination
-
-5.1. The rights granted under this License will terminate automatically if You
- fail to comply with any of its terms. However, if You become compliant,
- then the rights granted under this License from a particular Contributor
- are reinstated (a) provisionally, unless and until such Contributor
- explicitly and finally terminates Your grants, and (b) on an ongoing basis,
- if such Contributor fails to notify You of the non-compliance by some
- reasonable means prior to 60 days after You have come back into compliance.
- Moreover, Your grants from a particular Contributor are reinstated on an
- ongoing basis if such Contributor notifies You of the non-compliance by
- some reasonable means, this is the first time You have received notice of
- non-compliance with this License from such Contributor, and You become
- compliant prior to 30 days after Your receipt of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
- infringement claim (excluding declaratory judgment actions, counter-claims,
- and cross-claims) alleging that a Contributor Version directly or
- indirectly infringes any patent, then the rights granted to You by any and
- all Contributors for the Covered Software under Section 2.1 of this License
- shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
- license agreements (excluding distributors and resellers) which have been
- validly granted by You or Your distributors under this License prior to
- termination shall survive termination.
-
-6. Disclaimer of Warranty
-
- Covered Software is provided under this License on an “as is” basis, without
- warranty of any kind, either expressed, implied, or statutory, including,
- without limitation, warranties that the Covered Software is free of defects,
- merchantable, fit for a particular purpose or non-infringing. The entire
- risk as to the quality and performance of the Covered Software is with You.
- Should any Covered Software prove defective in any respect, You (not any
- Contributor) assume the cost of any necessary servicing, repair, or
- correction. This disclaimer of warranty constitutes an essential part of this
- License. No use of any Covered Software is authorized under this License
- except under this disclaimer.
-
-7. Limitation of Liability
-
- Under no circumstances and under no legal theory, whether tort (including
- negligence), contract, or otherwise, shall any Contributor, or anyone who
- distributes Covered Software as permitted above, be liable to You for any
- direct, indirect, special, incidental, or consequential damages of any
- character including, without limitation, damages for lost profits, loss of
- goodwill, work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses, even if such party shall have been
- informed of the possibility of such damages. This limitation of liability
- shall not apply to liability for death or personal injury resulting from such
- party’s negligence to the extent applicable law prohibits such limitation.
- Some jurisdictions do not allow the exclusion or limitation of incidental or
- consequential damages, so this exclusion and limitation may not apply to You.
-
-8. Litigation
-
- Any litigation relating to this License may be brought only in the courts of
- a jurisdiction where the defendant maintains its principal place of business
- and such litigation shall be governed by laws of that jurisdiction, without
- reference to its conflict-of-law provisions. Nothing in this Section shall
- prevent a party’s ability to bring cross-claims or counter-claims.
-
-9. Miscellaneous
-
- This License represents the complete agreement concerning the subject matter
- hereof. If any provision of this License is held to be unenforceable, such
- provision shall be reformed only to the extent necessary to make it
- enforceable. Any law or regulation which provides that the language of a
- contract shall be construed against the drafter shall not be used to construe
- this License against a Contributor.
-
-
-10. Versions of the License
-
-10.1. New Versions
-
- Mozilla Foundation is the license steward. Except as provided in Section
- 10.3, no one other than the license steward has the right to modify or
- publish new versions of this License. Each version will be given a
- distinguishing version number.
-
-10.2. Effect of New Versions
-
- You may distribute the Covered Software under the terms of the version of
- the License under which You originally received the Covered Software, or
- under the terms of any subsequent version published by the license
- steward.
-
-10.3. Modified Versions
-
- If you create software not governed by this License, and you want to
- create a new license for such software, you may create and use a modified
- version of this License if you rename the license and remove any
- references to the name of the license steward (except to note that such
- modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
- If You choose to distribute Source Code Form that is Incompatible With
- Secondary Licenses under the terms of this version of the License, the
- notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
-
- This Source Code Form is subject to the
- terms of the Mozilla Public License, v.
- 2.0. If a copy of the MPL was not
- distributed with this file, You can
- obtain one at
- http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file, then
-You may include the notice in a location (such as a LICENSE file in a relevant
-directory) where a recipient would be likely to look for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - “Incompatible With Secondary Licenses” Notice
-
- This Source Code Form is “Incompatible
- With Secondary Licenses”, as defined by
- the Mozilla Public License, v. 2.0.
-
diff --git a/vendor/github.com/hashicorp/go-checkpoint/README.md b/vendor/github.com/hashicorp/go-checkpoint/README.md
deleted file mode 100644
index e717b6ad..00000000
--- a/vendor/github.com/hashicorp/go-checkpoint/README.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Go Checkpoint Client
-
-[Checkpoint](http://checkpoint.hashicorp.com) is an internal service at
-Hashicorp that we use to check version information, broadcast security
-bulletins, etc.
-
-We understand that software making remote calls over the internet
-for any reason can be undesirable. Because of this, Checkpoint can be
-disabled in all of our software that includes it. You can view the source
-of this client to see that we're not sending any private information.
-
-Each Hashicorp application has it's specific configuration option
-to disable checkpoint calls, but the `CHECKPOINT_DISABLE` makes
-the underlying checkpoint component itself disabled. For example
-in the case of packer:
-```
-CHECKPOINT_DISABLE=1 packer build
-```
-
-**Note:** This repository is probably useless outside of internal HashiCorp
-use. It is open source for disclosure and because our open source projects
-must be able to link to it.
diff --git a/vendor/github.com/hashicorp/go-checkpoint/check.go b/vendor/github.com/hashicorp/go-checkpoint/check.go
deleted file mode 100644
index 109d0d35..00000000
--- a/vendor/github.com/hashicorp/go-checkpoint/check.go
+++ /dev/null
@@ -1,368 +0,0 @@
-package checkpoint
-
-import (
- crand "crypto/rand"
- "encoding/binary"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- mrand "math/rand"
- "net/http"
- "net/url"
- "os"
- "path/filepath"
- "reflect"
- "runtime"
- "strconv"
- "strings"
- "time"
-
- "github.com/hashicorp/go-cleanhttp"
-)
-
-var magicBytes = [4]byte{0x35, 0x77, 0x69, 0xFB}
-
-// CheckParams are the parameters for configuring a check request.
-type CheckParams struct {
- // Product and version are used to lookup the correct product and
- // alerts for the proper version. The version is also used to perform
- // a version check.
- Product string
- Version string
-
- // Arch and OS are used to filter alerts potentially only to things
- // affecting a specific os/arch combination. If these aren't specified,
- // they'll be automatically filled in.
- Arch string
- OS string
-
- // Signature is some random signature that should be stored and used
- // as a cookie-like value. This ensures that alerts aren't repeated.
- // If the signature is changed, repeat alerts may be sent down. The
- // signature should NOT be anything identifiable to a user (such as
- // a MAC address). It should be random.
- //
- // If SignatureFile is given, then the signature will be read from this
- // file. If the file doesn't exist, then a random signature will
- // automatically be generated and stored here. SignatureFile will be
- // ignored if Signature is given.
- Signature string
- SignatureFile string
-
- // CacheFile, if specified, will cache the result of a check. The
- // duration of the cache is specified by CacheDuration, and defaults
- // to 48 hours if not specified. If the CacheFile is newer than the
- // CacheDuration, than the Check will short-circuit and use those
- // results.
- //
- // If the CacheFile directory doesn't exist, it will be created with
- // permissions 0755.
- CacheFile string
- CacheDuration time.Duration
-
- // Force, if true, will force the check even if CHECKPOINT_DISABLE
- // is set. Within HashiCorp products, this is ONLY USED when the user
- // specifically requests it. This is never automatically done without
- // the user's consent.
- Force bool
-}
-
-// CheckResponse is the response for a check request.
-type CheckResponse struct {
- Product string `json:"product"`
- CurrentVersion string `json:"current_version"`
- CurrentReleaseDate int `json:"current_release_date"`
- CurrentDownloadURL string `json:"current_download_url"`
- CurrentChangelogURL string `json:"current_changelog_url"`
- ProjectWebsite string `json:"project_website"`
- Outdated bool `json:"outdated"`
- Alerts []*CheckAlert `json:"alerts"`
-}
-
-// CheckAlert is a single alert message from a check request.
-//
-// These never have to be manually constructed, and are typically populated
-// into a CheckResponse as a result of the Check request.
-type CheckAlert struct {
- ID int `json:"id"`
- Date int `json:"date"`
- Message string `json:"message"`
- URL string `json:"url"`
- Level string `json:"level"`
-}
-
-// Check checks for alerts and new version information.
-func Check(p *CheckParams) (*CheckResponse, error) {
- if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" && !p.Force {
- return &CheckResponse{}, nil
- }
-
- // Set a default timeout of 3 sec for the check request (in milliseconds)
- timeout := 3000
- if _, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil {
- timeout, _ = strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT"))
- }
-
- // If we have a cached result, then use that
- if r, err := checkCache(p.Version, p.CacheFile, p.CacheDuration); err != nil {
- return nil, err
- } else if r != nil {
- defer r.Close()
- return checkResult(r)
- }
-
- var u url.URL
-
- if p.Arch == "" {
- p.Arch = runtime.GOARCH
- }
- if p.OS == "" {
- p.OS = runtime.GOOS
- }
-
- // If we're given a SignatureFile, then attempt to read that.
- signature := p.Signature
- if p.Signature == "" && p.SignatureFile != "" {
- var err error
- signature, err = checkSignature(p.SignatureFile)
- if err != nil {
- return nil, err
- }
- }
-
- v := u.Query()
- v.Set("version", p.Version)
- v.Set("arch", p.Arch)
- v.Set("os", p.OS)
- v.Set("signature", signature)
-
- u.Scheme = "https"
- u.Host = "checkpoint-api.hashicorp.com"
- u.Path = fmt.Sprintf("/v1/check/%s", p.Product)
- u.RawQuery = v.Encode()
-
- req, err := http.NewRequest("GET", u.String(), nil)
- if err != nil {
- return nil, err
- }
- req.Header.Set("Accept", "application/json")
- req.Header.Set("User-Agent", "HashiCorp/go-checkpoint")
-
- client := cleanhttp.DefaultClient()
-
- // We use a short timeout since checking for new versions is not critical
- // enough to block on if checkpoint is broken/slow.
- client.Timeout = time.Duration(timeout) * time.Millisecond
-
- resp, err := client.Do(req)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- if resp.StatusCode != 200 {
- return nil, fmt.Errorf("Unknown status: %d", resp.StatusCode)
- }
-
- var r io.Reader = resp.Body
- if p.CacheFile != "" {
- // Make sure the directory holding our cache exists.
- if err := os.MkdirAll(filepath.Dir(p.CacheFile), 0755); err != nil {
- return nil, err
- }
-
- // We have to cache the result, so write the response to the
- // file as we read it.
- f, err := os.Create(p.CacheFile)
- if err != nil {
- return nil, err
- }
-
- // Write the cache header
- if err := writeCacheHeader(f, p.Version); err != nil {
- f.Close()
- os.Remove(p.CacheFile)
- return nil, err
- }
-
- defer f.Close()
- r = io.TeeReader(r, f)
- }
-
- return checkResult(r)
-}
-
-// CheckInterval is used to check for a response on a given interval duration.
-// The interval is not exact, and checks are randomized to prevent a thundering
-// herd. However, it is expected that on average one check is performed per
-// interval. The returned channel may be closed to stop background checks.
-func CheckInterval(p *CheckParams, interval time.Duration, cb func(*CheckResponse, error)) chan struct{} {
- doneCh := make(chan struct{})
-
- if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" {
- return doneCh
- }
-
- go func() {
- for {
- select {
- case <-time.After(randomStagger(interval)):
- resp, err := Check(p)
- cb(resp, err)
- case <-doneCh:
- return
- }
- }
- }()
-
- return doneCh
-}
-
-// randomStagger returns an interval that is between 3/4 and 5/4 of
-// the given interval. The expected value is the interval.
-func randomStagger(interval time.Duration) time.Duration {
- stagger := time.Duration(mrand.Int63()) % (interval / 2)
- return 3*(interval/4) + stagger
-}
-
-func checkCache(current string, path string, d time.Duration) (io.ReadCloser, error) {
- fi, err := os.Stat(path)
- if err != nil {
- if os.IsNotExist(err) {
- // File doesn't exist, not a problem
- return nil, nil
- }
-
- return nil, err
- }
-
- if d == 0 {
- d = 48 * time.Hour
- }
-
- if fi.ModTime().Add(d).Before(time.Now()) {
- // Cache is busted, delete the old file and re-request. We ignore
- // errors here because re-creating the file is fine too.
- os.Remove(path)
- return nil, nil
- }
-
- // File looks good so far, open it up so we can inspect the contents.
- f, err := os.Open(path)
- if err != nil {
- return nil, err
- }
-
- // Check the signature of the file
- var sig [4]byte
- if err := binary.Read(f, binary.LittleEndian, sig[:]); err != nil {
- f.Close()
- return nil, err
- }
- if !reflect.DeepEqual(sig, magicBytes) {
- // Signatures don't match. Reset.
- f.Close()
- return nil, nil
- }
-
- // Check the version. If it changed, then rewrite
- var length uint32
- if err := binary.Read(f, binary.LittleEndian, &length); err != nil {
- f.Close()
- return nil, err
- }
- data := make([]byte, length)
- if _, err := io.ReadFull(f, data); err != nil {
- f.Close()
- return nil, err
- }
- if string(data) != current {
- // Version changed, reset
- f.Close()
- return nil, nil
- }
-
- return f, nil
-}
-func checkResult(r io.Reader) (*CheckResponse, error) {
- var result CheckResponse
- if err := json.NewDecoder(r).Decode(&result); err != nil {
- return nil, err
- }
- return &result, nil
-}
-
-func checkSignature(path string) (string, error) {
- _, err := os.Stat(path)
- if err == nil {
- // The file exists, read it out
- sigBytes, err := ioutil.ReadFile(path)
- if err != nil {
- return "", err
- }
-
- // Split the file into lines
- lines := strings.SplitN(string(sigBytes), "\n", 2)
- if len(lines) > 0 {
- return strings.TrimSpace(lines[0]), nil
- }
- }
-
- // If this isn't a non-exist error, then return that.
- if !os.IsNotExist(err) {
- return "", err
- }
-
- // The file doesn't exist, so create a signature.
- var b [16]byte
- n := 0
- for n < 16 {
- n2, err := crand.Read(b[n:])
- if err != nil {
- return "", err
- }
-
- n += n2
- }
- signature := fmt.Sprintf(
- "%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
-
- // Make sure the directory holding our signature exists.
- if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
- return "", err
- }
-
- // Write the signature
- if err := ioutil.WriteFile(path, []byte(signature+"\n\n"+userMessage+"\n"), 0644); err != nil {
- return "", err
- }
-
- return signature, nil
-}
-
-func writeCacheHeader(f io.Writer, v string) error {
- // Write our signature first
- if err := binary.Write(f, binary.LittleEndian, magicBytes); err != nil {
- return err
- }
-
- // Write out our current version length
- length := uint32(len(v))
- if err := binary.Write(f, binary.LittleEndian, length); err != nil {
- return err
- }
-
- _, err := f.Write([]byte(v))
- return err
-}
-
-// userMessage is suffixed to the signature file to provide feedback.
-var userMessage = `
-This signature is a randomly generated UUID used to de-duplicate
-alerts and version information. This signature is random, it is
-not based on any personally identifiable information. To create
-a new signature, you can simply delete this file at any time.
-See the documentation for the software using Checkpoint for more
-information on how to disable it.
-`
diff --git a/vendor/github.com/hashicorp/go-checkpoint/telemetry.go b/vendor/github.com/hashicorp/go-checkpoint/telemetry.go
deleted file mode 100644
index b9ee6298..00000000
--- a/vendor/github.com/hashicorp/go-checkpoint/telemetry.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package checkpoint
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "net/http"
- "net/url"
- "os"
- "runtime"
- "time"
-
- "github.com/hashicorp/go-cleanhttp"
- uuid "github.com/hashicorp/go-uuid"
-)
-
-// ReportParams are the parameters for configuring a telemetry report.
-type ReportParams struct {
- // Signature is some random signature that should be stored and used
- // as a cookie-like value. This ensures that alerts aren't repeated.
- // If the signature is changed, repeat alerts may be sent down. The
- // signature should NOT be anything identifiable to a user (such as
- // a MAC address). It should be random.
- //
- // If SignatureFile is given, then the signature will be read from this
- // file. If the file doesn't exist, then a random signature will
- // automatically be generated and stored here. SignatureFile will be
- // ignored if Signature is given.
- Signature string `json:"signature"`
- SignatureFile string `json:"-"`
-
- StartTime time.Time `json:"start_time"`
- EndTime time.Time `json:"end_time"`
- Arch string `json:"arch"`
- OS string `json:"os"`
- Payload interface{} `json:"payload,omitempty"`
- Product string `json:"product"`
- RunID string `json:"run_id"`
- SchemaVersion string `json:"schema_version"`
- Version string `json:"version"`
-}
-
-func (i *ReportParams) signature() string {
- signature := i.Signature
- if i.Signature == "" && i.SignatureFile != "" {
- var err error
- signature, err = checkSignature(i.SignatureFile)
- if err != nil {
- return ""
- }
- }
- return signature
-}
-
-// Report sends telemetry information to checkpoint
-func Report(ctx context.Context, r *ReportParams) error {
- if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" {
- return nil
- }
-
- req, err := ReportRequest(r)
- if err != nil {
- return err
- }
-
- client := cleanhttp.DefaultClient()
- resp, err := client.Do(req.WithContext(ctx))
- if err != nil {
- return err
- }
- if resp.StatusCode != 201 {
- return fmt.Errorf("Unknown status: %d", resp.StatusCode)
- }
-
- return nil
-}
-
-// ReportRequest creates a request object for making a report
-func ReportRequest(r *ReportParams) (*http.Request, error) {
- // Populate some fields automatically if we can
- if r.RunID == "" {
- uuid, err := uuid.GenerateUUID()
- if err != nil {
- return nil, err
- }
- r.RunID = uuid
- }
- if r.Arch == "" {
- r.Arch = runtime.GOARCH
- }
- if r.OS == "" {
- r.OS = runtime.GOOS
- }
- if r.Signature == "" {
- r.Signature = r.signature()
- }
-
- b, err := json.Marshal(r)
- if err != nil {
- return nil, err
- }
-
- u := &url.URL{
- Scheme: "https",
- Host: "checkpoint-api.hashicorp.com",
- Path: fmt.Sprintf("/v1/telemetry/%s", r.Product),
- }
-
- req, err := http.NewRequest("POST", u.String(), bytes.NewReader(b))
- if err != nil {
- return nil, err
- }
- req.Header.Set("Accept", "application/json")
- req.Header.Set("User-Agent", "HashiCorp/go-checkpoint")
-
- return req, nil
-}
diff --git a/vendor/github.com/hashicorp/go-checkpoint/versions.go b/vendor/github.com/hashicorp/go-checkpoint/versions.go
deleted file mode 100644
index a5b0d3b3..00000000
--- a/vendor/github.com/hashicorp/go-checkpoint/versions.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package checkpoint
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "net/url"
- "os"
- "strconv"
- "time"
-
- "github.com/hashicorp/go-cleanhttp"
-)
-
-// VersionsParams are the parameters for a versions request.
-type VersionsParams struct {
- // Service is used to lookup the correct service.
- Service string
-
- // Product is used to filter the version contraints.
- Product string
-
- // Force, if true, will force the check even if CHECKPOINT_DISABLE
- // is set. Within HashiCorp products, this is ONLY USED when the user
- // specifically requests it. This is never automatically done without
- // the user's consent.
- Force bool
-}
-
-// VersionsResponse is the response for a versions request.
-type VersionsResponse struct {
- Service string `json:"service"`
- Product string `json:"product"`
- Minimum string `json:"minimum"`
- Maximum string `json:"maximum"`
- Excluding []string `json:"excluding"`
-}
-
-// Versions returns the version constrains for a given service and product.
-func Versions(p *VersionsParams) (*VersionsResponse, error) {
- if disabled := os.Getenv("CHECKPOINT_DISABLE"); disabled != "" && !p.Force {
- return &VersionsResponse{}, nil
- }
-
- // Set a default timeout of 1 sec for the versions request (in milliseconds)
- timeout := 1000
- if _, err := strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT")); err == nil {
- timeout, _ = strconv.Atoi(os.Getenv("CHECKPOINT_TIMEOUT"))
- }
-
- v := url.Values{}
- v.Set("product", p.Product)
-
- u := &url.URL{
- Scheme: "https",
- Host: "checkpoint-api.hashicorp.com",
- Path: fmt.Sprintf("/v1/versions/%s", p.Service),
- RawQuery: v.Encode(),
- }
-
- req, err := http.NewRequest("GET", u.String(), nil)
- if err != nil {
- return nil, err
- }
- req.Header.Set("Accept", "application/json")
- req.Header.Set("User-Agent", "HashiCorp/go-checkpoint")
-
- client := cleanhttp.DefaultClient()
-
- // We use a short timeout since checking for new versions is not critical
- // enough to block on if checkpoint is broken/slow.
- client.Timeout = time.Duration(timeout) * time.Millisecond
-
- resp, err := client.Do(req)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- if resp.StatusCode != 200 {
- return nil, fmt.Errorf("Unknown status: %d", resp.StatusCode)
- }
-
- result := &VersionsResponse{}
- if err := json.NewDecoder(resp.Body).Decode(result); err != nil {
- return nil, err
- }
-
- return result, nil
-}
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE
deleted file mode 100644
index e87a115e..00000000
--- a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE
+++ /dev/null
@@ -1,363 +0,0 @@
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. "Contributor"
-
- means each individual or legal entity that creates, contributes to the
- creation of, or owns Covered Software.
-
-1.2. "Contributor Version"
-
- means the combination of the Contributions of others (if any) used by a
- Contributor and that particular Contributor's Contribution.
-
-1.3. "Contribution"
-
- means Covered Software of a particular Contributor.
-
-1.4. "Covered Software"
-
- means Source Code Form to which the initial Contributor has attached the
- notice in Exhibit A, the Executable Form of such Source Code Form, and
- Modifications of such Source Code Form, in each case including portions
- thereof.
-
-1.5. "Incompatible With Secondary Licenses"
- means
-
- a. that the initial Contributor has attached the notice described in
- Exhibit B to the Covered Software; or
-
- b. that the Covered Software was made available under the terms of
- version 1.1 or earlier of the License, but not also under the terms of
- a Secondary License.
-
-1.6. "Executable Form"
-
- means any form of the work other than Source Code Form.
-
-1.7. "Larger Work"
-
- means a work that combines Covered Software with other material, in a
- separate file or files, that is not Covered Software.
-
-1.8. "License"
-
- means this document.
-
-1.9. "Licensable"
-
- means having the right to grant, to the maximum extent possible, whether
- at the time of the initial grant or subsequently, any and all of the
- rights conveyed by this License.
-
-1.10. "Modifications"
-
- means any of the following:
-
- a. any file in Source Code Form that results from an addition to,
- deletion from, or modification of the contents of Covered Software; or
-
- b. any new file in Source Code Form that contains any Covered Software.
-
-1.11. "Patent Claims" of a Contributor
-
- means any patent claim(s), including without limitation, method,
- process, and apparatus claims, in any patent Licensable by such
- Contributor that would be infringed, but for the grant of the License,
- by the making, using, selling, offering for sale, having made, import,
- or transfer of either its Contributions or its Contributor Version.
-
-1.12. "Secondary License"
-
- means either the GNU General Public License, Version 2.0, the GNU Lesser
- General Public License, Version 2.1, the GNU Affero General Public
- License, Version 3.0, or any later versions of those licenses.
-
-1.13. "Source Code Form"
-
- means the form of the work preferred for making modifications.
-
-1.14. "You" (or "Your")
-
- means an individual or a legal entity exercising rights under this
- License. For legal entities, "You" includes any entity that controls, is
- controlled by, or is under common control with You. For purposes of this
- definition, "control" means (a) the power, direct or indirect, to cause
- the direction or management of such entity, whether by contract or
- otherwise, or (b) ownership of more than fifty percent (50%) of the
- outstanding shares or beneficial ownership of such entity.
-
-
-2. License Grants and Conditions
-
-2.1. Grants
-
- Each Contributor hereby grants You a world-wide, royalty-free,
- non-exclusive license:
-
- a. under intellectual property rights (other than patent or trademark)
- Licensable by such Contributor to use, reproduce, make available,
- modify, display, perform, distribute, and otherwise exploit its
- Contributions, either on an unmodified basis, with Modifications, or
- as part of a Larger Work; and
-
- b. under Patent Claims of such Contributor to make, use, sell, offer for
- sale, have made, import, and otherwise transfer either its
- Contributions or its Contributor Version.
-
-2.2. Effective Date
-
- The licenses granted in Section 2.1 with respect to any Contribution
- become effective for each Contribution on the date the Contributor first
- distributes such Contribution.
-
-2.3. Limitations on Grant Scope
-
- The licenses granted in this Section 2 are the only rights granted under
- this License. No additional rights or licenses will be implied from the
- distribution or licensing of Covered Software under this License.
- Notwithstanding Section 2.1(b) above, no patent license is granted by a
- Contributor:
-
- a. for any code that a Contributor has removed from Covered Software; or
-
- b. for infringements caused by: (i) Your and any other third party's
- modifications of Covered Software, or (ii) the combination of its
- Contributions with other software (except as part of its Contributor
- Version); or
-
- c. under Patent Claims infringed by Covered Software in the absence of
- its Contributions.
-
- This License does not grant any rights in the trademarks, service marks,
- or logos of any Contributor (except as may be necessary to comply with
- the notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
- No Contributor makes additional grants as a result of Your choice to
- distribute the Covered Software under a subsequent version of this
- License (see Section 10.2) or under the terms of a Secondary License (if
- permitted under the terms of Section 3.3).
-
-2.5. Representation
-
- Each Contributor represents that the Contributor believes its
- Contributions are its original creation(s) or it has sufficient rights to
- grant the rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
- This License is not intended to limit any rights You have under
- applicable copyright doctrines of fair use, fair dealing, or other
- equivalents.
-
-2.7. Conditions
-
- Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
- Section 2.1.
-
-
-3. Responsibilities
-
-3.1. Distribution of Source Form
-
- All distribution of Covered Software in Source Code Form, including any
- Modifications that You create or to which You contribute, must be under
- the terms of this License. You must inform recipients that the Source
- Code Form of the Covered Software is governed by the terms of this
- License, and how they can obtain a copy of this License. You may not
- attempt to alter or restrict the recipients' rights in the Source Code
- Form.
-
-3.2. Distribution of Executable Form
-
- If You distribute Covered Software in Executable Form then:
-
- a. such Covered Software must also be made available in Source Code Form,
- as described in Section 3.1, and You must inform recipients of the
- Executable Form how they can obtain a copy of such Source Code Form by
- reasonable means in a timely manner, at a charge no more than the cost
- of distribution to the recipient; and
-
- b. You may distribute such Executable Form under the terms of this
- License, or sublicense it under different terms, provided that the
- license for the Executable Form does not attempt to limit or alter the
- recipients' rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
- You may create and distribute a Larger Work under terms of Your choice,
- provided that You also comply with the requirements of this License for
- the Covered Software. If the Larger Work is a combination of Covered
- Software with a work governed by one or more Secondary Licenses, and the
- Covered Software is not Incompatible With Secondary Licenses, this
- License permits You to additionally distribute such Covered Software
- under the terms of such Secondary License(s), so that the recipient of
- the Larger Work may, at their option, further distribute the Covered
- Software under the terms of either this License or such Secondary
- License(s).
-
-3.4. Notices
-
- You may not remove or alter the substance of any license notices
- (including copyright notices, patent notices, disclaimers of warranty, or
- limitations of liability) contained within the Source Code Form of the
- Covered Software, except that You may alter any license notices to the
- extent required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
- You may choose to offer, and to charge a fee for, warranty, support,
- indemnity or liability obligations to one or more recipients of Covered
- Software. However, You may do so only on Your own behalf, and not on
- behalf of any Contributor. You must make it absolutely clear that any
- such warranty, support, indemnity, or liability obligation is offered by
- You alone, and You hereby agree to indemnify every Contributor for any
- liability incurred by such Contributor as a result of warranty, support,
- indemnity or liability terms You offer. You may include additional
- disclaimers of warranty and limitations of liability specific to any
- jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
-
- If it is impossible for You to comply with any of the terms of this License
- with respect to some or all of the Covered Software due to statute,
- judicial order, or regulation then You must: (a) comply with the terms of
- this License to the maximum extent possible; and (b) describe the
- limitations and the code they affect. Such description must be placed in a
- text file included with all distributions of the Covered Software under
- this License. Except to the extent prohibited by statute or regulation,
- such description must be sufficiently detailed for a recipient of ordinary
- skill to be able to understand it.
-
-5. Termination
-
-5.1. The rights granted under this License will terminate automatically if You
- fail to comply with any of its terms. However, if You become compliant,
- then the rights granted under this License from a particular Contributor
- are reinstated (a) provisionally, unless and until such Contributor
- explicitly and finally terminates Your grants, and (b) on an ongoing
- basis, if such Contributor fails to notify You of the non-compliance by
- some reasonable means prior to 60 days after You have come back into
- compliance. Moreover, Your grants from a particular Contributor are
- reinstated on an ongoing basis if such Contributor notifies You of the
- non-compliance by some reasonable means, this is the first time You have
- received notice of non-compliance with this License from such
- Contributor, and You become compliant prior to 30 days after Your receipt
- of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
- infringement claim (excluding declaratory judgment actions,
- counter-claims, and cross-claims) alleging that a Contributor Version
- directly or indirectly infringes any patent, then the rights granted to
- You by any and all Contributors for the Covered Software under Section
- 2.1 of this License shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
- license agreements (excluding distributors and resellers) which have been
- validly granted by You or Your distributors under this License prior to
- termination shall survive termination.
-
-6. Disclaimer of Warranty
-
- Covered Software is provided under this License on an "as is" basis,
- without warranty of any kind, either expressed, implied, or statutory,
- including, without limitation, warranties that the Covered Software is free
- of defects, merchantable, fit for a particular purpose or non-infringing.
- The entire risk as to the quality and performance of the Covered Software
- is with You. Should any Covered Software prove defective in any respect,
- You (not any Contributor) assume the cost of any necessary servicing,
- repair, or correction. This disclaimer of warranty constitutes an essential
- part of this License. No use of any Covered Software is authorized under
- this License except under this disclaimer.
-
-7. Limitation of Liability
-
- Under no circumstances and under no legal theory, whether tort (including
- negligence), contract, or otherwise, shall any Contributor, or anyone who
- distributes Covered Software as permitted above, be liable to You for any
- direct, indirect, special, incidental, or consequential damages of any
- character including, without limitation, damages for lost profits, loss of
- goodwill, work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses, even if such party shall have been
- informed of the possibility of such damages. This limitation of liability
- shall not apply to liability for death or personal injury resulting from
- such party's negligence to the extent applicable law prohibits such
- limitation. Some jurisdictions do not allow the exclusion or limitation of
- incidental or consequential damages, so this exclusion and limitation may
- not apply to You.
-
-8. Litigation
-
- Any litigation relating to this License may be brought only in the courts
- of a jurisdiction where the defendant maintains its principal place of
- business and such litigation shall be governed by laws of that
- jurisdiction, without reference to its conflict-of-law provisions. Nothing
- in this Section shall prevent a party's ability to bring cross-claims or
- counter-claims.
-
-9. Miscellaneous
-
- This License represents the complete agreement concerning the subject
- matter hereof. If any provision of this License is held to be
- unenforceable, such provision shall be reformed only to the extent
- necessary to make it enforceable. Any law or regulation which provides that
- the language of a contract shall be construed against the drafter shall not
- be used to construe this License against a Contributor.
-
-
-10. Versions of the License
-
-10.1. New Versions
-
- Mozilla Foundation is the license steward. Except as provided in Section
- 10.3, no one other than the license steward has the right to modify or
- publish new versions of this License. Each version will be given a
- distinguishing version number.
-
-10.2. Effect of New Versions
-
- You may distribute the Covered Software under the terms of the version
- of the License under which You originally received the Covered Software,
- or under the terms of any subsequent version published by the license
- steward.
-
-10.3. Modified Versions
-
- If you create software not governed by this License, and you want to
- create a new license for such software, you may create and use a
- modified version of this License if you rename the license and remove
- any references to the name of the license steward (except to note that
- such modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary
- Licenses If You choose to distribute Source Code Form that is
- Incompatible With Secondary Licenses under the terms of this version of
- the License, the notice described in Exhibit B of this License must be
- attached.
-
-Exhibit A - Source Code Form License Notice
-
- This Source Code Form is subject to the
- terms of the Mozilla Public License, v.
- 2.0. If a copy of the MPL was not
- distributed with this file, You can
- obtain one at
- http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file,
-then You may include the notice in a location (such as a LICENSE file in a
-relevant directory) where a recipient would be likely to look for such a
-notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - "Incompatible With Secondary Licenses" Notice
-
- This Source Code Form is "Incompatible
- With Secondary Licenses", as defined by
- the Mozilla Public License, v. 2.0.
-
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md
deleted file mode 100644
index 036e5313..00000000
--- a/vendor/github.com/hashicorp/go-cleanhttp/README.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# cleanhttp
-
-Functions for accessing "clean" Go http.Client values
-
--------------
-
-The Go standard library contains a default `http.Client` called
-`http.DefaultClient`. It is a common idiom in Go code to start with
-`http.DefaultClient` and tweak it as necessary, and in fact, this is
-encouraged; from the `http` package documentation:
-
-> The Client's Transport typically has internal state (cached TCP connections),
-so Clients should be reused instead of created as needed. Clients are safe for
-concurrent use by multiple goroutines.
-
-Unfortunately, this is a shared value, and it is not uncommon for libraries to
-assume that they are free to modify it at will. With enough dependencies, it
-can be very easy to encounter strange problems and race conditions due to
-manipulation of this shared value across libraries and goroutines (clients are
-safe for concurrent use, but writing values to the client struct itself is not
-protected).
-
-Making things worse is the fact that a bare `http.Client` will use a default
-`http.Transport` called `http.DefaultTransport`, which is another global value
-that behaves the same way. So it is not simply enough to replace
-`http.DefaultClient` with `&http.Client{}`.
-
-This repository provides some simple functions to get a "clean" `http.Client`
--- one that uses the same default values as the Go standard library, but
-returns a client that does not share any state with other clients.
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
deleted file mode 100644
index fe28d15b..00000000
--- a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package cleanhttp
-
-import (
- "net"
- "net/http"
- "runtime"
- "time"
-)
-
-// DefaultTransport returns a new http.Transport with similar default values to
-// http.DefaultTransport, but with idle connections and keepalives disabled.
-func DefaultTransport() *http.Transport {
- transport := DefaultPooledTransport()
- transport.DisableKeepAlives = true
- transport.MaxIdleConnsPerHost = -1
- return transport
-}
-
-// DefaultPooledTransport returns a new http.Transport with similar default
-// values to http.DefaultTransport. Do not use this for transient transports as
-// it can leak file descriptors over time. Only use this for transports that
-// will be re-used for the same host(s).
-func DefaultPooledTransport() *http.Transport {
- transport := &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- DialContext: (&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- DualStack: true,
- }).DialContext,
- MaxIdleConns: 100,
- IdleConnTimeout: 90 * time.Second,
- TLSHandshakeTimeout: 10 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
- ForceAttemptHTTP2: true,
- MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1,
- }
- return transport
-}
-
-// DefaultClient returns a new http.Client with similar default values to
-// http.Client, but with a non-shared Transport, idle connections disabled, and
-// keepalives disabled.
-func DefaultClient() *http.Client {
- return &http.Client{
- Transport: DefaultTransport(),
- }
-}
-
-// DefaultPooledClient returns a new http.Client with similar default values to
-// http.Client, but with a shared Transport. Do not use this function for
-// transient clients as it can leak file descriptors over time. Only use this
-// for clients that will be re-used for the same host(s).
-func DefaultPooledClient() *http.Client {
- return &http.Client{
- Transport: DefaultPooledTransport(),
- }
-}
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go
deleted file mode 100644
index 05841092..00000000
--- a/vendor/github.com/hashicorp/go-cleanhttp/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Package cleanhttp offers convenience utilities for acquiring "clean"
-// http.Transport and http.Client structs.
-//
-// Values set on http.DefaultClient and http.DefaultTransport affect all
-// callers. This can have detrimental effects, esepcially in TLS contexts,
-// where client or root certificates set to talk to multiple endpoints can end
-// up displacing each other, leading to hard-to-debug issues. This package
-// provides non-shared http.Client and http.Transport structs to ensure that
-// the configuration will not be overwritten by other parts of the application
-// or dependencies.
-//
-// The DefaultClient and DefaultTransport functions disable idle connections
-// and keepalives. Without ensuring that idle connections are closed before
-// garbage collection, short-term clients/transports can leak file descriptors,
-// eventually leading to "too many open files" errors. If you will be
-// connecting to the same hosts repeatedly from the same client, you can use
-// DefaultPooledClient to receive a client that has connection pooling
-// semantics similar to http.DefaultClient.
-//
-package cleanhttp
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go
deleted file mode 100644
index 3c845dc0..00000000
--- a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package cleanhttp
-
-import (
- "net/http"
- "strings"
- "unicode"
-)
-
-// HandlerInput provides input options to cleanhttp's handlers
-type HandlerInput struct {
- ErrStatus int
-}
-
-// PrintablePathCheckHandler is a middleware that ensures the request path
-// contains only printable runes.
-func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler {
- // Nil-check on input to make it optional
- if input == nil {
- input = &HandlerInput{
- ErrStatus: http.StatusBadRequest,
- }
- }
-
- // Default to http.StatusBadRequest on error
- if input.ErrStatus == 0 {
- input.ErrStatus = http.StatusBadRequest
- }
-
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if r != nil {
- // Check URL path for non-printable characters
- idx := strings.IndexFunc(r.URL.Path, func(c rune) bool {
- return !unicode.IsPrint(c)
- })
-
- if idx != -1 {
- w.WriteHeader(input.ErrStatus)
- return
- }
-
- if next != nil {
- next.ServeHTTP(w, r)
- }
- }
-
- return
- })
-}
diff --git a/vendor/github.com/hashicorp/go-cty/LICENSE b/vendor/github.com/hashicorp/go-cty/LICENSE
deleted file mode 100644
index d6503b55..00000000
--- a/vendor/github.com/hashicorp/go-cty/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2017-2018 Martin Atkins
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/hashicorp/go-cty/cty/capsule.go b/vendor/github.com/hashicorp/go-cty/cty/capsule.go
deleted file mode 100644
index 2fdc15ea..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/capsule.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package cty
-
-import (
- "fmt"
- "reflect"
-)
-
-type capsuleType struct {
- typeImplSigil
- Name string
- GoType reflect.Type
- Ops *CapsuleOps
-}
-
-func (t *capsuleType) Equals(other Type) bool {
- if otherP, ok := other.typeImpl.(*capsuleType); ok {
- // capsule types compare by pointer identity
- return otherP == t
- }
- return false
-}
-
-func (t *capsuleType) FriendlyName(mode friendlyTypeNameMode) string {
- return t.Name
-}
-
-func (t *capsuleType) GoString() string {
- impl := t.Ops.TypeGoString
- if impl == nil {
- // To get a useful representation of our native type requires some
- // shenanigans.
- victimVal := reflect.Zero(t.GoType)
- if t.Ops == noCapsuleOps {
- return fmt.Sprintf("cty.Capsule(%q, reflect.TypeOf(%#v))", t.Name, victimVal.Interface())
- } else {
- // Including the operations in the output will make this _very_ long,
- // so in practice any capsule type with ops ought to provide a
- // TypeGoString function to override this with something more
- // reasonable.
- return fmt.Sprintf("cty.CapsuleWithOps(%q, reflect.TypeOf(%#v), %#v)", t.Name, victimVal.Interface(), t.Ops)
- }
- }
- return impl(t.GoType)
-}
-
-// Capsule creates a new Capsule type.
-//
-// A Capsule type is a special type that can be used to transport arbitrary
-// Go native values of a given type through the cty type system. A language
-// that uses cty as its type system might, for example, provide functions
-// that return capsule-typed values and then other functions that operate
-// on those values.
-//
-// From cty's perspective, Capsule types have a few interesting characteristics,
-// described in the following paragraphs.
-//
-// Each capsule type has an associated Go native type that it is able to
-// transport. Capsule types compare by identity, so each call to the
-// Capsule function creates an entirely-distinct cty Type, even if two calls
-// use the same native type.
-//
-// Each capsule-typed value contains a pointer to a value of the given native
-// type. A capsule-typed value by default supports no operations except
-// equality, and equality is implemented by pointer identity of the
-// encapsulated pointer. A capsule type can optionally have its own
-// implementations of certain operations if it is created with CapsuleWithOps
-// instead of Capsule.
-//
-// The given name is used as the new type's "friendly name". This can be any
-// string in principle, but will usually be a short, all-lowercase name aimed
-// at users of the embedding language (i.e. not mention Go-specific details)
-// and will ideally not create ambiguity with any predefined cty type.
-//
-// Capsule types are never introduced by any standard cty operation, so a
-// calling application opts in to including them within its own type system
-// by creating them and introducing them via its own functions. At that point,
-// the application is responsible for dealing with any capsule-typed values
-// that might be returned.
-func Capsule(name string, nativeType reflect.Type) Type {
- return Type{
- &capsuleType{
- Name: name,
- GoType: nativeType,
- Ops: noCapsuleOps,
- },
- }
-}
-
-// CapsuleWithOps is like Capsule except the caller may provide an object
-// representing some overloaded operation implementations to associate with
-// the given capsule type.
-//
-// All of the other caveats and restrictions for capsule types still apply, but
-// overloaded operations can potentially help a capsule type participate better
-// in cty operations.
-func CapsuleWithOps(name string, nativeType reflect.Type, ops *CapsuleOps) Type {
- // Copy the operations to make sure the caller can't modify them after
- // we're constructed.
- ourOps := *ops
- ourOps.assertValid()
-
- return Type{
- &capsuleType{
- Name: name,
- GoType: nativeType,
- Ops: &ourOps,
- },
- }
-}
-
-// IsCapsuleType returns true if this type is a capsule type, as created
-// by cty.Capsule .
-func (t Type) IsCapsuleType() bool {
- _, ok := t.typeImpl.(*capsuleType)
- return ok
-}
-
-// EncapsulatedType returns the encapsulated native type of a capsule type,
-// or panics if the receiver is not a Capsule type.
-//
-// Is IsCapsuleType to determine if this method is safe to call.
-func (t Type) EncapsulatedType() reflect.Type {
- impl, ok := t.typeImpl.(*capsuleType)
- if !ok {
- panic("not a capsule type")
- }
- return impl.GoType
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/capsule_ops.go b/vendor/github.com/hashicorp/go-cty/cty/capsule_ops.go
deleted file mode 100644
index 3ff6855e..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/capsule_ops.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package cty
-
-import (
- "reflect"
-)
-
-// CapsuleOps represents a set of overloaded operations for a capsule type.
-//
-// Each field is a reference to a function that can either be nil or can be
-// set to an implementation of the corresponding operation. If an operation
-// function is nil then it isn't supported for the given capsule type.
-type CapsuleOps struct {
- // GoString provides the GoString implementation for values of the
- // corresponding type. Conventionally this should return a string
- // representation of an expression that would produce an equivalent
- // value.
- GoString func(val interface{}) string
-
- // TypeGoString provides the GoString implementation for the corresponding
- // capsule type itself.
- TypeGoString func(goTy reflect.Type) string
-
- // Equals provides the implementation of the Equals operation. This is
- // called only with known, non-null values of the corresponding type,
- // but if the corresponding type is a compound type then it must be
- // ready to detect and handle nested unknown or null values, usually
- // by recursively calling Value.Equals on those nested values.
- //
- // The result value must always be of type cty.Bool, or the Equals
- // operation will panic.
- //
- // If RawEquals is set without also setting Equals, the RawEquals
- // implementation will be used as a fallback implementation. That fallback
- // is appropriate only for leaf types that do not contain any nested
- // cty.Value that would need to distinguish Equals vs. RawEquals for their
- // own equality.
- //
- // If RawEquals is nil then Equals must also be nil, selecting the default
- // pointer-identity comparison instead.
- Equals func(a, b interface{}) Value
-
- // RawEquals provides the implementation of the RawEquals operation.
- // This is called only with known, non-null values of the corresponding
- // type, but if the corresponding type is a compound type then it must be
- // ready to detect and handle nested unknown or null values, usually
- // by recursively calling Value.RawEquals on those nested values.
- //
- // If RawEquals is nil, values of the corresponding type are compared by
- // pointer identity of the encapsulated value.
- RawEquals func(a, b interface{}) bool
-
- // ConversionFrom can provide conversions from the corresponding type to
- // some other type when values of the corresponding type are used with
- // the "convert" package. (The main cty package does not use this operation.)
- //
- // This function itself returns a function, allowing it to switch its
- // behavior depending on the given source type. Return nil to indicate
- // that no such conversion is available.
- ConversionFrom func(src Type) func(interface{}, Path) (Value, error)
-
- // ConversionTo can provide conversions to the corresponding type from
- // some other type when values of the corresponding type are used with
- // the "convert" package. (The main cty package does not use this operation.)
- //
- // This function itself returns a function, allowing it to switch its
- // behavior depending on the given destination type. Return nil to indicate
- // that no such conversion is available.
- ConversionTo func(dst Type) func(Value, Path) (interface{}, error)
-
- // ExtensionData is an extension point for applications that wish to
- // create their own extension features using capsule types.
- //
- // The key argument is any value that can be compared with Go's ==
- // operator, but should be of a named type in a package belonging to the
- // application defining the key. An ExtensionData implementation must
- // check to see if the given key is familar to it, and if so return a
- // suitable value for the key.
- //
- // If the given key is unrecognized, the ExtensionData function must
- // return a nil interface. (Importantly, not an interface containing a nil
- // pointer of some other type.)
- // The common implementation of ExtensionData is a single switch statement
- // over "key" which has a default case returning nil.
- //
- // The meaning of any given key is entirely up to the application that
- // defines it. Applications consuming ExtensionData from capsule types
- // should do so defensively: if the result of ExtensionData is not valid,
- // prefer to ignore it or gracefully produce an error rather than causing
- // a panic.
- ExtensionData func(key interface{}) interface{}
-}
-
-// noCapsuleOps is a pointer to a CapsuleOps with no functions set, which
-// is used as the default operations value when a type is created using
-// the Capsule function.
-var noCapsuleOps = &CapsuleOps{}
-
-func (ops *CapsuleOps) assertValid() {
- if ops.RawEquals == nil && ops.Equals != nil {
- panic("Equals cannot be set without RawEquals")
- }
-}
-
-// CapsuleOps returns a pointer to the CapsuleOps value for a capsule type,
-// or panics if the receiver is not a capsule type.
-//
-// The caller must not modify the CapsuleOps.
-func (ty Type) CapsuleOps() *CapsuleOps {
- if !ty.IsCapsuleType() {
- panic("not a capsule-typed value")
- }
-
- return ty.typeImpl.(*capsuleType).Ops
-}
-
-// CapsuleExtensionData is a convenience interface to the ExtensionData
-// function that can be optionally implemented for a capsule type. It will
-// check to see if the underlying type implements ExtensionData and call it
-// if so. If not, it will return nil to indicate that the given key is not
-// supported.
-//
-// See the documentation for CapsuleOps.ExtensionData for more information
-// on the purpose of and usage of this mechanism.
-//
-// If CapsuleExtensionData is called on a non-capsule type then it will panic.
-func (ty Type) CapsuleExtensionData(key interface{}) interface{} {
- ops := ty.CapsuleOps()
- if ops.ExtensionData == nil {
- return nil
- }
- return ops.ExtensionData(key)
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/collection.go b/vendor/github.com/hashicorp/go-cty/cty/collection.go
deleted file mode 100644
index ab3919b1..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/collection.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package cty
-
-import (
- "errors"
-)
-
-type collectionTypeImpl interface {
- ElementType() Type
-}
-
-// IsCollectionType returns true if the given type supports the operations
-// that are defined for all collection types.
-func (t Type) IsCollectionType() bool {
- _, ok := t.typeImpl.(collectionTypeImpl)
- return ok
-}
-
-// ElementType returns the element type of the receiver if it is a collection
-// type, or panics if it is not. Use IsCollectionType first to test whether
-// this method will succeed.
-func (t Type) ElementType() Type {
- if ct, ok := t.typeImpl.(collectionTypeImpl); ok {
- return ct.ElementType()
- }
- panic(errors.New("not a collection type"))
-}
-
-// ElementCallback is a callback type used for iterating over elements of
-// collections and attributes of objects.
-//
-// The types of key and value depend on what type is being iterated over.
-// Return true to stop iterating after the current element, or false to
-// continue iterating.
-type ElementCallback func(key Value, val Value) (stop bool)
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/compare_types.go b/vendor/github.com/hashicorp/go-cty/cty/convert/compare_types.go
deleted file mode 100644
index 6ad3bff4..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/convert/compare_types.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package convert
-
-import (
- "github.com/hashicorp/go-cty/cty"
-)
-
-// compareTypes implements a preference order for unification.
-//
-// The result of this method is not useful for anything other than unification
-// preferences, since it assumes that the caller will verify that any suggested
-// conversion is actually possible and it is thus able to to make certain
-// optimistic assumptions.
-func compareTypes(a cty.Type, b cty.Type) int {
-
- // DynamicPseudoType always has lowest preference, because anything can
- // convert to it (it acts as a placeholder for "any type") and we want
- // to optimistically assume that any dynamics will converge on matching
- // their neighbors.
- if a == cty.DynamicPseudoType || b == cty.DynamicPseudoType {
- if a != cty.DynamicPseudoType {
- return -1
- }
- if b != cty.DynamicPseudoType {
- return 1
- }
- return 0
- }
-
- if a.IsPrimitiveType() && b.IsPrimitiveType() {
- // String is a supertype of all primitive types, because we can
- // represent all primitive values as specially-formatted strings.
- if a == cty.String || b == cty.String {
- if a != cty.String {
- return 1
- }
- if b != cty.String {
- return -1
- }
- return 0
- }
- }
-
- if a.IsListType() && b.IsListType() {
- return compareTypes(a.ElementType(), b.ElementType())
- }
- if a.IsSetType() && b.IsSetType() {
- return compareTypes(a.ElementType(), b.ElementType())
- }
- if a.IsMapType() && b.IsMapType() {
- return compareTypes(a.ElementType(), b.ElementType())
- }
-
- // From this point on we may have swapped the two items in order to
- // simplify our cases. Therefore any non-zero return after this point
- // must be multiplied by "swap" to potentially invert the return value
- // if needed.
- swap := 1
- switch {
- case a.IsTupleType() && b.IsListType():
- fallthrough
- case a.IsObjectType() && b.IsMapType():
- fallthrough
- case a.IsSetType() && b.IsTupleType():
- fallthrough
- case a.IsSetType() && b.IsListType():
- a, b = b, a
- swap = -1
- }
-
- if b.IsSetType() && (a.IsTupleType() || a.IsListType()) {
- // We'll just optimistically assume that the element types are
- // unifyable/convertible, and let a second recursive pass
- // figure out how to make that so.
- return -1 * swap
- }
-
- if a.IsListType() && b.IsTupleType() {
- // We'll just optimistically assume that the tuple's element types
- // can be unified into something compatible with the list's element
- // type.
- return -1 * swap
- }
-
- if a.IsMapType() && b.IsObjectType() {
- // We'll just optimistically assume that the object's attribute types
- // can be unified into something compatible with the map's element
- // type.
- return -1 * swap
- }
-
- // For object and tuple types, comparing two types doesn't really tell
- // the whole story because it may be possible to construct a new type C
- // that is the supertype of both A and B by unifying each attribute/element
- // separately. That possibility is handled by Unify as a follow-up if
- // type sorting is insufficient to produce a valid result.
- //
- // Here we will take care of the simple possibilities where no new type
- // is needed.
- if a.IsObjectType() && b.IsObjectType() {
- atysA := a.AttributeTypes()
- atysB := b.AttributeTypes()
-
- if len(atysA) != len(atysB) {
- return 0
- }
-
- hasASuper := false
- hasBSuper := false
- for k := range atysA {
- if _, has := atysB[k]; !has {
- return 0
- }
-
- cmp := compareTypes(atysA[k], atysB[k])
- if cmp < 0 {
- hasASuper = true
- } else if cmp > 0 {
- hasBSuper = true
- }
- }
-
- switch {
- case hasASuper && hasBSuper:
- return 0
- case hasASuper:
- return -1 * swap
- case hasBSuper:
- return 1 * swap
- default:
- return 0
- }
- }
- if a.IsTupleType() && b.IsTupleType() {
- etysA := a.TupleElementTypes()
- etysB := b.TupleElementTypes()
-
- if len(etysA) != len(etysB) {
- return 0
- }
-
- hasASuper := false
- hasBSuper := false
- for i := range etysA {
- cmp := compareTypes(etysA[i], etysB[i])
- if cmp < 0 {
- hasASuper = true
- } else if cmp > 0 {
- hasBSuper = true
- }
- }
-
- switch {
- case hasASuper && hasBSuper:
- return 0
- case hasASuper:
- return -1 * swap
- case hasBSuper:
- return 1 * swap
- default:
- return 0
- }
- }
-
- return 0
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion.go
deleted file mode 100644
index 9c59c8f7..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package convert
-
-import (
- "github.com/hashicorp/go-cty/cty"
-)
-
-// conversion is an internal variant of Conversion that carries around
-// a cty.Path to be used in error responses.
-type conversion func(cty.Value, cty.Path) (cty.Value, error)
-
-func getConversion(in cty.Type, out cty.Type, unsafe bool) conversion {
- conv := getConversionKnown(in, out, unsafe)
- if conv == nil {
- return nil
- }
-
- // Wrap the conversion in some standard checks that we don't want to
- // have to repeat in every conversion function.
- var ret conversion
- ret = func(in cty.Value, path cty.Path) (cty.Value, error) {
- if in.IsMarked() {
- // We must unmark during the conversion and then re-apply the
- // same marks to the result.
- in, inMarks := in.Unmark()
- v, err := ret(in, path)
- if v != cty.NilVal {
- v = v.WithMarks(inMarks)
- }
- return v, err
- }
-
- if out == cty.DynamicPseudoType {
- // Conversion to DynamicPseudoType always just passes through verbatim.
- return in, nil
- }
- if !in.IsKnown() {
- return cty.UnknownVal(out), nil
- }
- if in.IsNull() {
- // We'll pass through nulls, albeit type converted, and let
- // the caller deal with whatever handling they want to do in
- // case null values are considered valid in some applications.
- return cty.NullVal(out), nil
- }
-
- return conv(in, path)
- }
-
- return ret
-}
-
-func getConversionKnown(in cty.Type, out cty.Type, unsafe bool) conversion {
- switch {
-
- case out == cty.DynamicPseudoType:
- // Conversion *to* DynamicPseudoType means that the caller wishes
- // to allow any type in this position, so we'll produce a do-nothing
- // conversion that just passes through the value as-is.
- return dynamicPassthrough
-
- case unsafe && in == cty.DynamicPseudoType:
- // Conversion *from* DynamicPseudoType means that we have a value
- // whose type isn't yet known during type checking. For these we will
- // assume that conversion will succeed and deal with any errors that
- // result (which is why we can only do this when "unsafe" is set).
- return dynamicFixup(out)
-
- case in.IsPrimitiveType() && out.IsPrimitiveType():
- conv := primitiveConversionsSafe[in][out]
- if conv != nil {
- return conv
- }
- if unsafe {
- return primitiveConversionsUnsafe[in][out]
- }
- return nil
-
- case out.IsObjectType() && in.IsObjectType():
- return conversionObjectToObject(in, out, unsafe)
-
- case out.IsTupleType() && in.IsTupleType():
- return conversionTupleToTuple(in, out, unsafe)
-
- case out.IsListType() && (in.IsListType() || in.IsSetType()):
- inEty := in.ElementType()
- outEty := out.ElementType()
- if inEty.Equals(outEty) {
- // This indicates that we're converting from list to set with
- // the same element type, so we don't need an element converter.
- return conversionCollectionToList(outEty, nil)
- }
-
- convEty := getConversion(inEty, outEty, unsafe)
- if convEty == nil {
- return nil
- }
- return conversionCollectionToList(outEty, convEty)
-
- case out.IsSetType() && (in.IsListType() || in.IsSetType()):
- if in.IsListType() && !unsafe {
- // Conversion from list to map is unsafe because it will lose
- // information: the ordering will not be preserved, and any
- // duplicate elements will be conflated.
- return nil
- }
- inEty := in.ElementType()
- outEty := out.ElementType()
- convEty := getConversion(inEty, outEty, unsafe)
- if inEty.Equals(outEty) {
- // This indicates that we're converting from set to list with
- // the same element type, so we don't need an element converter.
- return conversionCollectionToSet(outEty, nil)
- }
-
- if convEty == nil {
- return nil
- }
- return conversionCollectionToSet(outEty, convEty)
-
- case out.IsMapType() && in.IsMapType():
- inEty := in.ElementType()
- outEty := out.ElementType()
- convEty := getConversion(inEty, outEty, unsafe)
- if convEty == nil {
- return nil
- }
- return conversionCollectionToMap(outEty, convEty)
-
- case out.IsListType() && in.IsTupleType():
- outEty := out.ElementType()
- return conversionTupleToList(in, outEty, unsafe)
-
- case out.IsSetType() && in.IsTupleType():
- outEty := out.ElementType()
- return conversionTupleToSet(in, outEty, unsafe)
-
- case out.IsMapType() && in.IsObjectType():
- outEty := out.ElementType()
- return conversionObjectToMap(in, outEty, unsafe)
-
- case out.IsObjectType() && in.IsMapType():
- if !unsafe {
- // Converting a map to an object is an "unsafe" conversion,
- // because we don't know if all the map keys will correspond to
- // object attributes.
- return nil
- }
- return conversionMapToObject(in, out, unsafe)
-
- case in.IsCapsuleType() || out.IsCapsuleType():
- if !unsafe {
- // Capsule types can only participate in "unsafe" conversions,
- // because we don't know enough about their conversion behaviors
- // to be sure that they will always be safe.
- return nil
- }
- if in.Equals(out) {
- // conversion to self is never allowed
- return nil
- }
- if out.IsCapsuleType() {
- if fn := out.CapsuleOps().ConversionTo; fn != nil {
- return conversionToCapsule(in, out, fn)
- }
- }
- if in.IsCapsuleType() {
- if fn := in.CapsuleOps().ConversionFrom; fn != nil {
- return conversionFromCapsule(in, out, fn)
- }
- }
- // No conversion operation is available, then.
- return nil
-
- default:
- return nil
-
- }
-}
-
-// retConversion wraps a conversion (internal type) so it can be returned
-// as a Conversion (public type).
-func retConversion(conv conversion) Conversion {
- if conv == nil {
- return nil
- }
-
- return func(in cty.Value) (cty.Value, error) {
- return conv(in, cty.Path(nil))
- }
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_capsule.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_capsule.go
deleted file mode 100644
index 6a6006af..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_capsule.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package convert
-
-import (
- "github.com/hashicorp/go-cty/cty"
-)
-
-func conversionToCapsule(inTy, outTy cty.Type, fn func(inTy cty.Type) func(cty.Value, cty.Path) (interface{}, error)) conversion {
- rawConv := fn(inTy)
- if rawConv == nil {
- return nil
- }
-
- return func(in cty.Value, path cty.Path) (cty.Value, error) {
- rawV, err := rawConv(in, path)
- if err != nil {
- return cty.NilVal, err
- }
- return cty.CapsuleVal(outTy, rawV), nil
- }
-}
-
-func conversionFromCapsule(inTy, outTy cty.Type, fn func(outTy cty.Type) func(interface{}, cty.Path) (cty.Value, error)) conversion {
- rawConv := fn(outTy)
- if rawConv == nil {
- return nil
- }
-
- return func(in cty.Value, path cty.Path) (cty.Value, error) {
- return rawConv(in.EncapsulatedValue(), path)
- }
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_collection.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_collection.go
deleted file mode 100644
index 575973d3..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_collection.go
+++ /dev/null
@@ -1,488 +0,0 @@
-package convert
-
-import (
- "github.com/hashicorp/go-cty/cty"
-)
-
-// conversionCollectionToList returns a conversion that will apply the given
-// conversion to all of the elements of a collection (something that supports
-// ForEachElement and LengthInt) and then returns the result as a list.
-//
-// "conv" can be nil if the elements are expected to already be of the
-// correct type and just need to be re-wrapped into a list. (For example,
-// if we're converting from a set into a list of the same element type.)
-func conversionCollectionToList(ety cty.Type, conv conversion) conversion {
- return func(val cty.Value, path cty.Path) (cty.Value, error) {
- elems := make([]cty.Value, 0, val.LengthInt())
- i := int64(0)
- elemPath := append(path.Copy(), nil)
- it := val.ElementIterator()
- for it.Next() {
- _, val := it.Element()
- var err error
-
- elemPath[len(elemPath)-1] = cty.IndexStep{
- Key: cty.NumberIntVal(i),
- }
-
- if conv != nil {
- val, err = conv(val, elemPath)
- if err != nil {
- return cty.NilVal, err
- }
- }
- elems = append(elems, val)
-
- i++
- }
-
- if len(elems) == 0 {
- if ety == cty.DynamicPseudoType {
- ety = val.Type().ElementType()
- }
- return cty.ListValEmpty(ety), nil
- }
-
- return cty.ListVal(elems), nil
- }
-}
-
-// conversionCollectionToSet returns a conversion that will apply the given
-// conversion to all of the elements of a collection (something that supports
-// ForEachElement and LengthInt) and then returns the result as a set.
-//
-// "conv" can be nil if the elements are expected to already be of the
-// correct type and just need to be re-wrapped into a set. (For example,
-// if we're converting from a list into a set of the same element type.)
-func conversionCollectionToSet(ety cty.Type, conv conversion) conversion {
- return func(val cty.Value, path cty.Path) (cty.Value, error) {
- elems := make([]cty.Value, 0, val.LengthInt())
- i := int64(0)
- elemPath := append(path.Copy(), nil)
- it := val.ElementIterator()
- for it.Next() {
- _, val := it.Element()
- var err error
-
- elemPath[len(elemPath)-1] = cty.IndexStep{
- Key: cty.NumberIntVal(i),
- }
-
- if conv != nil {
- val, err = conv(val, elemPath)
- if err != nil {
- return cty.NilVal, err
- }
- }
- elems = append(elems, val)
-
- i++
- }
-
- if len(elems) == 0 {
- // Prefer a concrete type over a dynamic type when returning an
- // empty set
- if ety == cty.DynamicPseudoType {
- ety = val.Type().ElementType()
- }
- return cty.SetValEmpty(ety), nil
- }
-
- return cty.SetVal(elems), nil
- }
-}
-
-// conversionCollectionToMap returns a conversion that will apply the given
-// conversion to all of the elements of a collection (something that supports
-// ForEachElement and LengthInt) and then returns the result as a map.
-//
-// "conv" can be nil if the elements are expected to already be of the
-// correct type and just need to be re-wrapped into a map.
-func conversionCollectionToMap(ety cty.Type, conv conversion) conversion {
- return func(val cty.Value, path cty.Path) (cty.Value, error) {
- elems := make(map[string]cty.Value, 0)
- elemPath := append(path.Copy(), nil)
- it := val.ElementIterator()
- for it.Next() {
- key, val := it.Element()
- var err error
-
- elemPath[len(elemPath)-1] = cty.IndexStep{
- Key: key,
- }
-
- keyStr, err := Convert(key, cty.String)
- if err != nil {
- // Should never happen, because keys can only be numbers or
- // strings and both can convert to string.
- return cty.DynamicVal, elemPath.NewErrorf("cannot convert key type %s to string for map", key.Type().FriendlyName())
- }
-
- if conv != nil {
- val, err = conv(val, elemPath)
- if err != nil {
- return cty.NilVal, err
- }
- }
-
- elems[keyStr.AsString()] = val
- }
-
- if len(elems) == 0 {
- // Prefer a concrete type over a dynamic type when returning an
- // empty map
- if ety == cty.DynamicPseudoType {
- ety = val.Type().ElementType()
- }
- return cty.MapValEmpty(ety), nil
- }
-
- if ety.IsCollectionType() || ety.IsObjectType() {
- var err error
- if elems, err = conversionUnifyCollectionElements(elems, path, false); err != nil {
- return cty.NilVal, err
- }
- }
-
- if err := conversionCheckMapElementTypes(elems, path); err != nil {
- return cty.NilVal, err
- }
-
- return cty.MapVal(elems), nil
- }
-}
-
-// conversionTupleToSet returns a conversion that will take a value of the
-// given tuple type and return a set of the given element type.
-//
-// Will panic if the given tupleType isn't actually a tuple type.
-func conversionTupleToSet(tupleType cty.Type, listEty cty.Type, unsafe bool) conversion {
- tupleEtys := tupleType.TupleElementTypes()
-
- if len(tupleEtys) == 0 {
- // Empty tuple short-circuit
- return func(val cty.Value, path cty.Path) (cty.Value, error) {
- return cty.SetValEmpty(listEty), nil
- }
- }
-
- if listEty == cty.DynamicPseudoType {
- // This is a special case where the caller wants us to find
- // a suitable single type that all elements can convert to, if
- // possible.
- listEty, _ = unify(tupleEtys, unsafe)
- if listEty == cty.NilType {
- return nil
- }
- }
-
- elemConvs := make([]conversion, len(tupleEtys))
- for i, tupleEty := range tupleEtys {
- if tupleEty.Equals(listEty) {
- // no conversion required
- continue
- }
-
- elemConvs[i] = getConversion(tupleEty, listEty, unsafe)
- if elemConvs[i] == nil {
- // If any of our element conversions are impossible, then the our
- // whole conversion is impossible.
- return nil
- }
- }
-
- // If we fall out here then a conversion is possible, using the
- // element conversions in elemConvs
- return func(val cty.Value, path cty.Path) (cty.Value, error) {
- elems := make([]cty.Value, 0, len(elemConvs))
- elemPath := append(path.Copy(), nil)
- i := int64(0)
- it := val.ElementIterator()
- for it.Next() {
- _, val := it.Element()
- var err error
-
- elemPath[len(elemPath)-1] = cty.IndexStep{
- Key: cty.NumberIntVal(i),
- }
-
- conv := elemConvs[i]
- if conv != nil {
- val, err = conv(val, elemPath)
- if err != nil {
- return cty.NilVal, err
- }
- }
- elems = append(elems, val)
-
- i++
- }
-
- return cty.SetVal(elems), nil
- }
-}
-
-// conversionTupleToList returns a conversion that will take a value of the
-// given tuple type and return a list of the given element type.
-//
-// Will panic if the given tupleType isn't actually a tuple type.
-func conversionTupleToList(tupleType cty.Type, listEty cty.Type, unsafe bool) conversion {
- tupleEtys := tupleType.TupleElementTypes()
-
- if len(tupleEtys) == 0 {
- // Empty tuple short-circuit
- return func(val cty.Value, path cty.Path) (cty.Value, error) {
- return cty.ListValEmpty(listEty), nil
- }
- }
-
- if listEty == cty.DynamicPseudoType {
- // This is a special case where the caller wants us to find
- // a suitable single type that all elements can convert to, if
- // possible.
- listEty, _ = unify(tupleEtys, unsafe)
- if listEty == cty.NilType {
- return nil
- }
- }
-
- elemConvs := make([]conversion, len(tupleEtys))
- for i, tupleEty := range tupleEtys {
- if tupleEty.Equals(listEty) {
- // no conversion required
- continue
- }
-
- elemConvs[i] = getConversion(tupleEty, listEty, unsafe)
- if elemConvs[i] == nil {
- // If any of our element conversions are impossible, then the our
- // whole conversion is impossible.
- return nil
- }
- }
-
- // If we fall out here then a conversion is possible, using the
- // element conversions in elemConvs
- return func(val cty.Value, path cty.Path) (cty.Value, error) {
- elems := make([]cty.Value, 0, len(elemConvs))
- elemPath := append(path.Copy(), nil)
- i := int64(0)
- it := val.ElementIterator()
- for it.Next() {
- _, val := it.Element()
- var err error
-
- elemPath[len(elemPath)-1] = cty.IndexStep{
- Key: cty.NumberIntVal(i),
- }
-
- conv := elemConvs[i]
- if conv != nil {
- val, err = conv(val, elemPath)
- if err != nil {
- return cty.NilVal, err
- }
- }
- elems = append(elems, val)
-
- i++
- }
-
- return cty.ListVal(elems), nil
- }
-}
-
-// conversionObjectToMap returns a conversion that will take a value of the
-// given object type and return a map of the given element type.
-//
-// Will panic if the given objectType isn't actually an object type.
-func conversionObjectToMap(objectType cty.Type, mapEty cty.Type, unsafe bool) conversion {
- objectAtys := objectType.AttributeTypes()
-
- if len(objectAtys) == 0 {
- // Empty object short-circuit
- return func(val cty.Value, path cty.Path) (cty.Value, error) {
- return cty.MapValEmpty(mapEty), nil
- }
- }
-
- if mapEty == cty.DynamicPseudoType {
- // This is a special case where the caller wants us to find
- // a suitable single type that all elements can convert to, if
- // possible.
- objectAtysList := make([]cty.Type, 0, len(objectAtys))
- for _, aty := range objectAtys {
- objectAtysList = append(objectAtysList, aty)
- }
- mapEty, _ = unify(objectAtysList, unsafe)
- if mapEty == cty.NilType {
- return nil
- }
- }
-
- elemConvs := make(map[string]conversion, len(objectAtys))
- for name, objectAty := range objectAtys {
- if objectAty.Equals(mapEty) {
- // no conversion required
- continue
- }
-
- elemConvs[name] = getConversion(objectAty, mapEty, unsafe)
- if elemConvs[name] == nil {
- // If any of our element conversions are impossible, then the our
- // whole conversion is impossible.
- return nil
- }
- }
-
- // If we fall out here then a conversion is possible, using the
- // element conversions in elemConvs
- return func(val cty.Value, path cty.Path) (cty.Value, error) {
- elems := make(map[string]cty.Value, len(elemConvs))
- elemPath := append(path.Copy(), nil)
- it := val.ElementIterator()
- for it.Next() {
- name, val := it.Element()
- var err error
-
- elemPath[len(elemPath)-1] = cty.IndexStep{
- Key: name,
- }
-
- conv := elemConvs[name.AsString()]
- if conv != nil {
- val, err = conv(val, elemPath)
- if err != nil {
- return cty.NilVal, err
- }
- }
- elems[name.AsString()] = val
- }
-
- if mapEty.IsCollectionType() || mapEty.IsObjectType() {
- var err error
- if elems, err = conversionUnifyCollectionElements(elems, path, unsafe); err != nil {
- return cty.NilVal, err
- }
- }
-
- if err := conversionCheckMapElementTypes(elems, path); err != nil {
- return cty.NilVal, err
- }
-
- return cty.MapVal(elems), nil
- }
-}
-
-// conversionMapToObject returns a conversion that will take a value of the
-// given map type and return an object of the given type. The object attribute
-// types must all be compatible with the map element type.
-//
-// Will panic if the given mapType and objType are not maps and objects
-// respectively.
-func conversionMapToObject(mapType cty.Type, objType cty.Type, unsafe bool) conversion {
- objectAtys := objType.AttributeTypes()
- mapEty := mapType.ElementType()
-
- elemConvs := make(map[string]conversion, len(objectAtys))
- for name, objectAty := range objectAtys {
- if objectAty.Equals(mapEty) {
- // no conversion required
- continue
- }
-
- elemConvs[name] = getConversion(mapEty, objectAty, unsafe)
- if elemConvs[name] == nil {
- // If any of our element conversions are impossible, then the our
- // whole conversion is impossible.
- return nil
- }
- }
-
- // If we fall out here then a conversion is possible, using the
- // element conversions in elemConvs
- return func(val cty.Value, path cty.Path) (cty.Value, error) {
- elems := make(map[string]cty.Value, len(elemConvs))
- elemPath := append(path.Copy(), nil)
- it := val.ElementIterator()
- for it.Next() {
- name, val := it.Element()
-
- // if there is no corresponding attribute, we skip this key
- if _, ok := objectAtys[name.AsString()]; !ok {
- continue
- }
-
- var err error
-
- elemPath[len(elemPath)-1] = cty.IndexStep{
- Key: name,
- }
-
- conv := elemConvs[name.AsString()]
- if conv != nil {
- val, err = conv(val, elemPath)
- if err != nil {
- return cty.NilVal, err
- }
- }
-
- elems[name.AsString()] = val
- }
-
- return cty.ObjectVal(elems), nil
- }
-}
-
-func conversionUnifyCollectionElements(elems map[string]cty.Value, path cty.Path, unsafe bool) (map[string]cty.Value, error) {
- elemTypes := make([]cty.Type, 0, len(elems))
- for _, elem := range elems {
- elemTypes = append(elemTypes, elem.Type())
- }
- unifiedType, _ := unify(elemTypes, unsafe)
- if unifiedType == cty.NilType {
- }
-
- unifiedElems := make(map[string]cty.Value)
- elemPath := append(path.Copy(), nil)
-
- for name, elem := range elems {
- if elem.Type().Equals(unifiedType) {
- unifiedElems[name] = elem
- continue
- }
- conv := getConversion(elem.Type(), unifiedType, unsafe)
- if conv == nil {
- }
- elemPath[len(elemPath)-1] = cty.IndexStep{
- Key: cty.StringVal(name),
- }
- val, err := conv(elem, elemPath)
- if err != nil {
- return nil, err
- }
- unifiedElems[name] = val
- }
-
- return unifiedElems, nil
-}
-
-func conversionCheckMapElementTypes(elems map[string]cty.Value, path cty.Path) error {
- elementType := cty.NilType
- elemPath := append(path.Copy(), nil)
-
- for name, elem := range elems {
- if elementType == cty.NilType {
- elementType = elem.Type()
- continue
- }
- if !elementType.Equals(elem.Type()) {
- elemPath[len(elemPath)-1] = cty.IndexStep{
- Key: cty.StringVal(name),
- }
- return elemPath.NewErrorf("%s is required", elementType.FriendlyName())
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_dynamic.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_dynamic.go
deleted file mode 100644
index 5f571da1..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_dynamic.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package convert
-
-import (
- "github.com/hashicorp/go-cty/cty"
-)
-
-// dynamicFixup deals with just-in-time conversions of values that were
-// input-typed as cty.DynamicPseudoType during analysis, ensuring that
-// we end up with the desired output type once the value is known, or
-// failing with an error if that is not possible.
-//
-// This is in the spirit of the cty philosophy of optimistically assuming that
-// DynamicPseudoType values will become the intended value eventually, and
-// dealing with any inconsistencies during final evaluation.
-func dynamicFixup(wantType cty.Type) conversion {
- return func(in cty.Value, path cty.Path) (cty.Value, error) {
- ret, err := Convert(in, wantType)
- if err != nil {
- // Re-wrap this error so that the returned path is relative
- // to the caller's original value, rather than relative to our
- // conversion value here.
- return cty.NilVal, path.NewError(err)
- }
- return ret, nil
- }
-}
-
-// dynamicPassthrough is an identity conversion that is used when the
-// target type is DynamicPseudoType, indicating that the caller doesn't care
-// which type is returned.
-func dynamicPassthrough(in cty.Value, path cty.Path) (cty.Value, error) {
- return in, nil
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_object.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_object.go
deleted file mode 100644
index 93743ca8..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_object.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package convert
-
-import (
- "github.com/hashicorp/go-cty/cty"
-)
-
-// conversionObjectToObject returns a conversion that will make the input
-// object type conform to the output object type, if possible.
-//
-// Conversion is possible only if the output type is a subset of the input
-// type, meaning that each attribute of the output type has a corresponding
-// attribute in the input type where a recursive conversion is available.
-//
-// Shallow object conversions work the same for both safe and unsafe modes,
-// but the safety flag is passed on to recursive conversions and may thus
-// limit the above definition of "subset".
-func conversionObjectToObject(in, out cty.Type, unsafe bool) conversion {
- inAtys := in.AttributeTypes()
- outAtys := out.AttributeTypes()
- attrConvs := make(map[string]conversion)
-
- for name, outAty := range outAtys {
- inAty, exists := inAtys[name]
- if !exists {
- // No conversion is available, then.
- return nil
- }
-
- if inAty.Equals(outAty) {
- // No conversion needed, but we'll still record the attribute
- // in our map for later reference.
- attrConvs[name] = nil
- continue
- }
-
- attrConvs[name] = getConversion(inAty, outAty, unsafe)
- if attrConvs[name] == nil {
- // If a recursive conversion isn't available, then our top-level
- // configuration is impossible too.
- return nil
- }
- }
-
- // If we get here then a conversion is possible, using the attribute
- // conversions given in attrConvs.
- return func(val cty.Value, path cty.Path) (cty.Value, error) {
- attrVals := make(map[string]cty.Value, len(attrConvs))
- path = append(path, nil)
- pathStep := &path[len(path)-1]
-
- for it := val.ElementIterator(); it.Next(); {
- nameVal, val := it.Element()
- var err error
-
- name := nameVal.AsString()
- *pathStep = cty.GetAttrStep{
- Name: name,
- }
-
- conv, exists := attrConvs[name]
- if !exists {
- continue
- }
- if conv != nil {
- val, err = conv(val, path)
- if err != nil {
- return cty.NilVal, err
- }
- }
-
- attrVals[name] = val
- }
-
- return cty.ObjectVal(attrVals), nil
- }
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_primitive.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_primitive.go
deleted file mode 100644
index a5534441..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_primitive.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package convert
-
-import (
- "strings"
-
- "github.com/hashicorp/go-cty/cty"
-)
-
-var stringTrue = cty.StringVal("true")
-var stringFalse = cty.StringVal("false")
-
-var primitiveConversionsSafe = map[cty.Type]map[cty.Type]conversion{
- cty.Number: {
- cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) {
- f := val.AsBigFloat()
- return cty.StringVal(f.Text('f', -1)), nil
- },
- },
- cty.Bool: {
- cty.String: func(val cty.Value, path cty.Path) (cty.Value, error) {
- if val.True() {
- return stringTrue, nil
- } else {
- return stringFalse, nil
- }
- },
- },
-}
-
-var primitiveConversionsUnsafe = map[cty.Type]map[cty.Type]conversion{
- cty.String: {
- cty.Number: func(val cty.Value, path cty.Path) (cty.Value, error) {
- v, err := cty.ParseNumberVal(val.AsString())
- if err != nil {
- return cty.NilVal, path.NewErrorf("a number is required")
- }
- return v, nil
- },
- cty.Bool: func(val cty.Value, path cty.Path) (cty.Value, error) {
- switch val.AsString() {
- case "true", "1":
- return cty.True, nil
- case "false", "0":
- return cty.False, nil
- default:
- switch strings.ToLower(val.AsString()) {
- case "true":
- return cty.NilVal, path.NewErrorf("a bool is required; to convert from string, use lowercase \"true\"")
- case "false":
- return cty.NilVal, path.NewErrorf("a bool is required; to convert from string, use lowercase \"false\"")
- default:
- return cty.NilVal, path.NewErrorf("a bool is required")
- }
- }
- },
- },
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_tuple.go b/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_tuple.go
deleted file mode 100644
index d89ec380..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/convert/conversion_tuple.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package convert
-
-import (
- "github.com/hashicorp/go-cty/cty"
-)
-
-// conversionTupleToTuple returns a conversion that will make the input
-// tuple type conform to the output tuple type, if possible.
-//
-// Conversion is possible only if the two tuple types have the same number
-// of elements and the corresponding elements by index can be converted.
-//
-// Shallow tuple conversions work the same for both safe and unsafe modes,
-// but the safety flag is passed on to recursive conversions and may thus
-// limit which element type conversions are possible.
-func conversionTupleToTuple(in, out cty.Type, unsafe bool) conversion {
- inEtys := in.TupleElementTypes()
- outEtys := out.TupleElementTypes()
-
- if len(inEtys) != len(outEtys) {
- return nil // no conversion is possible
- }
-
- elemConvs := make([]conversion, len(inEtys))
-
- for i, outEty := range outEtys {
- inEty := inEtys[i]
-
- if inEty.Equals(outEty) {
- // No conversion needed, so we can leave this one nil.
- continue
- }
-
- elemConvs[i] = getConversion(inEty, outEty, unsafe)
- if elemConvs[i] == nil {
- // If a recursive conversion isn't available, then our top-level
- // configuration is impossible too.
- return nil
- }
- }
-
- // If we get here then a conversion is possible, using the element
- // conversions given in elemConvs.
- return func(val cty.Value, path cty.Path) (cty.Value, error) {
- elemVals := make([]cty.Value, len(elemConvs))
- path = append(path, nil)
- pathStep := &path[len(path)-1]
-
- i := 0
- for it := val.ElementIterator(); it.Next(); i++ {
- _, val := it.Element()
- var err error
-
- *pathStep = cty.IndexStep{
- Key: cty.NumberIntVal(int64(i)),
- }
-
- conv := elemConvs[i]
- if conv != nil {
- val, err = conv(val, path)
- if err != nil {
- return cty.NilVal, err
- }
- }
-
- elemVals[i] = val
- }
-
- return cty.TupleVal(elemVals), nil
- }
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/doc.go b/vendor/github.com/hashicorp/go-cty/cty/convert/doc.go
deleted file mode 100644
index 2037299b..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/convert/doc.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Package convert contains some routines for converting between cty types.
-// The intent of providing this package is to encourage applications using
-// cty to have consistent type conversion behavior for maximal interoperability
-// when Values pass from one application to another.
-//
-// The conversions are categorized into two categories. "Safe" conversions are
-// ones that are guaranteed to succeed if given a non-null value of the
-// appropriate source type. "Unsafe" conversions, on the other hand, are valid
-// for only a subset of input values, and thus may fail with an error when
-// called for values outside of that valid subset.
-//
-// The functions whose names end in Unsafe support all of the conversions that
-// are supported by the corresponding functions whose names do not have that
-// suffix, and then additional unsafe conversions as well.
-package convert
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/mismatch_msg.go b/vendor/github.com/hashicorp/go-cty/cty/convert/mismatch_msg.go
deleted file mode 100644
index 72f307f2..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/convert/mismatch_msg.go
+++ /dev/null
@@ -1,220 +0,0 @@
-package convert
-
-import (
- "bytes"
- "fmt"
- "sort"
-
- "github.com/hashicorp/go-cty/cty"
-)
-
-// MismatchMessage is a helper to return an English-language description of
-// the differences between got and want, phrased as a reason why got does
-// not conform to want.
-//
-// This function does not itself attempt conversion, and so it should generally
-// be used only after a conversion has failed, to report the conversion failure
-// to an English-speaking user. The result will be confusing got is actually
-// conforming to or convertable to want.
-//
-// The shorthand helper function Convert uses this function internally to
-// produce its error messages, so callers of that function do not need to
-// also use MismatchMessage.
-//
-// This function is similar to Type.TestConformance, but it is tailored to
-// describing conversion failures and so the messages it generates relate
-// specifically to the conversion rules implemented in this package.
-func MismatchMessage(got, want cty.Type) string {
- switch {
-
- case got.IsObjectType() && want.IsObjectType():
- // If both types are object types then we may be able to say something
- // about their respective attributes.
- return mismatchMessageObjects(got, want)
-
- case got.IsTupleType() && want.IsListType() && want.ElementType() == cty.DynamicPseudoType:
- // If conversion from tuple to list failed then it's because we couldn't
- // find a common type to convert all of the tuple elements to.
- return "all list elements must have the same type"
-
- case got.IsTupleType() && want.IsSetType() && want.ElementType() == cty.DynamicPseudoType:
- // If conversion from tuple to set failed then it's because we couldn't
- // find a common type to convert all of the tuple elements to.
- return "all set elements must have the same type"
-
- case got.IsObjectType() && want.IsMapType() && want.ElementType() == cty.DynamicPseudoType:
- // If conversion from object to map failed then it's because we couldn't
- // find a common type to convert all of the object attributes to.
- return "all map elements must have the same type"
-
- case (got.IsTupleType() || got.IsObjectType()) && want.IsCollectionType():
- return mismatchMessageCollectionsFromStructural(got, want)
-
- case got.IsCollectionType() && want.IsCollectionType():
- return mismatchMessageCollectionsFromCollections(got, want)
-
- default:
- // If we have nothing better to say, we'll just state what was required.
- return want.FriendlyNameForConstraint() + " required"
- }
-}
-
-func mismatchMessageObjects(got, want cty.Type) string {
- // Per our conversion rules, "got" is allowed to be a superset of "want",
- // and so we'll produce error messages here under that assumption.
- gotAtys := got.AttributeTypes()
- wantAtys := want.AttributeTypes()
-
- // If we find missing attributes then we'll report those in preference,
- // but if not then we will report a maximum of one non-conforming
- // attribute, just to keep our messages relatively terse.
- // We'll also prefer to report a recursive type error from an _unsafe_
- // conversion over a safe one, because these are subjectively more
- // "serious".
- var missingAttrs []string
- var unsafeMismatchAttr string
- var safeMismatchAttr string
-
- for name, wantAty := range wantAtys {
- gotAty, exists := gotAtys[name]
- if !exists {
- missingAttrs = append(missingAttrs, name)
- continue
- }
-
- // We'll now try to convert these attributes in isolation and
- // see if we have a nested conversion error to report.
- // We'll try an unsafe conversion first, and then fall back on
- // safe if unsafe is possible.
-
- // If we already have an unsafe mismatch attr error then we won't bother
- // hunting for another one.
- if unsafeMismatchAttr != "" {
- continue
- }
- if conv := GetConversionUnsafe(gotAty, wantAty); conv == nil {
- unsafeMismatchAttr = fmt.Sprintf("attribute %q: %s", name, MismatchMessage(gotAty, wantAty))
- }
-
- // If we already have a safe mismatch attr error then we won't bother
- // hunting for another one.
- if safeMismatchAttr != "" {
- continue
- }
- if conv := GetConversion(gotAty, wantAty); conv == nil {
- safeMismatchAttr = fmt.Sprintf("attribute %q: %s", name, MismatchMessage(gotAty, wantAty))
- }
- }
-
- // We should now have collected at least one problem. If we have more than
- // one then we'll use our preference order to decide what is most important
- // to report.
- switch {
-
- case len(missingAttrs) != 0:
- sort.Strings(missingAttrs)
- switch len(missingAttrs) {
- case 1:
- return fmt.Sprintf("attribute %q is required", missingAttrs[0])
- case 2:
- return fmt.Sprintf("attributes %q and %q are required", missingAttrs[0], missingAttrs[1])
- default:
- sort.Strings(missingAttrs)
- var buf bytes.Buffer
- for _, name := range missingAttrs[:len(missingAttrs)-1] {
- fmt.Fprintf(&buf, "%q, ", name)
- }
- fmt.Fprintf(&buf, "and %q", missingAttrs[len(missingAttrs)-1])
- return fmt.Sprintf("attributes %s are required", buf.Bytes())
- }
-
- case unsafeMismatchAttr != "":
- return unsafeMismatchAttr
-
- case safeMismatchAttr != "":
- return safeMismatchAttr
-
- default:
- // We should never get here, but if we do then we'll return
- // just a generic message.
- return "incorrect object attributes"
- }
-}
-
-func mismatchMessageCollectionsFromStructural(got, want cty.Type) string {
- // First some straightforward cases where the kind is just altogether wrong.
- switch {
- case want.IsListType() && !got.IsTupleType():
- return want.FriendlyNameForConstraint() + " required"
- case want.IsSetType() && !got.IsTupleType():
- return want.FriendlyNameForConstraint() + " required"
- case want.IsMapType() && !got.IsObjectType():
- return want.FriendlyNameForConstraint() + " required"
- }
-
- // If the kinds are matched well enough then we'll move on to checking
- // individual elements.
- wantEty := want.ElementType()
- switch {
- case got.IsTupleType():
- for i, gotEty := range got.TupleElementTypes() {
- if gotEty.Equals(wantEty) {
- continue // exact match, so no problem
- }
- if conv := getConversion(gotEty, wantEty, true); conv != nil {
- continue // conversion is available, so no problem
- }
- return fmt.Sprintf("element %d: %s", i, MismatchMessage(gotEty, wantEty))
- }
-
- // If we get down here then something weird is going on but we'll
- // return a reasonable fallback message anyway.
- return fmt.Sprintf("all elements must be %s", wantEty.FriendlyNameForConstraint())
-
- case got.IsObjectType():
- for name, gotAty := range got.AttributeTypes() {
- if gotAty.Equals(wantEty) {
- continue // exact match, so no problem
- }
- if conv := getConversion(gotAty, wantEty, true); conv != nil {
- continue // conversion is available, so no problem
- }
- return fmt.Sprintf("element %q: %s", name, MismatchMessage(gotAty, wantEty))
- }
-
- // If we get down here then something weird is going on but we'll
- // return a reasonable fallback message anyway.
- return fmt.Sprintf("all elements must be %s", wantEty.FriendlyNameForConstraint())
-
- default:
- // Should not be possible to get here since we only call this function
- // with got as structural types, but...
- return want.FriendlyNameForConstraint() + " required"
- }
-}
-
-func mismatchMessageCollectionsFromCollections(got, want cty.Type) string {
- // First some straightforward cases where the kind is just altogether wrong.
- switch {
- case want.IsListType() && !(got.IsListType() || got.IsSetType()):
- return want.FriendlyNameForConstraint() + " required"
- case want.IsSetType() && !(got.IsListType() || got.IsSetType()):
- return want.FriendlyNameForConstraint() + " required"
- case want.IsMapType() && !got.IsMapType():
- return want.FriendlyNameForConstraint() + " required"
- }
-
- // If the kinds are matched well enough then we'll check the element types.
- gotEty := got.ElementType()
- wantEty := want.ElementType()
- noun := "element type"
- switch {
- case want.IsListType():
- noun = "list element type"
- case want.IsSetType():
- noun = "set element type"
- case want.IsMapType():
- noun = "map element type"
- }
- return fmt.Sprintf("incorrect %s: %s", noun, MismatchMessage(gotEty, wantEty))
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/public.go b/vendor/github.com/hashicorp/go-cty/cty/convert/public.go
deleted file mode 100644
index 3b50a692..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/convert/public.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package convert
-
-import (
- "errors"
-
- "github.com/hashicorp/go-cty/cty"
-)
-
-// This file contains the public interface of this package, which is intended
-// to be a small, convenient interface designed for easy integration into
-// a hypothetical language type checker and interpreter.
-
-// Conversion is a named function type representing a conversion from a
-// value of one type to a value of another type.
-//
-// The source type for a conversion is always the source type given to
-// the function that returned the Conversion, but there is no way to recover
-// that from a Conversion value itself. If a Conversion is given a value
-// that is not of its expected type (with the exception of DynamicPseudoType,
-// which is always supported) then the function may panic or produce undefined
-// results.
-type Conversion func(in cty.Value) (out cty.Value, err error)
-
-// GetConversion returns a Conversion between the given in and out Types if
-// a safe one is available, or returns nil otherwise.
-func GetConversion(in cty.Type, out cty.Type) Conversion {
- return retConversion(getConversion(in, out, false))
-}
-
-// GetConversionUnsafe returns a Conversion between the given in and out Types
-// if either a safe or unsafe one is available, or returns nil otherwise.
-func GetConversionUnsafe(in cty.Type, out cty.Type) Conversion {
- return retConversion(getConversion(in, out, true))
-}
-
-// Convert returns the result of converting the given value to the given type
-// if an safe or unsafe conversion is available, or returns an error if such a
-// conversion is impossible.
-//
-// This is a convenience wrapper around calling GetConversionUnsafe and then
-// immediately passing the given value to the resulting function.
-func Convert(in cty.Value, want cty.Type) (cty.Value, error) {
- if in.Type().Equals(want) {
- return in, nil
- }
-
- conv := GetConversionUnsafe(in.Type(), want)
- if conv == nil {
- return cty.NilVal, errors.New(MismatchMessage(in.Type(), want))
- }
- return conv(in)
-}
-
-// Unify attempts to find the most general type that can be converted from
-// all of the given types. If this is possible, that type is returned along
-// with a slice of necessary conversions for some of the given types.
-//
-// If no common supertype can be found, this function returns cty.NilType and
-// a nil slice.
-//
-// If a common supertype *can* be found, the returned slice will always be
-// non-nil and will contain a non-nil conversion for each given type that
-// needs to be converted, with indices corresponding to the input slice.
-// Any given type that does *not* need conversion (because it is already of
-// the appropriate type) will have a nil Conversion.
-//
-// cty.DynamicPseudoType is, as usual, a special case. If the given type list
-// contains a mixture of dynamic and non-dynamic types, the dynamic types are
-// disregarded for type selection and a conversion is returned for them that
-// will attempt a late conversion of the given value to the target type,
-// failing with a conversion error if the eventual concrete type is not
-// compatible. If *all* given types are DynamicPseudoType, or in the
-// degenerate case of an empty slice of types, the returned type is itself
-// cty.DynamicPseudoType and no conversions are attempted.
-func Unify(types []cty.Type) (cty.Type, []Conversion) {
- return unify(types, false)
-}
-
-// UnifyUnsafe is the same as Unify except that it may return unsafe
-// conversions in situations where a safe conversion isn't also available.
-func UnifyUnsafe(types []cty.Type) (cty.Type, []Conversion) {
- return unify(types, true)
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/sort_types.go b/vendor/github.com/hashicorp/go-cty/cty/convert/sort_types.go
deleted file mode 100644
index 8a9c3276..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/convert/sort_types.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package convert
-
-import (
- "github.com/hashicorp/go-cty/cty"
-)
-
-// sortTypes produces an ordering of the given types that serves as a
-// preference order for the result of unification of the given types.
-// The return value is a slice of indices into the given slice, and will
-// thus always be the same length as the given slice.
-//
-// The goal is that the most general of the given types will appear first
-// in the ordering. If there are uncomparable pairs of types in the list
-// then they will appear in an undefined order, and the unification pass
-// will presumably then fail.
-func sortTypes(tys []cty.Type) []int {
- l := len(tys)
-
- // First we build a graph whose edges represent "more general than",
- // which we will then do a topological sort of.
- edges := make([][]int, l)
- for i := 0; i < (l - 1); i++ {
- for j := i + 1; j < l; j++ {
- cmp := compareTypes(tys[i], tys[j])
- switch {
- case cmp < 0:
- edges[i] = append(edges[i], j)
- case cmp > 0:
- edges[j] = append(edges[j], i)
- }
- }
- }
-
- // Compute the in-degree of each node
- inDegree := make([]int, l)
- for _, outs := range edges {
- for _, j := range outs {
- inDegree[j]++
- }
- }
-
- // The array backing our result will double as our queue for visiting
- // the nodes, with the queue slice moving along this array until it
- // is empty and positioned at the end of the array. Thus our visiting
- // order is also our result order.
- result := make([]int, l)
- queue := result[0:0]
-
- // Initialize the queue with any item of in-degree 0, preserving
- // their relative order.
- for i, n := range inDegree {
- if n == 0 {
- queue = append(queue, i)
- }
- }
-
- for len(queue) != 0 {
- i := queue[0]
- queue = queue[1:]
- for _, j := range edges[i] {
- inDegree[j]--
- if inDegree[j] == 0 {
- queue = append(queue, j)
- }
- }
- }
-
- return result
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/convert/unify.go b/vendor/github.com/hashicorp/go-cty/cty/convert/unify.go
deleted file mode 100644
index b2a3bbe5..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/convert/unify.go
+++ /dev/null
@@ -1,357 +0,0 @@
-package convert
-
-import (
- "github.com/hashicorp/go-cty/cty"
-)
-
-// The current unify implementation is somewhat inefficient, but we accept this
-// under the assumption that it will generally be used with small numbers of
-// types and with types of reasonable complexity. However, it does have a
-// "happy path" where all of the given types are equal.
-//
-// This function is likely to have poor performance in cases where any given
-// types are very complex (lots of deeply-nested structures) or if the list
-// of types itself is very large. In particular, it will walk the nested type
-// structure under the given types several times, especially when given a
-// list of types for which unification is not possible, since each permutation
-// will be tried to determine that result.
-func unify(types []cty.Type, unsafe bool) (cty.Type, []Conversion) {
- if len(types) == 0 {
- // Degenerate case
- return cty.NilType, nil
- }
-
- // If all of the given types are of the same structural kind, we may be
- // able to construct a new type that they can all be unified to, even if
- // that is not one of the given types. We must try this before the general
- // behavior below because in unsafe mode we can convert an object type to
- // a subset of that type, which would be a much less useful conversion for
- // unification purposes.
- {
- mapCt := 0
- objectCt := 0
- tupleCt := 0
- dynamicCt := 0
- for _, ty := range types {
- switch {
- case ty.IsMapType():
- mapCt++
- case ty.IsObjectType():
- objectCt++
- case ty.IsTupleType():
- tupleCt++
- case ty == cty.DynamicPseudoType:
- dynamicCt++
- default:
- break
- }
- }
- switch {
- case mapCt > 0 && (mapCt+dynamicCt) == len(types):
- return unifyMapTypes(types, unsafe, dynamicCt > 0)
- case objectCt > 0 && (objectCt+dynamicCt) == len(types):
- return unifyObjectTypes(types, unsafe, dynamicCt > 0)
- case tupleCt > 0 && (tupleCt+dynamicCt) == len(types):
- return unifyTupleTypes(types, unsafe, dynamicCt > 0)
- case objectCt > 0 && tupleCt > 0:
- // Can never unify object and tuple types since they have incompatible kinds
- return cty.NilType, nil
- }
- }
-
- prefOrder := sortTypes(types)
-
- // sortTypes gives us an order where earlier items are preferable as
- // our result type. We'll now walk through these and choose the first
- // one we encounter for which conversions exist for all source types.
- conversions := make([]Conversion, len(types))
-Preferences:
- for _, wantTypeIdx := range prefOrder {
- wantType := types[wantTypeIdx]
- for i, tryType := range types {
- if i == wantTypeIdx {
- // Don't need to convert our wanted type to itself
- conversions[i] = nil
- continue
- }
-
- if tryType.Equals(wantType) {
- conversions[i] = nil
- continue
- }
-
- if unsafe {
- conversions[i] = GetConversionUnsafe(tryType, wantType)
- } else {
- conversions[i] = GetConversion(tryType, wantType)
- }
-
- if conversions[i] == nil {
- // wantType is not a suitable unification type, so we'll
- // try the next one in our preference order.
- continue Preferences
- }
- }
-
- return wantType, conversions
- }
-
- // If we fall out here, no unification is possible
- return cty.NilType, nil
-}
-
-func unifyMapTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) {
- // If we had any dynamic types in the input here then we can't predict
- // what path we'll take through here once these become known types, so
- // we'll conservatively produce DynamicVal for these.
- if hasDynamic {
- return unifyAllAsDynamic(types)
- }
-
- elemTypes := make([]cty.Type, 0, len(types))
- for _, ty := range types {
- elemTypes = append(elemTypes, ty.ElementType())
- }
- retElemType, _ := unify(elemTypes, unsafe)
- if retElemType == cty.NilType {
- return cty.NilType, nil
- }
-
- retTy := cty.Map(retElemType)
-
- conversions := make([]Conversion, len(types))
- for i, ty := range types {
- if ty.Equals(retTy) {
- continue
- }
- if unsafe {
- conversions[i] = GetConversionUnsafe(ty, retTy)
- } else {
- conversions[i] = GetConversion(ty, retTy)
- }
- if conversions[i] == nil {
- // Shouldn't be reachable, since we were able to unify
- return cty.NilType, nil
- }
- }
-
- return retTy, conversions
-}
-
-func unifyObjectTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) {
- // If we had any dynamic types in the input here then we can't predict
- // what path we'll take through here once these become known types, so
- // we'll conservatively produce DynamicVal for these.
- if hasDynamic {
- return unifyAllAsDynamic(types)
- }
-
- // There are two different ways we can succeed here:
- // - If all of the given object types have the same set of attribute names
- // and the corresponding types are all unifyable, then we construct that
- // type.
- // - If the given object types have different attribute names or their
- // corresponding types are not unifyable, we'll instead try to unify
- // all of the attribute types together to produce a map type.
- //
- // Our unification behavior is intentionally stricter than our conversion
- // behavior for subset object types because user intent is different with
- // unification use-cases: it makes sense to allow {"foo":true} to convert
- // to emptyobjectval, but unifying an object with an attribute with the
- // empty object type should be an error because unifying to the empty
- // object type would be suprising and useless.
-
- firstAttrs := types[0].AttributeTypes()
- for _, ty := range types[1:] {
- thisAttrs := ty.AttributeTypes()
- if len(thisAttrs) != len(firstAttrs) {
- // If number of attributes is different then there can be no
- // object type in common.
- return unifyObjectTypesToMap(types, unsafe)
- }
- for name := range thisAttrs {
- if _, ok := firstAttrs[name]; !ok {
- // If attribute names don't exactly match then there can be
- // no object type in common.
- return unifyObjectTypesToMap(types, unsafe)
- }
- }
- }
-
- // If we get here then we've proven that all of the given object types
- // have exactly the same set of attribute names, though the types may
- // differ.
- retAtys := make(map[string]cty.Type)
- atysAcross := make([]cty.Type, len(types))
- for name := range firstAttrs {
- for i, ty := range types {
- atysAcross[i] = ty.AttributeType(name)
- }
- retAtys[name], _ = unify(atysAcross, unsafe)
- if retAtys[name] == cty.NilType {
- // Cannot unify this attribute alone, which means that unification
- // of everything down to a map type can't be possible either.
- return cty.NilType, nil
- }
- }
- retTy := cty.Object(retAtys)
-
- conversions := make([]Conversion, len(types))
- for i, ty := range types {
- if ty.Equals(retTy) {
- continue
- }
- if unsafe {
- conversions[i] = GetConversionUnsafe(ty, retTy)
- } else {
- conversions[i] = GetConversion(ty, retTy)
- }
- if conversions[i] == nil {
- // Shouldn't be reachable, since we were able to unify
- return unifyObjectTypesToMap(types, unsafe)
- }
- }
-
- return retTy, conversions
-}
-
-func unifyObjectTypesToMap(types []cty.Type, unsafe bool) (cty.Type, []Conversion) {
- // This is our fallback case for unifyObjectTypes, where we see if we can
- // construct a map type that can accept all of the attribute types.
-
- var atys []cty.Type
- for _, ty := range types {
- for _, aty := range ty.AttributeTypes() {
- atys = append(atys, aty)
- }
- }
-
- ety, _ := unify(atys, unsafe)
- if ety == cty.NilType {
- return cty.NilType, nil
- }
-
- retTy := cty.Map(ety)
- conversions := make([]Conversion, len(types))
- for i, ty := range types {
- if ty.Equals(retTy) {
- continue
- }
- if unsafe {
- conversions[i] = GetConversionUnsafe(ty, retTy)
- } else {
- conversions[i] = GetConversion(ty, retTy)
- }
- if conversions[i] == nil {
- return cty.NilType, nil
- }
- }
- return retTy, conversions
-}
-
-func unifyTupleTypes(types []cty.Type, unsafe bool, hasDynamic bool) (cty.Type, []Conversion) {
- // If we had any dynamic types in the input here then we can't predict
- // what path we'll take through here once these become known types, so
- // we'll conservatively produce DynamicVal for these.
- if hasDynamic {
- return unifyAllAsDynamic(types)
- }
-
- // There are two different ways we can succeed here:
- // - If all of the given tuple types have the same sequence of element types
- // and the corresponding types are all unifyable, then we construct that
- // type.
- // - If the given tuple types have different element types or their
- // corresponding types are not unifyable, we'll instead try to unify
- // all of the elements types together to produce a list type.
-
- firstEtys := types[0].TupleElementTypes()
- for _, ty := range types[1:] {
- thisEtys := ty.TupleElementTypes()
- if len(thisEtys) != len(firstEtys) {
- // If number of elements is different then there can be no
- // tuple type in common.
- return unifyTupleTypesToList(types, unsafe)
- }
- }
-
- // If we get here then we've proven that all of the given tuple types
- // have the same number of elements, though the types may differ.
- retEtys := make([]cty.Type, len(firstEtys))
- atysAcross := make([]cty.Type, len(types))
- for idx := range firstEtys {
- for tyI, ty := range types {
- atysAcross[tyI] = ty.TupleElementTypes()[idx]
- }
- retEtys[idx], _ = unify(atysAcross, unsafe)
- if retEtys[idx] == cty.NilType {
- // Cannot unify this element alone, which means that unification
- // of everything down to a map type can't be possible either.
- return cty.NilType, nil
- }
- }
- retTy := cty.Tuple(retEtys)
-
- conversions := make([]Conversion, len(types))
- for i, ty := range types {
- if ty.Equals(retTy) {
- continue
- }
- if unsafe {
- conversions[i] = GetConversionUnsafe(ty, retTy)
- } else {
- conversions[i] = GetConversion(ty, retTy)
- }
- if conversions[i] == nil {
- // Shouldn't be reachable, since we were able to unify
- return unifyTupleTypesToList(types, unsafe)
- }
- }
-
- return retTy, conversions
-}
-
-func unifyTupleTypesToList(types []cty.Type, unsafe bool) (cty.Type, []Conversion) {
- // This is our fallback case for unifyTupleTypes, where we see if we can
- // construct a list type that can accept all of the element types.
-
- var etys []cty.Type
- for _, ty := range types {
- for _, ety := range ty.TupleElementTypes() {
- etys = append(etys, ety)
- }
- }
-
- ety, _ := unify(etys, unsafe)
- if ety == cty.NilType {
- return cty.NilType, nil
- }
-
- retTy := cty.List(ety)
- conversions := make([]Conversion, len(types))
- for i, ty := range types {
- if ty.Equals(retTy) {
- continue
- }
- if unsafe {
- conversions[i] = GetConversionUnsafe(ty, retTy)
- } else {
- conversions[i] = GetConversion(ty, retTy)
- }
- if conversions[i] == nil {
- // Shouldn't be reachable, since we were able to unify
- return unifyObjectTypesToMap(types, unsafe)
- }
- }
- return retTy, conversions
-}
-
-func unifyAllAsDynamic(types []cty.Type) (cty.Type, []Conversion) {
- conversions := make([]Conversion, len(types))
- for i := range conversions {
- conversions[i] = func(cty.Value) (cty.Value, error) {
- return cty.DynamicVal, nil
- }
- }
- return cty.DynamicPseudoType, conversions
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/doc.go b/vendor/github.com/hashicorp/go-cty/cty/doc.go
deleted file mode 100644
index d31f0547..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/doc.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Package cty (pronounced see-tie) provides some infrastructure for a type
-// system that might be useful for applications that need to represent
-// configuration values provided by the user whose types are not known
-// at compile time, particularly if the calling application also allows
-// such values to be used in expressions.
-//
-// The type system consists of primitive types Number, String and Bool, as
-// well as List and Map collection types and Object types that can have
-// arbitrarily-typed sets of attributes.
-//
-// A set of operations is defined on these types, which is accessible via
-// the wrapper struct Value, which annotates the raw, internal representation
-// of a value with its corresponding type.
-//
-// This package is oriented towards being a building block for configuration
-// languages used to bootstrap an application. It is not optimized for use
-// in tight loops where CPU time or memory pressure are a concern.
-package cty
diff --git a/vendor/github.com/hashicorp/go-cty/cty/element_iterator.go b/vendor/github.com/hashicorp/go-cty/cty/element_iterator.go
deleted file mode 100644
index 31567e76..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/element_iterator.go
+++ /dev/null
@@ -1,194 +0,0 @@
-package cty
-
-import (
- "sort"
-
- "github.com/hashicorp/go-cty/cty/set"
-)
-
-// ElementIterator is the interface type returned by Value.ElementIterator to
-// allow the caller to iterate over elements of a collection-typed value.
-//
-// Its usage pattern is as follows:
-//
-// it := val.ElementIterator()
-// for it.Next() {
-// key, val := it.Element()
-// // ...
-// }
-type ElementIterator interface {
- Next() bool
- Element() (key Value, value Value)
-}
-
-func canElementIterator(val Value) bool {
- switch {
- case val.IsMarked():
- return false
- case val.ty.IsListType():
- return true
- case val.ty.IsMapType():
- return true
- case val.ty.IsSetType():
- return true
- case val.ty.IsTupleType():
- return true
- case val.ty.IsObjectType():
- return true
- default:
- return false
- }
-}
-
-func elementIterator(val Value) ElementIterator {
- val.assertUnmarked()
- switch {
- case val.ty.IsListType():
- return &listElementIterator{
- ety: val.ty.ElementType(),
- vals: val.v.([]interface{}),
- idx: -1,
- }
- case val.ty.IsMapType():
- // We iterate the keys in a predictable lexicographical order so
- // that results will always be stable given the same input map.
- rawMap := val.v.(map[string]interface{})
- keys := make([]string, 0, len(rawMap))
- for key := range rawMap {
- keys = append(keys, key)
- }
- sort.Strings(keys)
-
- return &mapElementIterator{
- ety: val.ty.ElementType(),
- vals: rawMap,
- keys: keys,
- idx: -1,
- }
- case val.ty.IsSetType():
- rawSet := val.v.(set.Set)
- return &setElementIterator{
- ety: val.ty.ElementType(),
- setIt: rawSet.Iterator(),
- }
- case val.ty.IsTupleType():
- return &tupleElementIterator{
- etys: val.ty.TupleElementTypes(),
- vals: val.v.([]interface{}),
- idx: -1,
- }
- case val.ty.IsObjectType():
- // We iterate the keys in a predictable lexicographical order so
- // that results will always be stable given the same object type.
- atys := val.ty.AttributeTypes()
- keys := make([]string, 0, len(atys))
- for key := range atys {
- keys = append(keys, key)
- }
- sort.Strings(keys)
-
- return &objectElementIterator{
- atys: atys,
- vals: val.v.(map[string]interface{}),
- attrNames: keys,
- idx: -1,
- }
- default:
- panic("attempt to iterate on non-collection, non-tuple type")
- }
-}
-
-type listElementIterator struct {
- ety Type
- vals []interface{}
- idx int
-}
-
-func (it *listElementIterator) Element() (Value, Value) {
- i := it.idx
- return NumberIntVal(int64(i)), Value{
- ty: it.ety,
- v: it.vals[i],
- }
-}
-
-func (it *listElementIterator) Next() bool {
- it.idx++
- return it.idx < len(it.vals)
-}
-
-type mapElementIterator struct {
- ety Type
- vals map[string]interface{}
- keys []string
- idx int
-}
-
-func (it *mapElementIterator) Element() (Value, Value) {
- key := it.keys[it.idx]
- return StringVal(key), Value{
- ty: it.ety,
- v: it.vals[key],
- }
-}
-
-func (it *mapElementIterator) Next() bool {
- it.idx++
- return it.idx < len(it.keys)
-}
-
-type setElementIterator struct {
- ety Type
- setIt *set.Iterator
-}
-
-func (it *setElementIterator) Element() (Value, Value) {
- val := Value{
- ty: it.ety,
- v: it.setIt.Value(),
- }
- return val, val
-}
-
-func (it *setElementIterator) Next() bool {
- return it.setIt.Next()
-}
-
-type tupleElementIterator struct {
- etys []Type
- vals []interface{}
- idx int
-}
-
-func (it *tupleElementIterator) Element() (Value, Value) {
- i := it.idx
- return NumberIntVal(int64(i)), Value{
- ty: it.etys[i],
- v: it.vals[i],
- }
-}
-
-func (it *tupleElementIterator) Next() bool {
- it.idx++
- return it.idx < len(it.vals)
-}
-
-type objectElementIterator struct {
- atys map[string]Type
- vals map[string]interface{}
- attrNames []string
- idx int
-}
-
-func (it *objectElementIterator) Element() (Value, Value) {
- key := it.attrNames[it.idx]
- return StringVal(key), Value{
- ty: it.atys[key],
- v: it.vals[key],
- }
-}
-
-func (it *objectElementIterator) Next() bool {
- it.idx++
- return it.idx < len(it.attrNames)
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/error.go b/vendor/github.com/hashicorp/go-cty/cty/error.go
deleted file mode 100644
index dd139f72..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/error.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package cty
-
-import (
- "fmt"
-)
-
-// PathError is a specialization of error that represents where in a
-// potentially-deep data structure an error occured, using a Path.
-type PathError struct {
- error
- Path Path
-}
-
-func errorf(path Path, f string, args ...interface{}) error {
- // We need to copy the Path because often our caller builds it by
- // continually mutating the same underlying buffer.
- sPath := make(Path, len(path))
- copy(sPath, path)
- return PathError{
- error: fmt.Errorf(f, args...),
- Path: sPath,
- }
-}
-
-// NewErrorf creates a new PathError for the current path by passing the
-// given format and arguments to fmt.Errorf and then wrapping the result
-// similarly to NewError.
-func (p Path) NewErrorf(f string, args ...interface{}) error {
- return errorf(p, f, args...)
-}
-
-// NewError creates a new PathError for the current path, wrapping the given
-// error.
-func (p Path) NewError(err error) error {
- // if we're being asked to wrap an existing PathError then our new
- // PathError will be the concatenation of the two paths, ensuring
- // that we still get a single flat PathError that's thus easier for
- // callers to deal with.
- perr, wrappingPath := err.(PathError)
- pathLen := len(p)
- if wrappingPath {
- pathLen = pathLen + len(perr.Path)
- }
-
- sPath := make(Path, pathLen)
- copy(sPath, p)
- if wrappingPath {
- copy(sPath[len(p):], perr.Path)
- }
-
- return PathError{
- error: err,
- Path: sPath,
- }
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/gob.go b/vendor/github.com/hashicorp/go-cty/cty/gob.go
deleted file mode 100644
index 80929aa5..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/gob.go
+++ /dev/null
@@ -1,204 +0,0 @@
-package cty
-
-import (
- "bytes"
- "encoding/gob"
- "errors"
- "fmt"
- "math/big"
-
- "github.com/hashicorp/go-cty/cty/set"
-)
-
-// GobEncode is an implementation of the gob.GobEncoder interface, which
-// allows Values to be included in structures encoded with encoding/gob.
-//
-// Currently it is not possible to represent values of capsule types in gob,
-// because the types themselves cannot be represented.
-func (val Value) GobEncode() ([]byte, error) {
- if val.IsMarked() {
- return nil, errors.New("value is marked")
- }
-
- buf := &bytes.Buffer{}
- enc := gob.NewEncoder(buf)
-
- gv := gobValue{
- Version: 0,
- Ty: val.ty,
- V: val.v,
- }
-
- err := enc.Encode(gv)
- if err != nil {
- return nil, fmt.Errorf("error encoding cty.Value: %s", err)
- }
-
- return buf.Bytes(), nil
-}
-
-// GobDecode is an implementation of the gob.GobDecoder interface, which
-// inverts the operation performed by GobEncode. See the documentation of
-// GobEncode for considerations when using cty.Value instances with gob.
-func (val *Value) GobDecode(buf []byte) error {
- r := bytes.NewReader(buf)
- dec := gob.NewDecoder(r)
-
- var gv gobValue
- err := dec.Decode(&gv)
- if err != nil {
- return fmt.Errorf("error decoding cty.Value: %s", err)
- }
- if gv.Version != 0 {
- return fmt.Errorf("unsupported cty.Value encoding version %d; only 0 is supported", gv.Version)
- }
-
- // Because big.Float.GobEncode is implemented with a pointer reciever,
- // gob encoding of an interface{} containing a *big.Float value does not
- // round-trip correctly, emerging instead as a non-pointer big.Float.
- // The rest of cty expects all number values to be represented by
- // *big.Float, so we'll fix that up here.
- gv.V = gobDecodeFixNumberPtr(gv.V, gv.Ty)
-
- val.ty = gv.Ty
- val.v = gv.V
-
- return nil
-}
-
-// GobEncode is an implementation of the gob.GobEncoder interface, which
-// allows Types to be included in structures encoded with encoding/gob.
-//
-// Currently it is not possible to represent capsule types in gob.
-func (t Type) GobEncode() ([]byte, error) {
- buf := &bytes.Buffer{}
- enc := gob.NewEncoder(buf)
-
- gt := gobType{
- Version: 0,
- Impl: t.typeImpl,
- }
-
- err := enc.Encode(gt)
- if err != nil {
- return nil, fmt.Errorf("error encoding cty.Type: %s", err)
- }
-
- return buf.Bytes(), nil
-}
-
-// GobDecode is an implementatino of the gob.GobDecoder interface, which
-// reverses the encoding performed by GobEncode to allow types to be recovered
-// from gob buffers.
-func (t *Type) GobDecode(buf []byte) error {
- r := bytes.NewReader(buf)
- dec := gob.NewDecoder(r)
-
- var gt gobType
- err := dec.Decode(>)
- if err != nil {
- return fmt.Errorf("error decoding cty.Type: %s", err)
- }
- if gt.Version != 0 {
- return fmt.Errorf("unsupported cty.Type encoding version %d; only 0 is supported", gt.Version)
- }
-
- t.typeImpl = gt.Impl
-
- return nil
-}
-
-// Capsule types cannot currently be gob-encoded, because they rely on pointer
-// equality and we have no way to recover the original pointer on decode.
-func (t *capsuleType) GobEncode() ([]byte, error) {
- return nil, fmt.Errorf("cannot gob-encode capsule type %q", t.FriendlyName(friendlyTypeName))
-}
-
-func (t *capsuleType) GobDecode() ([]byte, error) {
- return nil, fmt.Errorf("cannot gob-decode capsule type %q", t.FriendlyName(friendlyTypeName))
-}
-
-type gobValue struct {
- Version int
- Ty Type
- V interface{}
-}
-
-type gobType struct {
- Version int
- Impl typeImpl
-}
-
-type gobCapsuleTypeImpl struct {
-}
-
-// goDecodeFixNumberPtr fixes an unfortunate quirk of round-tripping cty.Number
-// values through gob: the big.Float.GobEncode method is implemented on a
-// pointer receiver, and so it loses the "pointer-ness" of the value on
-// encode, causing the values to emerge the other end as big.Float rather than
-// *big.Float as we expect elsewhere in cty.
-//
-// The implementation of gobDecodeFixNumberPtr mutates the given raw value
-// during its work, and may either return the same value mutated or a new
-// value. Callers must no longer use whatever value they pass as "raw" after
-// this function is called.
-func gobDecodeFixNumberPtr(raw interface{}, ty Type) interface{} {
- // Unfortunately we need to work recursively here because number values
- // might be embedded in structural or collection type values.
-
- switch {
- case ty.Equals(Number):
- if bf, ok := raw.(big.Float); ok {
- return &bf // wrap in pointer
- }
- case ty.IsMapType() && ty.ElementType().Equals(Number):
- if m, ok := raw.(map[string]interface{}); ok {
- for k, v := range m {
- m[k] = gobDecodeFixNumberPtr(v, ty.ElementType())
- }
- }
- case ty.IsListType() && ty.ElementType().Equals(Number):
- if s, ok := raw.([]interface{}); ok {
- for i, v := range s {
- s[i] = gobDecodeFixNumberPtr(v, ty.ElementType())
- }
- }
- case ty.IsSetType() && ty.ElementType().Equals(Number):
- if s, ok := raw.(set.Set); ok {
- newS := set.NewSet(s.Rules())
- for it := s.Iterator(); it.Next(); {
- newV := gobDecodeFixNumberPtr(it.Value(), ty.ElementType())
- newS.Add(newV)
- }
- return newS
- }
- case ty.IsObjectType():
- if m, ok := raw.(map[string]interface{}); ok {
- for k, v := range m {
- aty := ty.AttributeType(k)
- m[k] = gobDecodeFixNumberPtr(v, aty)
- }
- }
- case ty.IsTupleType():
- if s, ok := raw.([]interface{}); ok {
- for i, v := range s {
- ety := ty.TupleElementType(i)
- s[i] = gobDecodeFixNumberPtr(v, ety)
- }
- }
- }
-
- return raw
-}
-
-// gobDecodeFixNumberPtrVal is a helper wrapper around gobDecodeFixNumberPtr
-// that works with already-constructed values. This is primarily for testing,
-// to fix up intentionally-invalid number values for the parts of the test
-// code that need them to be valid, such as calling GoString on them.
-func gobDecodeFixNumberPtrVal(v Value) Value {
- raw := gobDecodeFixNumberPtr(v.v, v.ty)
- return Value{
- v: raw,
- ty: v.ty,
- }
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/doc.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/doc.go
deleted file mode 100644
index a5177d22..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/gocty/doc.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// Package gocty deals with converting between cty Values and native go
-// values.
-//
-// It operates under a similar principle to the encoding/json and
-// encoding/xml packages in the standard library, using reflection to
-// populate native Go data structures from cty values and vice-versa.
-package gocty
diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/helpers.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/helpers.go
deleted file mode 100644
index 0677a079..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/gocty/helpers.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package gocty
-
-import (
- "math/big"
- "reflect"
-
- "github.com/hashicorp/go-cty/cty"
- "github.com/hashicorp/go-cty/cty/set"
-)
-
-var valueType = reflect.TypeOf(cty.Value{})
-var typeType = reflect.TypeOf(cty.Type{})
-
-var setType = reflect.TypeOf(set.Set{})
-
-var bigFloatType = reflect.TypeOf(big.Float{})
-var bigIntType = reflect.TypeOf(big.Int{})
-
-var emptyInterfaceType = reflect.TypeOf(interface{}(nil))
-
-var stringType = reflect.TypeOf("")
-
-// structTagIndices interrogates the fields of the given type (which must
-// be a struct type, or we'll panic) and returns a map from the cty
-// attribute names declared via struct tags to the indices of the
-// fields holding those tags.
-//
-// This function will panic if two fields within the struct are tagged with
-// the same cty attribute name.
-func structTagIndices(st reflect.Type) map[string]int {
- ct := st.NumField()
- ret := make(map[string]int, ct)
-
- for i := 0; i < ct; i++ {
- field := st.Field(i)
- attrName := field.Tag.Get("cty")
- if attrName != "" {
- ret[attrName] = i
- }
- }
-
- return ret
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/in.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/in.go
deleted file mode 100644
index fc35c169..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/gocty/in.go
+++ /dev/null
@@ -1,548 +0,0 @@
-package gocty
-
-import (
- "math/big"
- "reflect"
-
- "github.com/hashicorp/go-cty/cty"
- "github.com/hashicorp/go-cty/cty/convert"
- "github.com/hashicorp/go-cty/cty/set"
-)
-
-// ToCtyValue produces a cty.Value from a Go value. The result will conform
-// to the given type, or an error will be returned if this is not possible.
-//
-// The target type serves as a hint to resolve ambiguities in the mapping.
-// For example, the Go type set.Set tells us that the value is a set but
-// does not describe the set's element type. This also allows for convenient
-// conversions, such as populating a set from a slice rather than having to
-// first explicitly instantiate a set.Set.
-//
-// The audience of this function is assumed to be the developers of Go code
-// that is integrating with cty, and thus the error messages it returns are
-// presented from Go's perspective. These messages are thus not appropriate
-// for display to end-users. An error returned from ToCtyValue represents a
-// bug in the calling program, not user error.
-func ToCtyValue(val interface{}, ty cty.Type) (cty.Value, error) {
- // 'path' starts off as empty but will grow for each level of recursive
- // call we make, so by the time toCtyValue returns it is likely to have
- // unused capacity on the end of it, depending on how deeply-recursive
- // the given Type is.
- path := make(cty.Path, 0)
- return toCtyValue(reflect.ValueOf(val), ty, path)
-}
-
-func toCtyValue(val reflect.Value, ty cty.Type, path cty.Path) (cty.Value, error) {
- if val != (reflect.Value{}) && val.Type().AssignableTo(valueType) {
- // If the source value is a cty.Value then we'll try to just pass
- // through to the target type directly.
- return toCtyPassthrough(val, ty, path)
- }
-
- switch ty {
- case cty.Bool:
- return toCtyBool(val, path)
- case cty.Number:
- return toCtyNumber(val, path)
- case cty.String:
- return toCtyString(val, path)
- case cty.DynamicPseudoType:
- return toCtyDynamic(val, path)
- }
-
- switch {
- case ty.IsListType():
- return toCtyList(val, ty.ElementType(), path)
- case ty.IsMapType():
- return toCtyMap(val, ty.ElementType(), path)
- case ty.IsSetType():
- return toCtySet(val, ty.ElementType(), path)
- case ty.IsObjectType():
- return toCtyObject(val, ty.AttributeTypes(), path)
- case ty.IsTupleType():
- return toCtyTuple(val, ty.TupleElementTypes(), path)
- case ty.IsCapsuleType():
- return toCtyCapsule(val, ty, path)
- }
-
- // We should never fall out here
- return cty.NilVal, path.NewErrorf("unsupported target type %#v", ty)
-}
-
-func toCtyBool(val reflect.Value, path cty.Path) (cty.Value, error) {
- if val = toCtyUnwrapPointer(val); !val.IsValid() {
- return cty.NullVal(cty.Bool), nil
- }
-
- switch val.Kind() {
-
- case reflect.Bool:
- return cty.BoolVal(val.Bool()), nil
-
- default:
- return cty.NilVal, path.NewErrorf("can't convert Go %s to bool", val.Kind())
-
- }
-
-}
-
-func toCtyNumber(val reflect.Value, path cty.Path) (cty.Value, error) {
- if val = toCtyUnwrapPointer(val); !val.IsValid() {
- return cty.NullVal(cty.Number), nil
- }
-
- switch val.Kind() {
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return cty.NumberIntVal(val.Int()), nil
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return cty.NumberUIntVal(val.Uint()), nil
-
- case reflect.Float32, reflect.Float64:
- return cty.NumberFloatVal(val.Float()), nil
-
- case reflect.Struct:
- if val.Type().AssignableTo(bigIntType) {
- bigInt := val.Interface().(big.Int)
- bigFloat := (&big.Float{}).SetInt(&bigInt)
- val = reflect.ValueOf(*bigFloat)
- }
-
- if val.Type().AssignableTo(bigFloatType) {
- bigFloat := val.Interface().(big.Float)
- return cty.NumberVal(&bigFloat), nil
- }
-
- fallthrough
- default:
- return cty.NilVal, path.NewErrorf("can't convert Go %s to number", val.Kind())
-
- }
-
-}
-
-func toCtyString(val reflect.Value, path cty.Path) (cty.Value, error) {
- if val = toCtyUnwrapPointer(val); !val.IsValid() {
- return cty.NullVal(cty.String), nil
- }
-
- switch val.Kind() {
-
- case reflect.String:
- return cty.StringVal(val.String()), nil
-
- default:
- return cty.NilVal, path.NewErrorf("can't convert Go %s to string", val.Kind())
-
- }
-
-}
-
-func toCtyList(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) {
- if val = toCtyUnwrapPointer(val); !val.IsValid() {
- return cty.NullVal(cty.List(ety)), nil
- }
-
- switch val.Kind() {
-
- case reflect.Slice:
- if val.IsNil() {
- return cty.NullVal(cty.List(ety)), nil
- }
- fallthrough
- case reflect.Array:
- if val.Len() == 0 {
- return cty.ListValEmpty(ety), nil
- }
-
- // While we work on our elements we'll temporarily grow
- // path to give us a place to put our index step.
- path = append(path, cty.PathStep(nil))
-
- vals := make([]cty.Value, val.Len())
- for i := range vals {
- var err error
- path[len(path)-1] = cty.IndexStep{
- Key: cty.NumberIntVal(int64(i)),
- }
- vals[i], err = toCtyValue(val.Index(i), ety, path)
- if err != nil {
- return cty.NilVal, err
- }
- }
-
- // Discard our extra path segment, retaining it as extra capacity
- // for future appending to the path.
- path = path[:len(path)-1]
-
- return cty.ListVal(vals), nil
-
- default:
- return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.List(ety))
-
- }
-}
-
-func toCtyMap(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) {
- if val = toCtyUnwrapPointer(val); !val.IsValid() {
- return cty.NullVal(cty.Map(ety)), nil
- }
-
- switch val.Kind() {
-
- case reflect.Map:
- if val.IsNil() {
- return cty.NullVal(cty.Map(ety)), nil
- }
-
- if val.Len() == 0 {
- return cty.MapValEmpty(ety), nil
- }
-
- keyType := val.Type().Key()
- if keyType.Kind() != reflect.String {
- return cty.NilVal, path.NewErrorf("can't convert Go map with key type %s; key type must be string", keyType)
- }
-
- // While we work on our elements we'll temporarily grow
- // path to give us a place to put our index step.
- path = append(path, cty.PathStep(nil))
-
- vals := make(map[string]cty.Value, val.Len())
- for _, kv := range val.MapKeys() {
- k := kv.String()
- var err error
- path[len(path)-1] = cty.IndexStep{
- Key: cty.StringVal(k),
- }
- vals[k], err = toCtyValue(val.MapIndex(reflect.ValueOf(k)), ety, path)
- if err != nil {
- return cty.NilVal, err
- }
- }
-
- // Discard our extra path segment, retaining it as extra capacity
- // for future appending to the path.
- path = path[:len(path)-1]
-
- return cty.MapVal(vals), nil
-
- default:
- return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Map(ety))
-
- }
-}
-
-func toCtySet(val reflect.Value, ety cty.Type, path cty.Path) (cty.Value, error) {
- if val = toCtyUnwrapPointer(val); !val.IsValid() {
- return cty.NullVal(cty.Set(ety)), nil
- }
-
- var vals []cty.Value
-
- switch val.Kind() {
-
- case reflect.Slice:
- if val.IsNil() {
- return cty.NullVal(cty.Set(ety)), nil
- }
- fallthrough
- case reflect.Array:
- if val.Len() == 0 {
- return cty.SetValEmpty(ety), nil
- }
-
- vals = make([]cty.Value, val.Len())
- for i := range vals {
- var err error
- vals[i], err = toCtyValue(val.Index(i), ety, path)
- if err != nil {
- return cty.NilVal, err
- }
- }
-
- case reflect.Struct:
-
- if !val.Type().AssignableTo(setType) {
- return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Type(), cty.Set(ety))
- }
-
- rawSet := val.Interface().(set.Set)
- inVals := rawSet.Values()
-
- if len(inVals) == 0 {
- return cty.SetValEmpty(ety), nil
- }
-
- vals = make([]cty.Value, len(inVals))
- for i := range inVals {
- var err error
- vals[i], err = toCtyValue(reflect.ValueOf(inVals[i]), ety, path)
- if err != nil {
- return cty.NilVal, err
- }
- }
-
- default:
- return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Set(ety))
-
- }
-
- return cty.SetVal(vals), nil
-}
-
-func toCtyObject(val reflect.Value, attrTypes map[string]cty.Type, path cty.Path) (cty.Value, error) {
- if val = toCtyUnwrapPointer(val); !val.IsValid() {
- return cty.NullVal(cty.Object(attrTypes)), nil
- }
-
- switch val.Kind() {
-
- case reflect.Map:
- if val.IsNil() {
- return cty.NullVal(cty.Object(attrTypes)), nil
- }
-
- keyType := val.Type().Key()
- if keyType.Kind() != reflect.String {
- return cty.NilVal, path.NewErrorf("can't convert Go map with key type %s; key type must be string", keyType)
- }
-
- if len(attrTypes) == 0 {
- return cty.EmptyObjectVal, nil
- }
-
- // While we work on our elements we'll temporarily grow
- // path to give us a place to put our GetAttr step.
- path = append(path, cty.PathStep(nil))
-
- haveKeys := make(map[string]struct{}, val.Len())
- for _, kv := range val.MapKeys() {
- haveKeys[kv.String()] = struct{}{}
- }
-
- vals := make(map[string]cty.Value, len(attrTypes))
- for k, at := range attrTypes {
- var err error
- path[len(path)-1] = cty.GetAttrStep{
- Name: k,
- }
-
- if _, have := haveKeys[k]; !have {
- vals[k] = cty.NullVal(at)
- continue
- }
-
- vals[k], err = toCtyValue(val.MapIndex(reflect.ValueOf(k)), at, path)
- if err != nil {
- return cty.NilVal, err
- }
- }
-
- // Discard our extra path segment, retaining it as extra capacity
- // for future appending to the path.
- path = path[:len(path)-1]
-
- return cty.ObjectVal(vals), nil
-
- case reflect.Struct:
- if len(attrTypes) == 0 {
- return cty.EmptyObjectVal, nil
- }
-
- // While we work on our elements we'll temporarily grow
- // path to give us a place to put our GetAttr step.
- path = append(path, cty.PathStep(nil))
-
- attrFields := structTagIndices(val.Type())
-
- vals := make(map[string]cty.Value, len(attrTypes))
- for k, at := range attrTypes {
- path[len(path)-1] = cty.GetAttrStep{
- Name: k,
- }
-
- if fieldIdx, have := attrFields[k]; have {
- var err error
- vals[k], err = toCtyValue(val.Field(fieldIdx), at, path)
- if err != nil {
- return cty.NilVal, err
- }
- } else {
- vals[k] = cty.NullVal(at)
- }
- }
-
- // Discard our extra path segment, retaining it as extra capacity
- // for future appending to the path.
- path = path[:len(path)-1]
-
- return cty.ObjectVal(vals), nil
-
- default:
- return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Object(attrTypes))
-
- }
-}
-
-func toCtyTuple(val reflect.Value, elemTypes []cty.Type, path cty.Path) (cty.Value, error) {
- if val = toCtyUnwrapPointer(val); !val.IsValid() {
- return cty.NullVal(cty.Tuple(elemTypes)), nil
- }
-
- switch val.Kind() {
-
- case reflect.Slice:
- if val.IsNil() {
- return cty.NullVal(cty.Tuple(elemTypes)), nil
- }
-
- if val.Len() != len(elemTypes) {
- return cty.NilVal, path.NewErrorf("wrong number of elements %d; need %d", val.Len(), len(elemTypes))
- }
-
- if len(elemTypes) == 0 {
- return cty.EmptyTupleVal, nil
- }
-
- // While we work on our elements we'll temporarily grow
- // path to give us a place to put our Index step.
- path = append(path, cty.PathStep(nil))
-
- vals := make([]cty.Value, len(elemTypes))
- for i, ety := range elemTypes {
- var err error
-
- path[len(path)-1] = cty.IndexStep{
- Key: cty.NumberIntVal(int64(i)),
- }
-
- vals[i], err = toCtyValue(val.Index(i), ety, path)
- if err != nil {
- return cty.NilVal, err
- }
- }
-
- // Discard our extra path segment, retaining it as extra capacity
- // for future appending to the path.
- path = path[:len(path)-1]
-
- return cty.TupleVal(vals), nil
-
- case reflect.Struct:
- fieldCount := val.Type().NumField()
- if fieldCount != len(elemTypes) {
- return cty.NilVal, path.NewErrorf("wrong number of struct fields %d; need %d", fieldCount, len(elemTypes))
- }
-
- if len(elemTypes) == 0 {
- return cty.EmptyTupleVal, nil
- }
-
- // While we work on our elements we'll temporarily grow
- // path to give us a place to put our Index step.
- path = append(path, cty.PathStep(nil))
-
- vals := make([]cty.Value, len(elemTypes))
- for i, ety := range elemTypes {
- var err error
-
- path[len(path)-1] = cty.IndexStep{
- Key: cty.NumberIntVal(int64(i)),
- }
-
- vals[i], err = toCtyValue(val.Field(i), ety, path)
- if err != nil {
- return cty.NilVal, err
- }
- }
-
- // Discard our extra path segment, retaining it as extra capacity
- // for future appending to the path.
- path = path[:len(path)-1]
-
- return cty.TupleVal(vals), nil
-
- default:
- return cty.NilVal, path.NewErrorf("can't convert Go %s to %#v", val.Kind(), cty.Tuple(elemTypes))
-
- }
-}
-
-func toCtyCapsule(val reflect.Value, capsuleType cty.Type, path cty.Path) (cty.Value, error) {
- if val = toCtyUnwrapPointer(val); !val.IsValid() {
- return cty.NullVal(capsuleType), nil
- }
-
- if val.Kind() != reflect.Ptr {
- if !val.CanAddr() {
- return cty.NilVal, path.NewErrorf("source value for capsule %#v must be addressable", capsuleType)
- }
-
- val = val.Addr()
- }
-
- if !val.Type().Elem().AssignableTo(capsuleType.EncapsulatedType()) {
- return cty.NilVal, path.NewErrorf("value of type %T not compatible with capsule %#v", val.Interface(), capsuleType)
- }
-
- return cty.CapsuleVal(capsuleType, val.Interface()), nil
-}
-
-func toCtyDynamic(val reflect.Value, path cty.Path) (cty.Value, error) {
- if val = toCtyUnwrapPointer(val); !val.IsValid() {
- return cty.NullVal(cty.DynamicPseudoType), nil
- }
-
- switch val.Kind() {
-
- case reflect.Struct:
- if !val.Type().AssignableTo(valueType) {
- return cty.NilVal, path.NewErrorf("can't convert Go %s dynamically; only cty.Value allowed", val.Type())
- }
-
- return val.Interface().(cty.Value), nil
-
- default:
- return cty.NilVal, path.NewErrorf("can't convert Go %s dynamically; only cty.Value allowed", val.Kind())
-
- }
-
-}
-
-func toCtyPassthrough(wrappedVal reflect.Value, wantTy cty.Type, path cty.Path) (cty.Value, error) {
- if wrappedVal = toCtyUnwrapPointer(wrappedVal); !wrappedVal.IsValid() {
- return cty.NullVal(wantTy), nil
- }
-
- givenVal := wrappedVal.Interface().(cty.Value)
-
- val, err := convert.Convert(givenVal, wantTy)
- if err != nil {
- return cty.NilVal, path.NewErrorf("unsuitable value: %s", err)
- }
- return val, nil
-}
-
-// toCtyUnwrapPointer is a helper for dealing with Go pointers. It has three
-// possible outcomes:
-//
-// - Given value isn't a pointer, so it's just returned as-is.
-// - Given value is a non-nil pointer, in which case it is dereferenced
-// and the result returned.
-// - Given value is a nil pointer, in which case an invalid value is returned.
-//
-// For nested pointer types, like **int, they are all dereferenced in turn
-// until a non-pointer value is found, or until a nil pointer is encountered.
-func toCtyUnwrapPointer(val reflect.Value) reflect.Value {
- for val.Kind() == reflect.Ptr || val.Kind() == reflect.Interface {
- if val.IsNil() {
- return reflect.Value{}
- }
-
- val = val.Elem()
- }
-
- return val
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/out.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/out.go
deleted file mode 100644
index 404faba1..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/gocty/out.go
+++ /dev/null
@@ -1,686 +0,0 @@
-package gocty
-
-import (
- "math"
- "math/big"
- "reflect"
-
- "github.com/hashicorp/go-cty/cty"
-)
-
-// FromCtyValue assigns a cty.Value to a reflect.Value, which must be a pointer,
-// using a fixed set of conversion rules.
-//
-// This function considers its audience to be the creator of the cty Value
-// given, and thus the error messages it generates are (unlike with ToCtyValue)
-// presented in cty terminology that is generally appropriate to return to
-// end-users in applications where cty data structures are built from
-// user-provided configuration. In particular this means that if incorrect
-// target types are provided by the calling application the resulting error
-// messages are likely to be confusing, since we assume that the given target
-// type is correct and the cty.Value is where the error lies.
-//
-// If an error is returned, the target data structure may have been partially
-// populated, but the degree to which this is true is an implementation
-// detail that the calling application should not rely on.
-//
-// The function will panic if given a non-pointer as the Go value target,
-// since that is considered to be a bug in the calling program.
-func FromCtyValue(val cty.Value, target interface{}) error {
- tVal := reflect.ValueOf(target)
- if tVal.Kind() != reflect.Ptr {
- panic("target value is not a pointer")
- }
- if tVal.IsNil() {
- panic("target value is nil pointer")
- }
-
- // 'path' starts off as empty but will grow for each level of recursive
- // call we make, so by the time fromCtyValue returns it is likely to have
- // unused capacity on the end of it, depending on how deeply-recursive
- // the given cty.Value is.
- path := make(cty.Path, 0)
- return fromCtyValue(val, tVal, path)
-}
-
-func fromCtyValue(val cty.Value, target reflect.Value, path cty.Path) error {
- ty := val.Type()
-
- deepTarget := fromCtyPopulatePtr(target, false)
-
- // If we're decoding into a cty.Value then we just pass through the
- // value as-is, to enable partial decoding. This is the only situation
- // where unknown values are permitted.
- if deepTarget.Kind() == reflect.Struct && deepTarget.Type().AssignableTo(valueType) {
- deepTarget.Set(reflect.ValueOf(val))
- return nil
- }
-
- // Lists and maps can be nil without indirection, but everything else
- // requires a pointer and we set it immediately to nil.
- // We also make an exception for capsule types because we want to handle
- // pointers specially for these.
- // (fromCtyList and fromCtyMap must therefore deal with val.IsNull, while
- // other types can assume no nulls after this point.)
- if val.IsNull() && !val.Type().IsListType() && !val.Type().IsMapType() && !val.Type().IsCapsuleType() {
- target = fromCtyPopulatePtr(target, true)
- if target.Kind() != reflect.Ptr {
- return path.NewErrorf("null value is not allowed")
- }
-
- target.Set(reflect.Zero(target.Type()))
- return nil
- }
-
- target = deepTarget
-
- if !val.IsKnown() {
- return path.NewErrorf("value must be known")
- }
-
- switch ty {
- case cty.Bool:
- return fromCtyBool(val, target, path)
- case cty.Number:
- return fromCtyNumber(val, target, path)
- case cty.String:
- return fromCtyString(val, target, path)
- }
-
- switch {
- case ty.IsListType():
- return fromCtyList(val, target, path)
- case ty.IsMapType():
- return fromCtyMap(val, target, path)
- case ty.IsSetType():
- return fromCtySet(val, target, path)
- case ty.IsObjectType():
- return fromCtyObject(val, target, path)
- case ty.IsTupleType():
- return fromCtyTuple(val, target, path)
- case ty.IsCapsuleType():
- return fromCtyCapsule(val, target, path)
- }
-
- // We should never fall out here; reaching here indicates a bug in this
- // function.
- return path.NewErrorf("unsupported source type %#v", ty)
-}
-
-func fromCtyBool(val cty.Value, target reflect.Value, path cty.Path) error {
- switch target.Kind() {
-
- case reflect.Bool:
- target.SetBool(val.True())
- return nil
-
- default:
- return likelyRequiredTypesError(path, target)
-
- }
-}
-
-func fromCtyNumber(val cty.Value, target reflect.Value, path cty.Path) error {
- bf := val.AsBigFloat()
-
- switch target.Kind() {
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return fromCtyNumberInt(bf, target, path)
-
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return fromCtyNumberUInt(bf, target, path)
-
- case reflect.Float32, reflect.Float64:
- return fromCtyNumberFloat(bf, target, path)
-
- case reflect.Struct:
- return fromCtyNumberBig(bf, target, path)
-
- default:
- return likelyRequiredTypesError(path, target)
-
- }
-}
-
-func fromCtyNumberInt(bf *big.Float, target reflect.Value, path cty.Path) error {
- // Doing this with switch rather than << arithmetic because << with
- // result >32-bits is not portable to 32-bit systems.
- var min int64
- var max int64
- switch target.Type().Bits() {
- case 8:
- min = math.MinInt8
- max = math.MaxInt8
- case 16:
- min = math.MinInt16
- max = math.MaxInt16
- case 32:
- min = math.MinInt32
- max = math.MaxInt32
- case 64:
- min = math.MinInt64
- max = math.MaxInt64
- default:
- panic("weird number of bits in target int")
- }
-
- iv, accuracy := bf.Int64()
- if accuracy != big.Exact || iv < min || iv > max {
- return path.NewErrorf("value must be a whole number, between %d and %d", min, max)
- }
-
- target.SetInt(iv)
- return nil
-}
-
-func fromCtyNumberUInt(bf *big.Float, target reflect.Value, path cty.Path) error {
- // Doing this with switch rather than << arithmetic because << with
- // result >32-bits is not portable to 32-bit systems.
- var max uint64
- switch target.Type().Bits() {
- case 8:
- max = math.MaxUint8
- case 16:
- max = math.MaxUint16
- case 32:
- max = math.MaxUint32
- case 64:
- max = math.MaxUint64
- default:
- panic("weird number of bits in target uint")
- }
-
- iv, accuracy := bf.Uint64()
- if accuracy != big.Exact || iv > max {
- return path.NewErrorf("value must be a whole number, between 0 and %d inclusive", max)
- }
-
- target.SetUint(iv)
- return nil
-}
-
-func fromCtyNumberFloat(bf *big.Float, target reflect.Value, path cty.Path) error {
- switch target.Kind() {
- case reflect.Float32, reflect.Float64:
- fv, accuracy := bf.Float64()
- if accuracy != big.Exact {
- // We allow the precision to be truncated as part of our conversion,
- // but we don't want to silently introduce infinities.
- if math.IsInf(fv, 0) {
- return path.NewErrorf("value must be between %f and %f inclusive", -math.MaxFloat64, math.MaxFloat64)
- }
- }
- target.SetFloat(fv)
- return nil
- default:
- panic("unsupported kind of float")
- }
-}
-
-func fromCtyNumberBig(bf *big.Float, target reflect.Value, path cty.Path) error {
- switch {
-
- case bigFloatType.ConvertibleTo(target.Type()):
- // Easy!
- target.Set(reflect.ValueOf(bf).Elem().Convert(target.Type()))
- return nil
-
- case bigIntType.ConvertibleTo(target.Type()):
- bi, accuracy := bf.Int(nil)
- if accuracy != big.Exact {
- return path.NewErrorf("value must be a whole number")
- }
- target.Set(reflect.ValueOf(bi).Elem().Convert(target.Type()))
- return nil
-
- default:
- return likelyRequiredTypesError(path, target)
- }
-}
-
-func fromCtyString(val cty.Value, target reflect.Value, path cty.Path) error {
- switch target.Kind() {
- case reflect.String:
- target.SetString(val.AsString())
- return nil
-
- default:
- return likelyRequiredTypesError(path, target)
-
- }
-}
-
-func fromCtyList(val cty.Value, target reflect.Value, path cty.Path) error {
- switch target.Kind() {
-
- case reflect.Slice:
- if val.IsNull() {
- target.Set(reflect.Zero(target.Type()))
- return nil
- }
-
- length := val.LengthInt()
- tv := reflect.MakeSlice(target.Type(), length, length)
-
- path = append(path, nil)
-
- i := 0
- var err error
- val.ForEachElement(func(key cty.Value, val cty.Value) bool {
- path[len(path)-1] = cty.IndexStep{
- Key: cty.NumberIntVal(int64(i)),
- }
-
- targetElem := tv.Index(i)
- err = fromCtyValue(val, targetElem, path)
- if err != nil {
- return true
- }
-
- i++
- return false
- })
- if err != nil {
- return err
- }
-
- path = path[:len(path)-1]
-
- target.Set(tv)
- return nil
-
- case reflect.Array:
- if val.IsNull() {
- return path.NewErrorf("null value is not allowed")
- }
-
- length := val.LengthInt()
- if length != target.Len() {
- return path.NewErrorf("must be a list of length %d", target.Len())
- }
-
- path = append(path, nil)
-
- i := 0
- var err error
- val.ForEachElement(func(key cty.Value, val cty.Value) bool {
- path[len(path)-1] = cty.IndexStep{
- Key: cty.NumberIntVal(int64(i)),
- }
-
- targetElem := target.Index(i)
- err = fromCtyValue(val, targetElem, path)
- if err != nil {
- return true
- }
-
- i++
- return false
- })
- if err != nil {
- return err
- }
-
- path = path[:len(path)-1]
-
- return nil
-
- default:
- return likelyRequiredTypesError(path, target)
-
- }
-}
-
-func fromCtyMap(val cty.Value, target reflect.Value, path cty.Path) error {
-
- switch target.Kind() {
-
- case reflect.Map:
- if val.IsNull() {
- target.Set(reflect.Zero(target.Type()))
- return nil
- }
-
- tv := reflect.MakeMap(target.Type())
- et := target.Type().Elem()
-
- path = append(path, nil)
-
- var err error
- val.ForEachElement(func(key cty.Value, val cty.Value) bool {
- path[len(path)-1] = cty.IndexStep{
- Key: key,
- }
-
- ks := key.AsString()
-
- targetElem := reflect.New(et)
- err = fromCtyValue(val, targetElem, path)
-
- tv.SetMapIndex(reflect.ValueOf(ks), targetElem.Elem())
-
- return err != nil
- })
- if err != nil {
- return err
- }
-
- path = path[:len(path)-1]
-
- target.Set(tv)
- return nil
-
- default:
- return likelyRequiredTypesError(path, target)
-
- }
-}
-
-func fromCtySet(val cty.Value, target reflect.Value, path cty.Path) error {
- switch target.Kind() {
-
- case reflect.Slice:
- if val.IsNull() {
- target.Set(reflect.Zero(target.Type()))
- return nil
- }
-
- length := val.LengthInt()
- tv := reflect.MakeSlice(target.Type(), length, length)
-
- i := 0
- var err error
- val.ForEachElement(func(key cty.Value, val cty.Value) bool {
- targetElem := tv.Index(i)
- err = fromCtyValue(val, targetElem, path)
- if err != nil {
- return true
- }
-
- i++
- return false
- })
- if err != nil {
- return err
- }
-
- target.Set(tv)
- return nil
-
- case reflect.Array:
- if val.IsNull() {
- return path.NewErrorf("null value is not allowed")
- }
-
- length := val.LengthInt()
- if length != target.Len() {
- return path.NewErrorf("must be a set of length %d", target.Len())
- }
-
- i := 0
- var err error
- val.ForEachElement(func(key cty.Value, val cty.Value) bool {
- targetElem := target.Index(i)
- err = fromCtyValue(val, targetElem, path)
- if err != nil {
- return true
- }
-
- i++
- return false
- })
- if err != nil {
- return err
- }
-
- return nil
-
- // TODO: decode into set.Set instance
-
- default:
- return likelyRequiredTypesError(path, target)
-
- }
-}
-
-func fromCtyObject(val cty.Value, target reflect.Value, path cty.Path) error {
-
- switch target.Kind() {
-
- case reflect.Struct:
-
- attrTypes := val.Type().AttributeTypes()
- targetFields := structTagIndices(target.Type())
-
- path = append(path, nil)
-
- for k, i := range targetFields {
- if _, exists := attrTypes[k]; !exists {
- // If the field in question isn't able to represent nil,
- // that's an error.
- fk := target.Field(i).Kind()
- switch fk {
- case reflect.Ptr, reflect.Slice, reflect.Map, reflect.Interface:
- // okay
- default:
- return path.NewErrorf("missing required attribute %q", k)
- }
- }
- }
-
- for k := range attrTypes {
- path[len(path)-1] = cty.GetAttrStep{
- Name: k,
- }
-
- fieldIdx, exists := targetFields[k]
- if !exists {
- return path.NewErrorf("unsupported attribute %q", k)
- }
-
- ev := val.GetAttr(k)
-
- targetField := target.Field(fieldIdx)
- err := fromCtyValue(ev, targetField, path)
- if err != nil {
- return err
- }
- }
-
- path = path[:len(path)-1]
-
- return nil
-
- default:
- return likelyRequiredTypesError(path, target)
-
- }
-}
-
-func fromCtyTuple(val cty.Value, target reflect.Value, path cty.Path) error {
-
- switch target.Kind() {
-
- case reflect.Struct:
-
- elemTypes := val.Type().TupleElementTypes()
- fieldCount := target.Type().NumField()
-
- if fieldCount != len(elemTypes) {
- return path.NewErrorf("a tuple of %d elements is required", fieldCount)
- }
-
- path = append(path, nil)
-
- for i := range elemTypes {
- path[len(path)-1] = cty.IndexStep{
- Key: cty.NumberIntVal(int64(i)),
- }
-
- ev := val.Index(cty.NumberIntVal(int64(i)))
-
- targetField := target.Field(i)
- err := fromCtyValue(ev, targetField, path)
- if err != nil {
- return err
- }
- }
-
- path = path[:len(path)-1]
-
- return nil
-
- default:
- return likelyRequiredTypesError(path, target)
-
- }
-}
-
-func fromCtyCapsule(val cty.Value, target reflect.Value, path cty.Path) error {
-
- if target.Kind() == reflect.Ptr {
- // Walk through indirection until we get to the last pointer,
- // which we might set to null below.
- target = fromCtyPopulatePtr(target, true)
-
- if val.IsNull() {
- target.Set(reflect.Zero(target.Type()))
- return nil
- }
-
- // Since a capsule contains a pointer to an object, we'll preserve
- // that pointer on the way out and thus allow the caller to recover
- // the original object, rather than a copy of it.
-
- eType := val.Type().EncapsulatedType()
-
- if !eType.AssignableTo(target.Elem().Type()) {
- // Our interface contract promises that we won't expose Go
- // implementation details in error messages, so we need to keep
- // this vague. This can only arise if a calling application has
- // more than one capsule type in play and a user mixes them up.
- return path.NewErrorf("incorrect type %s", val.Type().FriendlyName())
- }
-
- target.Set(reflect.ValueOf(val.EncapsulatedValue()))
-
- return nil
- } else {
- if val.IsNull() {
- return path.NewErrorf("null value is not allowed")
- }
-
- // If our target isn't a pointer then we will attempt to copy
- // the encapsulated value into it.
-
- eType := val.Type().EncapsulatedType()
-
- if !eType.AssignableTo(target.Type()) {
- // Our interface contract promises that we won't expose Go
- // implementation details in error messages, so we need to keep
- // this vague. This can only arise if a calling application has
- // more than one capsule type in play and a user mixes them up.
- return path.NewErrorf("incorrect type %s", val.Type().FriendlyName())
- }
-
- // We know that EncapsulatedValue is always a pointer, so we
- // can safely call .Elem on its reflect.Value.
- target.Set(reflect.ValueOf(val.EncapsulatedValue()).Elem())
-
- return nil
- }
-
-}
-
-// fromCtyPopulatePtr recognizes when target is a pointer type and allocates
-// a value to assign to that pointer, which it returns.
-//
-// If the given value has multiple levels of indirection, like **int, these
-// will be processed in turn so that the return value is guaranteed to be
-// a non-pointer.
-//
-// As an exception, if decodingNull is true then the returned value will be
-// the final level of pointer, if any, so that the caller can assign it
-// as nil to represent a null value. If the given target value is not a pointer
-// at all then the returned value will be just the given target, so the caller
-// must test if the returned value is a pointer before trying to assign nil
-// to it.
-func fromCtyPopulatePtr(target reflect.Value, decodingNull bool) reflect.Value {
- for {
- if target.Kind() == reflect.Interface && !target.IsNil() {
- e := target.Elem()
- if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
- target = e
- }
- }
-
- if target.Kind() != reflect.Ptr {
- break
- }
-
- // Stop early if we're decodingNull and we've found our last indirection
- if target.Elem().Kind() != reflect.Ptr && decodingNull && target.CanSet() {
- break
- }
-
- if target.IsNil() {
- target.Set(reflect.New(target.Type().Elem()))
- }
-
- target = target.Elem()
- }
- return target
-}
-
-// likelyRequiredTypesError returns an error that states which types are
-// acceptable by making some assumptions about what types we support for
-// each target Go kind. It's not a precise science but it allows us to return
-// an error message that is cty-user-oriented rather than Go-oriented.
-//
-// Generally these error messages should be a matter of last resort, since
-// the calling application should be validating user-provided value types
-// before decoding anyway.
-func likelyRequiredTypesError(path cty.Path, target reflect.Value) error {
- switch target.Kind() {
-
- case reflect.Bool:
- return path.NewErrorf("bool value is required")
-
- case reflect.String:
- return path.NewErrorf("string value is required")
-
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- fallthrough
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- fallthrough
- case reflect.Float32, reflect.Float64:
- return path.NewErrorf("number value is required")
-
- case reflect.Slice, reflect.Array:
- return path.NewErrorf("list or set value is required")
-
- case reflect.Map:
- return path.NewErrorf("map or object value is required")
-
- case reflect.Struct:
- switch {
-
- case target.Type().AssignableTo(bigFloatType) || target.Type().AssignableTo(bigIntType):
- return path.NewErrorf("number value is required")
-
- case target.Type().AssignableTo(setType):
- return path.NewErrorf("set or list value is required")
-
- default:
- return path.NewErrorf("object or tuple value is required")
-
- }
-
- default:
- // We should avoid getting into this path, since this error
- // message is rather useless.
- return path.NewErrorf("incorrect type")
-
- }
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/gocty/type_implied.go b/vendor/github.com/hashicorp/go-cty/cty/gocty/type_implied.go
deleted file mode 100644
index b4134253..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/gocty/type_implied.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package gocty
-
-import (
- "reflect"
-
- "github.com/hashicorp/go-cty/cty"
-)
-
-// ImpliedType takes an arbitrary Go value (as an interface{}) and attempts
-// to find a suitable cty.Type instance that could be used for a conversion
-// with ToCtyValue.
-//
-// This allows -- for simple situations at least -- types to be defined just
-// once in Go and the cty types derived from the Go types, but in the process
-// it makes some assumptions that may be undesirable so applications are
-// encouraged to build their cty types directly if exacting control is
-// required.
-//
-// Not all Go types can be represented as cty types, so an error may be
-// returned which is usually considered to be a bug in the calling program.
-// In particular, ImpliedType will never use capsule types in its returned
-// type, because it cannot know the capsule types supported by the calling
-// program.
-func ImpliedType(gv interface{}) (cty.Type, error) {
- rt := reflect.TypeOf(gv)
- var path cty.Path
- return impliedType(rt, path)
-}
-
-func impliedType(rt reflect.Type, path cty.Path) (cty.Type, error) {
- switch rt.Kind() {
-
- case reflect.Ptr:
- return impliedType(rt.Elem(), path)
-
- // Primitive types
- case reflect.Bool:
- return cty.Bool, nil
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return cty.Number, nil
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return cty.Number, nil
- case reflect.Float32, reflect.Float64:
- return cty.Number, nil
- case reflect.String:
- return cty.String, nil
-
- // Collection types
- case reflect.Slice:
- path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.Number)})
- ety, err := impliedType(rt.Elem(), path)
- if err != nil {
- return cty.NilType, err
- }
- return cty.List(ety), nil
- case reflect.Map:
- if !stringType.AssignableTo(rt.Key()) {
- return cty.NilType, path.NewErrorf("no cty.Type for %s (must have string keys)", rt)
- }
- path := append(path, cty.IndexStep{Key: cty.UnknownVal(cty.String)})
- ety, err := impliedType(rt.Elem(), path)
- if err != nil {
- return cty.NilType, err
- }
- return cty.Map(ety), nil
-
- // Structural types
- case reflect.Struct:
- return impliedStructType(rt, path)
-
- default:
- return cty.NilType, path.NewErrorf("no cty.Type for %s", rt)
- }
-}
-
-func impliedStructType(rt reflect.Type, path cty.Path) (cty.Type, error) {
- if valueType.AssignableTo(rt) {
- // Special case: cty.Value represents cty.DynamicPseudoType, for
- // type conformance checking.
- return cty.DynamicPseudoType, nil
- }
-
- fieldIdxs := structTagIndices(rt)
- if len(fieldIdxs) == 0 {
- return cty.NilType, path.NewErrorf("no cty.Type for %s (no cty field tags)", rt)
- }
-
- atys := make(map[string]cty.Type, len(fieldIdxs))
-
- {
- // Temporary extension of path for attributes
- path := append(path, nil)
-
- for k, fi := range fieldIdxs {
- path[len(path)-1] = cty.GetAttrStep{Name: k}
-
- ft := rt.Field(fi).Type
- aty, err := impliedType(ft, path)
- if err != nil {
- return cty.NilType, err
- }
-
- atys[k] = aty
- }
- }
-
- return cty.Object(atys), nil
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/helper.go b/vendor/github.com/hashicorp/go-cty/cty/helper.go
deleted file mode 100644
index 1b88e9fa..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/helper.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package cty
-
-import (
- "fmt"
-)
-
-// anyUnknown is a helper to easily check if a set of values contains any
-// unknowns, for operations that short-circuit to return unknown in that case.
-func anyUnknown(values ...Value) bool {
- for _, val := range values {
- if val.v == unknown {
- return true
- }
- }
- return false
-}
-
-// typeCheck tests whether all of the given values belong to the given type.
-// If the given types are a mixture of the given type and the dynamic
-// pseudo-type then a short-circuit dynamic value is returned. If the given
-// values are all of the correct type but at least one is unknown then
-// a short-circuit unknown value is returned. If any other types appear then
-// an error is returned. Otherwise (finally!) the result is nil, nil.
-func typeCheck(required Type, ret Type, values ...Value) (shortCircuit *Value, err error) {
- hasDynamic := false
- hasUnknown := false
-
- for i, val := range values {
- if val.ty == DynamicPseudoType {
- hasDynamic = true
- continue
- }
-
- if !val.Type().Equals(required) {
- return nil, fmt.Errorf(
- "type mismatch: want %s but value %d is %s",
- required.FriendlyName(),
- i, val.ty.FriendlyName(),
- )
- }
-
- if val.v == unknown {
- hasUnknown = true
- }
- }
-
- if hasDynamic {
- return &DynamicVal, nil
- }
-
- if hasUnknown {
- ret := UnknownVal(ret)
- return &ret, nil
- }
-
- return nil, nil
-}
-
-// mustTypeCheck is a wrapper around typeCheck that immediately panics if
-// any error is returned.
-func mustTypeCheck(required Type, ret Type, values ...Value) *Value {
- shortCircuit, err := typeCheck(required, ret, values...)
- if err != nil {
- panic(err)
- }
- return shortCircuit
-}
-
-// shortCircuitForceType takes the return value from mustTypeCheck and
-// replaces it with an unknown of the given type if the original value was
-// DynamicVal.
-//
-// This is useful for operations that are specified to always return a
-// particular type, since then a dynamic result can safely be "upgrade" to
-// a strongly-typed unknown, which then allows subsequent operations to
-// be actually type-checked.
-//
-// It is safe to use this only if the operation in question is defined as
-// returning either a value of the given type or panicking, since we know
-// then that subsequent operations won't run if the operation panics.
-//
-// If the given short-circuit value is *not* DynamicVal then it must be
-// of the given type, or this function will panic.
-func forceShortCircuitType(shortCircuit *Value, ty Type) *Value {
- if shortCircuit == nil {
- return nil
- }
-
- if shortCircuit.ty == DynamicPseudoType {
- ret := UnknownVal(ty)
- return &ret
- }
-
- if !shortCircuit.ty.Equals(ty) {
- panic("forceShortCircuitType got value of wrong type")
- }
-
- return shortCircuit
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/json.go b/vendor/github.com/hashicorp/go-cty/cty/json.go
deleted file mode 100644
index c421a62e..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/json.go
+++ /dev/null
@@ -1,176 +0,0 @@
-package cty
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
-)
-
-// MarshalJSON is an implementation of json.Marshaler that allows Type
-// instances to be serialized as JSON.
-//
-// All standard types can be serialized, but capsule types cannot since there
-// is no way to automatically recover the original pointer and capsule types
-// compare by equality.
-func (t Type) MarshalJSON() ([]byte, error) {
- switch impl := t.typeImpl.(type) {
- case primitiveType:
- switch impl.Kind {
- case primitiveTypeBool:
- return []byte{'"', 'b', 'o', 'o', 'l', '"'}, nil
- case primitiveTypeNumber:
- return []byte{'"', 'n', 'u', 'm', 'b', 'e', 'r', '"'}, nil
- case primitiveTypeString:
- return []byte{'"', 's', 't', 'r', 'i', 'n', 'g', '"'}, nil
- default:
- panic("unknown primitive type kind")
- }
- case typeList, typeMap, typeSet:
- buf := &bytes.Buffer{}
- etyJSON, err := t.ElementType().MarshalJSON()
- if err != nil {
- return nil, err
- }
- buf.WriteRune('[')
- switch impl.(type) {
- case typeList:
- buf.WriteString(`"list"`)
- case typeMap:
- buf.WriteString(`"map"`)
- case typeSet:
- buf.WriteString(`"set"`)
- }
- buf.WriteRune(',')
- buf.Write(etyJSON)
- buf.WriteRune(']')
- return buf.Bytes(), nil
- case typeObject:
- buf := &bytes.Buffer{}
- atysJSON, err := json.Marshal(t.AttributeTypes())
- if err != nil {
- return nil, err
- }
- buf.WriteString(`["object",`)
- buf.Write(atysJSON)
- buf.WriteRune(']')
- return buf.Bytes(), nil
- case typeTuple:
- buf := &bytes.Buffer{}
- etysJSON, err := json.Marshal(t.TupleElementTypes())
- if err != nil {
- return nil, err
- }
- buf.WriteString(`["tuple",`)
- buf.Write(etysJSON)
- buf.WriteRune(']')
- return buf.Bytes(), nil
- case pseudoTypeDynamic:
- return []byte{'"', 'd', 'y', 'n', 'a', 'm', 'i', 'c', '"'}, nil
- case *capsuleType:
- return nil, fmt.Errorf("type not allowed: %s", t.FriendlyName())
- default:
- // should never happen
- panic("unknown type implementation")
- }
-}
-
-// UnmarshalJSON is the opposite of MarshalJSON. See the documentation of
-// MarshalJSON for information on the limitations of JSON serialization of
-// types.
-func (t *Type) UnmarshalJSON(buf []byte) error {
- r := bytes.NewReader(buf)
- dec := json.NewDecoder(r)
-
- tok, err := dec.Token()
- if err != nil {
- return err
- }
-
- switch v := tok.(type) {
- case string:
- switch v {
- case "bool":
- *t = Bool
- case "number":
- *t = Number
- case "string":
- *t = String
- case "dynamic":
- *t = DynamicPseudoType
- default:
- return fmt.Errorf("invalid primitive type name %q", v)
- }
-
- if dec.More() {
- return fmt.Errorf("extraneous data after type description")
- }
- return nil
- case json.Delim:
- if rune(v) != '[' {
- return fmt.Errorf("invalid complex type description")
- }
-
- tok, err = dec.Token()
- if err != nil {
- return err
- }
-
- kind, ok := tok.(string)
- if !ok {
- return fmt.Errorf("invalid complex type kind name")
- }
-
- switch kind {
- case "list":
- var ety Type
- err = dec.Decode(&ety)
- if err != nil {
- return err
- }
- *t = List(ety)
- case "map":
- var ety Type
- err = dec.Decode(&ety)
- if err != nil {
- return err
- }
- *t = Map(ety)
- case "set":
- var ety Type
- err = dec.Decode(&ety)
- if err != nil {
- return err
- }
- *t = Set(ety)
- case "object":
- var atys map[string]Type
- err = dec.Decode(&atys)
- if err != nil {
- return err
- }
- *t = Object(atys)
- case "tuple":
- var etys []Type
- err = dec.Decode(&etys)
- if err != nil {
- return err
- }
- *t = Tuple(etys)
- default:
- return fmt.Errorf("invalid complex type kind name")
- }
-
- tok, err = dec.Token()
- if err != nil {
- return err
- }
- if delim, ok := tok.(json.Delim); !ok || rune(delim) != ']' || dec.More() {
- return fmt.Errorf("unexpected extra data in type description")
- }
-
- return nil
-
- default:
- return fmt.Errorf("invalid type description")
- }
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/doc.go b/vendor/github.com/hashicorp/go-cty/cty/json/doc.go
deleted file mode 100644
index 8916513d..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/json/doc.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Package json provides functions for serializing cty types and values in
-// JSON format, and for decoding them again.
-//
-// Since the cty type system is a superset of the JSON type system,
-// round-tripping through JSON is lossy unless type information is provided
-// both at encoding time and decoding time. Callers of this package are
-// therefore suggested to define their expected structure as a cty.Type
-// and pass it in consistently both when encoding and when decoding, though
-// default (type-lossy) behavior is provided for situations where the precise
-// representation of the data is not significant.
-package json
diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/marshal.go b/vendor/github.com/hashicorp/go-cty/cty/json/marshal.go
deleted file mode 100644
index 728ab010..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/json/marshal.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package json
-
-import (
- "bytes"
- "encoding/json"
- "sort"
-
- "github.com/hashicorp/go-cty/cty"
-)
-
-func marshal(val cty.Value, t cty.Type, path cty.Path, b *bytes.Buffer) error {
- if val.IsMarked() {
- return path.NewErrorf("value has marks, so it cannot be seralized")
- }
-
- // If we're going to decode as DynamicPseudoType then we need to save
- // dynamic type information to recover the real type.
- if t == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType {
- return marshalDynamic(val, path, b)
- }
-
- if val.IsNull() {
- b.WriteString("null")
- return nil
- }
-
- if !val.IsKnown() {
- return path.NewErrorf("value is not known")
- }
-
- // The caller should've guaranteed that the given val is conformant with
- // the given type t, so we'll proceed under that assumption here.
-
- switch {
- case t.IsPrimitiveType():
- switch t {
- case cty.String:
- json, err := json.Marshal(val.AsString())
- if err != nil {
- return path.NewErrorf("failed to serialize value: %s", err)
- }
- b.Write(json)
- return nil
- case cty.Number:
- if val.RawEquals(cty.PositiveInfinity) || val.RawEquals(cty.NegativeInfinity) {
- return path.NewErrorf("cannot serialize infinity as JSON")
- }
- b.WriteString(val.AsBigFloat().Text('f', -1))
- return nil
- case cty.Bool:
- if val.True() {
- b.WriteString("true")
- } else {
- b.WriteString("false")
- }
- return nil
- default:
- panic("unsupported primitive type")
- }
- case t.IsListType(), t.IsSetType():
- b.WriteRune('[')
- first := true
- ety := t.ElementType()
- it := val.ElementIterator()
- path := append(path, nil) // local override of 'path' with extra element
- for it.Next() {
- if !first {
- b.WriteRune(',')
- }
- ek, ev := it.Element()
- path[len(path)-1] = cty.IndexStep{
- Key: ek,
- }
- err := marshal(ev, ety, path, b)
- if err != nil {
- return err
- }
- first = false
- }
- b.WriteRune(']')
- return nil
- case t.IsMapType():
- b.WriteRune('{')
- first := true
- ety := t.ElementType()
- it := val.ElementIterator()
- path := append(path, nil) // local override of 'path' with extra element
- for it.Next() {
- if !first {
- b.WriteRune(',')
- }
- ek, ev := it.Element()
- path[len(path)-1] = cty.IndexStep{
- Key: ek,
- }
- var err error
- err = marshal(ek, ek.Type(), path, b)
- if err != nil {
- return err
- }
- b.WriteRune(':')
- err = marshal(ev, ety, path, b)
- if err != nil {
- return err
- }
- first = false
- }
- b.WriteRune('}')
- return nil
- case t.IsTupleType():
- b.WriteRune('[')
- etys := t.TupleElementTypes()
- it := val.ElementIterator()
- path := append(path, nil) // local override of 'path' with extra element
- i := 0
- for it.Next() {
- if i > 0 {
- b.WriteRune(',')
- }
- ety := etys[i]
- ek, ev := it.Element()
- path[len(path)-1] = cty.IndexStep{
- Key: ek,
- }
- err := marshal(ev, ety, path, b)
- if err != nil {
- return err
- }
- i++
- }
- b.WriteRune(']')
- return nil
- case t.IsObjectType():
- b.WriteRune('{')
- atys := t.AttributeTypes()
- path := append(path, nil) // local override of 'path' with extra element
-
- names := make([]string, 0, len(atys))
- for k := range atys {
- names = append(names, k)
- }
- sort.Strings(names)
-
- for i, k := range names {
- aty := atys[k]
- if i > 0 {
- b.WriteRune(',')
- }
- av := val.GetAttr(k)
- path[len(path)-1] = cty.GetAttrStep{
- Name: k,
- }
- var err error
- err = marshal(cty.StringVal(k), cty.String, path, b)
- if err != nil {
- return err
- }
- b.WriteRune(':')
- err = marshal(av, aty, path, b)
- if err != nil {
- return err
- }
- }
- b.WriteRune('}')
- return nil
- case t.IsCapsuleType():
- rawVal := val.EncapsulatedValue()
- jsonVal, err := json.Marshal(rawVal)
- if err != nil {
- return path.NewError(err)
- }
- b.Write(jsonVal)
- return nil
- default:
- // should never happen
- return path.NewErrorf("cannot JSON-serialize %s", t.FriendlyName())
- }
-}
-
-// marshalDynamic adds an extra wrapping object containing dynamic type
-// information for the given value.
-func marshalDynamic(val cty.Value, path cty.Path, b *bytes.Buffer) error {
- typeJSON, err := MarshalType(val.Type())
- if err != nil {
- return path.NewErrorf("failed to serialize type: %s", err)
- }
- b.WriteString(`{"value":`)
- marshal(val, val.Type(), path, b)
- b.WriteString(`,"type":`)
- b.Write(typeJSON)
- b.WriteRune('}')
- return nil
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/simple.go b/vendor/github.com/hashicorp/go-cty/cty/json/simple.go
deleted file mode 100644
index aaba8c3b..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/json/simple.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package json
-
-import (
- "github.com/hashicorp/go-cty/cty"
-)
-
-// SimpleJSONValue is a wrapper around cty.Value that adds implementations of
-// json.Marshaler and json.Unmarshaler for simple-but-type-lossy automatic
-// encoding and decoding of values.
-//
-// The couplet Marshal and Unmarshal both take extra type information to
-// inform the encoding and decoding process so that all of the cty types
-// can be represented even though JSON's type system is a subset.
-//
-// SimpleJSONValue instead takes the approach of discarding the value's type
-// information and then deriving a new type from the stored structure when
-// decoding. This results in the same data being returned but not necessarily
-// with exactly the same type.
-//
-// For information on how types are inferred when decoding, see the
-// documentation of the function ImpliedType.
-type SimpleJSONValue struct {
- cty.Value
-}
-
-// MarshalJSON is an implementation of json.Marshaler. See the documentation
-// of SimpleJSONValue for more information.
-func (v SimpleJSONValue) MarshalJSON() ([]byte, error) {
- return Marshal(v.Value, v.Type())
-}
-
-// UnmarshalJSON is an implementation of json.Unmarshaler. See the
-// documentation of SimpleJSONValue for more information.
-func (v *SimpleJSONValue) UnmarshalJSON(buf []byte) error {
- t, err := ImpliedType(buf)
- if err != nil {
- return err
- }
- v.Value, err = Unmarshal(buf, t)
- return err
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/type.go b/vendor/github.com/hashicorp/go-cty/cty/json/type.go
deleted file mode 100644
index 59d7f2e1..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/json/type.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package json
-
-import (
- "github.com/hashicorp/go-cty/cty"
-)
-
-// MarshalType returns a JSON serialization of the given type.
-//
-// This is just a thin wrapper around t.MarshalJSON, for symmetry with
-// UnmarshalType.
-func MarshalType(t cty.Type) ([]byte, error) {
- return t.MarshalJSON()
-}
-
-// UnmarshalType decodes a JSON serialization of the given type as produced
-// by either Type.MarshalJSON or MarshalType.
-//
-// This is a convenience wrapper around Type.UnmarshalJSON.
-func UnmarshalType(buf []byte) (cty.Type, error) {
- var t cty.Type
- err := t.UnmarshalJSON(buf)
- return t, err
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/type_implied.go b/vendor/github.com/hashicorp/go-cty/cty/json/type_implied.go
deleted file mode 100644
index 8adf22bb..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/json/type_implied.go
+++ /dev/null
@@ -1,170 +0,0 @@
-package json
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
-
- "github.com/hashicorp/go-cty/cty"
-)
-
-// ImpliedType returns the cty Type implied by the structure of the given
-// JSON-compliant buffer. This function implements the default type mapping
-// behavior used when decoding arbitrary JSON without explicit cty Type
-// information.
-//
-// The rules are as follows:
-//
-// JSON strings, numbers and bools map to their equivalent primitive type in
-// cty.
-//
-// JSON objects map to cty object types, with the attributes defined by the
-// object keys and the types of their values.
-//
-// JSON arrays map to cty tuple types, with the elements defined by the
-// types of the array members.
-//
-// Any nulls are typed as DynamicPseudoType, so callers of this function
-// must be prepared to deal with this. Callers that do not wish to deal with
-// dynamic typing should not use this function and should instead describe
-// their required types explicitly with a cty.Type instance when decoding.
-//
-// Any JSON syntax errors will be returned as an error, and the type will
-// be the invalid value cty.NilType.
-func ImpliedType(buf []byte) (cty.Type, error) {
- r := bytes.NewReader(buf)
- dec := json.NewDecoder(r)
- dec.UseNumber()
-
- ty, err := impliedType(dec)
- if err != nil {
- return cty.NilType, err
- }
-
- if dec.More() {
- return cty.NilType, fmt.Errorf("extraneous data after JSON object")
- }
-
- return ty, nil
-}
-
-func impliedType(dec *json.Decoder) (cty.Type, error) {
- tok, err := dec.Token()
- if err != nil {
- return cty.NilType, err
- }
-
- return impliedTypeForTok(tok, dec)
-}
-
-func impliedTypeForTok(tok json.Token, dec *json.Decoder) (cty.Type, error) {
- if tok == nil {
- return cty.DynamicPseudoType, nil
- }
-
- switch ttok := tok.(type) {
- case bool:
- return cty.Bool, nil
-
- case json.Number:
- return cty.Number, nil
-
- case string:
- return cty.String, nil
-
- case json.Delim:
-
- switch rune(ttok) {
- case '{':
- return impliedObjectType(dec)
- case '[':
- return impliedTupleType(dec)
- default:
- return cty.NilType, fmt.Errorf("unexpected token %q", ttok)
- }
-
- default:
- return cty.NilType, fmt.Errorf("unsupported JSON token %#v", tok)
- }
-}
-
-func impliedObjectType(dec *json.Decoder) (cty.Type, error) {
- // By the time we get in here, we've already consumed the { delimiter
- // and so our next token should be the first object key.
-
- var atys map[string]cty.Type
-
- for {
- // Read the object key first
- tok, err := dec.Token()
- if err != nil {
- return cty.NilType, err
- }
-
- if ttok, ok := tok.(json.Delim); ok {
- if rune(ttok) != '}' {
- return cty.NilType, fmt.Errorf("unexpected delimiter %q", ttok)
- }
- break
- }
-
- key, ok := tok.(string)
- if !ok {
- return cty.NilType, fmt.Errorf("expected string but found %T", tok)
- }
-
- // Now read the value
- tok, err = dec.Token()
- if err != nil {
- return cty.NilType, err
- }
-
- aty, err := impliedTypeForTok(tok, dec)
- if err != nil {
- return cty.NilType, err
- }
-
- if atys == nil {
- atys = make(map[string]cty.Type)
- }
- atys[key] = aty
- }
-
- if len(atys) == 0 {
- return cty.EmptyObject, nil
- }
-
- return cty.Object(atys), nil
-}
-
-func impliedTupleType(dec *json.Decoder) (cty.Type, error) {
- // By the time we get in here, we've already consumed the [ delimiter
- // and so our next token should be the first value.
-
- var etys []cty.Type
-
- for {
- tok, err := dec.Token()
- if err != nil {
- return cty.NilType, err
- }
-
- if ttok, ok := tok.(json.Delim); ok {
- if rune(ttok) == ']' {
- break
- }
- }
-
- ety, err := impliedTypeForTok(tok, dec)
- if err != nil {
- return cty.NilType, err
- }
- etys = append(etys, ety)
- }
-
- if len(etys) == 0 {
- return cty.EmptyTuple, nil
- }
-
- return cty.Tuple(etys), nil
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/unmarshal.go b/vendor/github.com/hashicorp/go-cty/cty/json/unmarshal.go
deleted file mode 100644
index 5ad190d3..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/json/unmarshal.go
+++ /dev/null
@@ -1,459 +0,0 @@
-package json
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "reflect"
-
- "github.com/hashicorp/go-cty/cty"
- "github.com/hashicorp/go-cty/cty/convert"
-)
-
-func unmarshal(buf []byte, t cty.Type, path cty.Path) (cty.Value, error) {
- dec := bufDecoder(buf)
-
- tok, err := dec.Token()
- if err != nil {
- return cty.NilVal, path.NewError(err)
- }
-
- if tok == nil {
- return cty.NullVal(t), nil
- }
-
- if t == cty.DynamicPseudoType {
- return unmarshalDynamic(buf, path)
- }
-
- switch {
- case t.IsPrimitiveType():
- val, err := unmarshalPrimitive(tok, t, path)
- if err != nil {
- return cty.NilVal, err
- }
- return val, nil
- case t.IsListType():
- return unmarshalList(buf, t.ElementType(), path)
- case t.IsSetType():
- return unmarshalSet(buf, t.ElementType(), path)
- case t.IsMapType():
- return unmarshalMap(buf, t.ElementType(), path)
- case t.IsTupleType():
- return unmarshalTuple(buf, t.TupleElementTypes(), path)
- case t.IsObjectType():
- return unmarshalObject(buf, t.AttributeTypes(), path)
- case t.IsCapsuleType():
- return unmarshalCapsule(buf, t, path)
- default:
- return cty.NilVal, path.NewErrorf("unsupported type %s", t.FriendlyName())
- }
-}
-
-func unmarshalPrimitive(tok json.Token, t cty.Type, path cty.Path) (cty.Value, error) {
-
- switch t {
- case cty.Bool:
- switch v := tok.(type) {
- case bool:
- return cty.BoolVal(v), nil
- case string:
- val, err := convert.Convert(cty.StringVal(v), t)
- if err != nil {
- return cty.NilVal, path.NewError(err)
- }
- return val, nil
- default:
- return cty.NilVal, path.NewErrorf("bool is required")
- }
- case cty.Number:
- if v, ok := tok.(json.Number); ok {
- tok = string(v)
- }
- switch v := tok.(type) {
- case string:
- val, err := cty.ParseNumberVal(v)
- if err != nil {
- return cty.NilVal, path.NewError(err)
- }
- return val, nil
- default:
- return cty.NilVal, path.NewErrorf("number is required")
- }
- case cty.String:
- switch v := tok.(type) {
- case string:
- return cty.StringVal(v), nil
- case json.Number:
- return cty.StringVal(string(v)), nil
- case bool:
- val, err := convert.Convert(cty.BoolVal(v), t)
- if err != nil {
- return cty.NilVal, path.NewError(err)
- }
- return val, nil
- default:
- return cty.NilVal, path.NewErrorf("string is required")
- }
- default:
- // should never happen
- panic("unsupported primitive type")
- }
-}
-
-func unmarshalList(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) {
- dec := bufDecoder(buf)
- if err := requireDelim(dec, '['); err != nil {
- return cty.NilVal, path.NewError(err)
- }
-
- var vals []cty.Value
-
- {
- path := append(path, nil)
- var idx int64
-
- for dec.More() {
- path[len(path)-1] = cty.IndexStep{
- Key: cty.NumberIntVal(idx),
- }
- idx++
-
- rawVal, err := readRawValue(dec)
- if err != nil {
- return cty.NilVal, path.NewErrorf("failed to read list value: %s", err)
- }
-
- el, err := unmarshal(rawVal, ety, path)
- if err != nil {
- return cty.NilVal, err
- }
-
- vals = append(vals, el)
- }
- }
-
- if err := requireDelim(dec, ']'); err != nil {
- return cty.NilVal, path.NewError(err)
- }
-
- if len(vals) == 0 {
- return cty.ListValEmpty(ety), nil
- }
-
- return cty.ListVal(vals), nil
-}
-
-func unmarshalSet(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) {
- dec := bufDecoder(buf)
- if err := requireDelim(dec, '['); err != nil {
- return cty.NilVal, path.NewError(err)
- }
-
- var vals []cty.Value
-
- {
- path := append(path, nil)
-
- for dec.More() {
- path[len(path)-1] = cty.IndexStep{
- Key: cty.UnknownVal(ety),
- }
-
- rawVal, err := readRawValue(dec)
- if err != nil {
- return cty.NilVal, path.NewErrorf("failed to read set value: %s", err)
- }
-
- el, err := unmarshal(rawVal, ety, path)
- if err != nil {
- return cty.NilVal, err
- }
-
- vals = append(vals, el)
- }
- }
-
- if err := requireDelim(dec, ']'); err != nil {
- return cty.NilVal, path.NewError(err)
- }
-
- if len(vals) == 0 {
- return cty.SetValEmpty(ety), nil
- }
-
- return cty.SetVal(vals), nil
-}
-
-func unmarshalMap(buf []byte, ety cty.Type, path cty.Path) (cty.Value, error) {
- dec := bufDecoder(buf)
- if err := requireDelim(dec, '{'); err != nil {
- return cty.NilVal, path.NewError(err)
- }
-
- vals := make(map[string]cty.Value)
-
- {
- path := append(path, nil)
-
- for dec.More() {
- path[len(path)-1] = cty.IndexStep{
- Key: cty.UnknownVal(cty.String),
- }
-
- var err error
-
- k, err := requireObjectKey(dec)
- if err != nil {
- return cty.NilVal, path.NewErrorf("failed to read map key: %s", err)
- }
-
- path[len(path)-1] = cty.IndexStep{
- Key: cty.StringVal(k),
- }
-
- rawVal, err := readRawValue(dec)
- if err != nil {
- return cty.NilVal, path.NewErrorf("failed to read map value: %s", err)
- }
-
- el, err := unmarshal(rawVal, ety, path)
- if err != nil {
- return cty.NilVal, err
- }
-
- vals[k] = el
- }
- }
-
- if err := requireDelim(dec, '}'); err != nil {
- return cty.NilVal, path.NewError(err)
- }
-
- if len(vals) == 0 {
- return cty.MapValEmpty(ety), nil
- }
-
- return cty.MapVal(vals), nil
-}
-
-func unmarshalTuple(buf []byte, etys []cty.Type, path cty.Path) (cty.Value, error) {
- dec := bufDecoder(buf)
- if err := requireDelim(dec, '['); err != nil {
- return cty.NilVal, path.NewError(err)
- }
-
- var vals []cty.Value
-
- {
- path := append(path, nil)
- var idx int
-
- for dec.More() {
- if idx >= len(etys) {
- return cty.NilVal, path[:len(path)-1].NewErrorf("too many tuple elements (need %d)", len(etys))
- }
-
- path[len(path)-1] = cty.IndexStep{
- Key: cty.NumberIntVal(int64(idx)),
- }
- ety := etys[idx]
- idx++
-
- rawVal, err := readRawValue(dec)
- if err != nil {
- return cty.NilVal, path.NewErrorf("failed to read tuple value: %s", err)
- }
-
- el, err := unmarshal(rawVal, ety, path)
- if err != nil {
- return cty.NilVal, err
- }
-
- vals = append(vals, el)
- }
- }
-
- if err := requireDelim(dec, ']'); err != nil {
- return cty.NilVal, path.NewError(err)
- }
-
- if len(vals) != len(etys) {
- return cty.NilVal, path[:len(path)-1].NewErrorf("not enough tuple elements (need %d)", len(etys))
- }
-
- if len(vals) == 0 {
- return cty.EmptyTupleVal, nil
- }
-
- return cty.TupleVal(vals), nil
-}
-
-func unmarshalObject(buf []byte, atys map[string]cty.Type, path cty.Path) (cty.Value, error) {
- dec := bufDecoder(buf)
- if err := requireDelim(dec, '{'); err != nil {
- return cty.NilVal, path.NewError(err)
- }
-
- vals := make(map[string]cty.Value)
-
- {
- objPath := path // some errors report from the object's perspective
- path := append(path, nil) // path to a specific attribute
-
- for dec.More() {
-
- var err error
-
- k, err := requireObjectKey(dec)
- if err != nil {
- return cty.NilVal, path.NewErrorf("failed to read object key: %s", err)
- }
-
- aty, ok := atys[k]
- if !ok {
- return cty.NilVal, objPath.NewErrorf("unsupported attribute %q", k)
- }
-
- path[len(path)-1] = cty.GetAttrStep{
- Name: k,
- }
-
- rawVal, err := readRawValue(dec)
- if err != nil {
- return cty.NilVal, path.NewErrorf("failed to read object value: %s", err)
- }
-
- el, err := unmarshal(rawVal, aty, path)
- if err != nil {
- return cty.NilVal, err
- }
-
- vals[k] = el
- }
- }
-
- if err := requireDelim(dec, '}'); err != nil {
- return cty.NilVal, path.NewError(err)
- }
-
- // Make sure we have a value for every attribute
- for k, aty := range atys {
- if _, exists := vals[k]; !exists {
- vals[k] = cty.NullVal(aty)
- }
- }
-
- if len(vals) == 0 {
- return cty.EmptyObjectVal, nil
- }
-
- return cty.ObjectVal(vals), nil
-}
-
-func unmarshalCapsule(buf []byte, t cty.Type, path cty.Path) (cty.Value, error) {
- rawType := t.EncapsulatedType()
- ptrPtr := reflect.New(reflect.PtrTo(rawType))
- ptrPtr.Elem().Set(reflect.New(rawType))
- ptr := ptrPtr.Elem().Interface()
- err := json.Unmarshal(buf, ptr)
- if err != nil {
- return cty.NilVal, path.NewError(err)
- }
-
- return cty.CapsuleVal(t, ptr), nil
-}
-
-func unmarshalDynamic(buf []byte, path cty.Path) (cty.Value, error) {
- dec := bufDecoder(buf)
- if err := requireDelim(dec, '{'); err != nil {
- return cty.NilVal, path.NewError(err)
- }
-
- var t cty.Type
- var valBody []byte // defer actual decoding until we know the type
-
- for dec.More() {
- var err error
-
- key, err := requireObjectKey(dec)
- if err != nil {
- return cty.NilVal, path.NewErrorf("failed to read dynamic type descriptor key: %s", err)
- }
-
- rawVal, err := readRawValue(dec)
- if err != nil {
- return cty.NilVal, path.NewErrorf("failed to read dynamic type descriptor value: %s", err)
- }
-
- switch key {
- case "type":
- err := json.Unmarshal(rawVal, &t)
- if err != nil {
- return cty.NilVal, path.NewErrorf("failed to decode type for dynamic value: %s", err)
- }
- case "value":
- valBody = rawVal
- default:
- return cty.NilVal, path.NewErrorf("invalid key %q in dynamically-typed value", key)
- }
-
- }
-
- if err := requireDelim(dec, '}'); err != nil {
- return cty.NilVal, path.NewError(err)
- }
-
- if t == cty.NilType {
- return cty.NilVal, path.NewErrorf("missing type in dynamically-typed value")
- }
- if valBody == nil {
- return cty.NilVal, path.NewErrorf("missing value in dynamically-typed value")
- }
-
- val, err := Unmarshal([]byte(valBody), t)
- if err != nil {
- return cty.NilVal, path.NewError(err)
- }
- return val, nil
-}
-
-func requireDelim(dec *json.Decoder, d rune) error {
- tok, err := dec.Token()
- if err != nil {
- return err
- }
-
- if tok != json.Delim(d) {
- return fmt.Errorf("missing expected %c", d)
- }
-
- return nil
-}
-
-func requireObjectKey(dec *json.Decoder) (string, error) {
- tok, err := dec.Token()
- if err != nil {
- return "", err
- }
- if s, ok := tok.(string); ok {
- return s, nil
- }
- return "", fmt.Errorf("missing expected object key")
-}
-
-func readRawValue(dec *json.Decoder) ([]byte, error) {
- var rawVal json.RawMessage
- err := dec.Decode(&rawVal)
- if err != nil {
- return nil, err
- }
- return []byte(rawVal), nil
-}
-
-func bufDecoder(buf []byte) *json.Decoder {
- r := bytes.NewReader(buf)
- dec := json.NewDecoder(r)
- dec.UseNumber()
- return dec
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/json/value.go b/vendor/github.com/hashicorp/go-cty/cty/json/value.go
deleted file mode 100644
index 50748f70..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/json/value.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package json
-
-import (
- "bytes"
-
- "github.com/hashicorp/go-cty/cty"
- "github.com/hashicorp/go-cty/cty/convert"
-)
-
-// Marshal produces a JSON representation of the given value that can later
-// be decoded into a value of the given type.
-//
-// A type is specified separately to allow for the given type to include
-// cty.DynamicPseudoType to represent situations where any type is permitted
-// and so type information must be included to allow recovery of the stored
-// structure when decoding.
-//
-// The given type will also be used to attempt automatic conversions of any
-// non-conformant types in the given value, although this will not always
-// be possible. If the value cannot be made to be conformant then an error is
-// returned, which may be a cty.PathError.
-//
-// Capsule-typed values can be marshalled, but with some caveats. Since
-// capsule values are compared by pointer equality, it is impossible to recover
-// a value that will compare equal to the original value. Additionally,
-// it's not possible to JSON-serialize the capsule type itself, so it's not
-// valid to use capsule types within parts of the value that are conformed to
-// cty.DynamicPseudoType. Otherwise, a capsule value can be used as long as
-// the encapsulated type itself is serializable with the Marshal function
-// in encoding/json.
-func Marshal(val cty.Value, t cty.Type) ([]byte, error) {
- errs := val.Type().TestConformance(t)
- if errs != nil {
- // Attempt a conversion
- var err error
- val, err = convert.Convert(val, t)
- if err != nil {
- return nil, err
- }
- }
-
- // From this point onward, val can be assumed to be conforming to t.
-
- buf := &bytes.Buffer{}
- var path cty.Path
- err := marshal(val, t, path, buf)
-
- if err != nil {
- return nil, err
- }
-
- return buf.Bytes(), nil
-}
-
-// Unmarshal decodes a JSON representation of the given value into a cty Value
-// conforming to the given type.
-//
-// While decoding, type conversions will be done where possible to make
-// the result conformant even if the types given in JSON are not exactly
-// correct. If conversion isn't possible then an error is returned, which
-// may be a cty.PathError.
-func Unmarshal(buf []byte, t cty.Type) (cty.Value, error) {
- var path cty.Path
- return unmarshal(buf, t, path)
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/list_type.go b/vendor/github.com/hashicorp/go-cty/cty/list_type.go
deleted file mode 100644
index 2ef02a12..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/list_type.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package cty
-
-import (
- "fmt"
-)
-
-// TypeList instances represent specific list types. Each distinct ElementType
-// creates a distinct, non-equal list type.
-type typeList struct {
- typeImplSigil
- ElementTypeT Type
-}
-
-// List creates a map type with the given element Type.
-//
-// List types are CollectionType implementations.
-func List(elem Type) Type {
- return Type{
- typeList{
- ElementTypeT: elem,
- },
- }
-}
-
-// Equals returns true if the other Type is a list whose element type is
-// equal to that of the receiver.
-func (t typeList) Equals(other Type) bool {
- ot, isList := other.typeImpl.(typeList)
- if !isList {
- return false
- }
-
- return t.ElementTypeT.Equals(ot.ElementTypeT)
-}
-
-func (t typeList) FriendlyName(mode friendlyTypeNameMode) string {
- elemName := t.ElementTypeT.friendlyNameMode(mode)
- if mode == friendlyTypeConstraintName {
- if t.ElementTypeT == DynamicPseudoType {
- elemName = "any single type"
- }
- }
- return "list of " + elemName
-}
-
-func (t typeList) ElementType() Type {
- return t.ElementTypeT
-}
-
-func (t typeList) GoString() string {
- return fmt.Sprintf("cty.List(%#v)", t.ElementTypeT)
-}
-
-// IsListType returns true if the given type is a list type, regardless of its
-// element type.
-func (t Type) IsListType() bool {
- _, ok := t.typeImpl.(typeList)
- return ok
-}
-
-// ListElementType is a convenience method that checks if the given type is
-// a list type, returning a pointer to its element type if so and nil
-// otherwise. This is intended to allow convenient conditional branches,
-// like so:
-//
-// if et := t.ListElementType(); et != nil {
-// // Do something with *et
-// }
-func (t Type) ListElementType() *Type {
- if lt, ok := t.typeImpl.(typeList); ok {
- return <.ElementTypeT
- }
- return nil
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/map_type.go b/vendor/github.com/hashicorp/go-cty/cty/map_type.go
deleted file mode 100644
index 82d36c62..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/map_type.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package cty
-
-import (
- "fmt"
-)
-
-// TypeList instances represent specific list types. Each distinct ElementType
-// creates a distinct, non-equal list type.
-type typeMap struct {
- typeImplSigil
- ElementTypeT Type
-}
-
-// Map creates a map type with the given element Type.
-//
-// Map types are CollectionType implementations.
-func Map(elem Type) Type {
- return Type{
- typeMap{
- ElementTypeT: elem,
- },
- }
-}
-
-// Equals returns true if the other Type is a map whose element type is
-// equal to that of the receiver.
-func (t typeMap) Equals(other Type) bool {
- ot, isMap := other.typeImpl.(typeMap)
- if !isMap {
- return false
- }
-
- return t.ElementTypeT.Equals(ot.ElementTypeT)
-}
-
-func (t typeMap) FriendlyName(mode friendlyTypeNameMode) string {
- elemName := t.ElementTypeT.friendlyNameMode(mode)
- if mode == friendlyTypeConstraintName {
- if t.ElementTypeT == DynamicPseudoType {
- elemName = "any single type"
- }
- }
- return "map of " + elemName
-}
-
-func (t typeMap) ElementType() Type {
- return t.ElementTypeT
-}
-
-func (t typeMap) GoString() string {
- return fmt.Sprintf("cty.Map(%#v)", t.ElementTypeT)
-}
-
-// IsMapType returns true if the given type is a list type, regardless of its
-// element type.
-func (t Type) IsMapType() bool {
- _, ok := t.typeImpl.(typeMap)
- return ok
-}
-
-// MapElementType is a convenience method that checks if the given type is
-// a map type, returning a pointer to its element type if so and nil
-// otherwise. This is intended to allow convenient conditional branches,
-// like so:
-//
-// if et := t.MapElementType(); et != nil {
-// // Do something with *et
-// }
-func (t Type) MapElementType() *Type {
- if lt, ok := t.typeImpl.(typeMap); ok {
- return <.ElementTypeT
- }
- return nil
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/marks.go b/vendor/github.com/hashicorp/go-cty/cty/marks.go
deleted file mode 100644
index 3898e455..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/marks.go
+++ /dev/null
@@ -1,296 +0,0 @@
-package cty
-
-import (
- "fmt"
- "strings"
-)
-
-// marker is an internal wrapper type used to add special "marks" to values.
-//
-// A "mark" is an annotation that can be used to represent additional
-// characteristics of values that propagate through operation methods to
-// result values. However, a marked value cannot be used with integration
-// methods normally associated with its type, in order to ensure that
-// calling applications don't inadvertently drop marks as they round-trip
-// values out of cty and back in again.
-//
-// Marked values are created only explicitly by the calling application, so
-// an application that never marks a value does not need to worry about
-// encountering marked values.
-type marker struct {
- realV interface{}
- marks ValueMarks
-}
-
-// ValueMarks is a map, representing a set, of "mark" values associated with
-// a Value. See Value.Mark for more information on the usage of mark values.
-type ValueMarks map[interface{}]struct{}
-
-// NewValueMarks constructs a new ValueMarks set with the given mark values.
-func NewValueMarks(marks ...interface{}) ValueMarks {
- if len(marks) == 0 {
- return nil
- }
- ret := make(ValueMarks, len(marks))
- for _, v := range marks {
- ret[v] = struct{}{}
- }
- return ret
-}
-
-// Equal returns true if the receiver and the given ValueMarks both contain
-// the same marks.
-func (m ValueMarks) Equal(o ValueMarks) bool {
- if len(m) != len(o) {
- return false
- }
- for v := range m {
- if _, ok := o[v]; !ok {
- return false
- }
- }
- return true
-}
-
-func (m ValueMarks) GoString() string {
- var s strings.Builder
- s.WriteString("cty.NewValueMarks(")
- i := 0
- for mv := range m {
- if i != 0 {
- s.WriteString(", ")
- }
- s.WriteString(fmt.Sprintf("%#v", mv))
- i++
- }
- s.WriteString(")")
- return s.String()
-}
-
-// IsMarked returns true if and only if the receiving value carries at least
-// one mark. A marked value cannot be used directly with integration methods
-// without explicitly unmarking it (and retrieving the markings) first.
-func (val Value) IsMarked() bool {
- _, ok := val.v.(marker)
- return ok
-}
-
-// HasMark returns true if and only if the receiving value has the given mark.
-func (val Value) HasMark(mark interface{}) bool {
- if mr, ok := val.v.(marker); ok {
- _, ok := mr.marks[mark]
- return ok
- }
- return false
-}
-
-// ContainsMarked returns true if the receiving value or any value within it
-// is marked.
-//
-// This operation is relatively expensive. If you only need a shallow result,
-// use IsMarked instead.
-func (val Value) ContainsMarked() bool {
- ret := false
- Walk(val, func(_ Path, v Value) (bool, error) {
- if v.IsMarked() {
- ret = true
- return false, nil
- }
- return true, nil
- })
- return ret
-}
-
-func (val Value) assertUnmarked() {
- if val.IsMarked() {
- panic("value is marked, so must be unmarked first")
- }
-}
-
-// Marks returns a map (representing a set) of all of the mark values
-// associated with the receiving value, without changing the marks. Returns nil
-// if the value is not marked at all.
-func (val Value) Marks() ValueMarks {
- if mr, ok := val.v.(marker); ok {
- // copy so that the caller can't mutate our internals
- ret := make(ValueMarks, len(mr.marks))
- for k, v := range mr.marks {
- ret[k] = v
- }
- return ret
- }
- return nil
-}
-
-// HasSameMarks returns true if an only if the receiver and the given other
-// value have identical marks.
-func (val Value) HasSameMarks(other Value) bool {
- vm, vmOK := val.v.(marker)
- om, omOK := other.v.(marker)
- if vmOK != omOK {
- return false
- }
- if vmOK {
- return vm.marks.Equal(om.marks)
- }
- return true
-}
-
-// Mark returns a new value that as the same type and underlying value as
-// the receiver but that also carries the given value as a "mark".
-//
-// Marks are used to carry additional application-specific characteristics
-// associated with values. A marked value can be used with operation methods,
-// in which case the marks are propagated to the operation results. A marked
-// value _cannot_ be used with integration methods, so callers of those
-// must derive an unmarked value using Unmark (and thus explicitly handle
-// the markings) before calling the integration methods.
-//
-// The mark value can be any value that would be valid to use as a map key.
-// The mark value should be of a named type in order to use the type itself
-// as a namespace for markings. That type can be unexported if desired, in
-// order to ensure that the mark can only be handled through the defining
-// package's own functions.
-//
-// An application that never calls this method does not need to worry about
-// handling marked values.
-func (val Value) Mark(mark interface{}) Value {
- var newMarker marker
- newMarker.realV = val.v
- if mr, ok := val.v.(marker); ok {
- // It's already a marker, so we'll retain existing marks.
- newMarker.marks = make(ValueMarks, len(mr.marks)+1)
- for k, v := range mr.marks {
- newMarker.marks[k] = v
- }
- } else {
- // It's not a marker yet, so we're creating the first mark.
- newMarker.marks = make(ValueMarks, 1)
- }
- newMarker.marks[mark] = struct{}{}
- return Value{
- ty: val.ty,
- v: newMarker,
- }
-}
-
-// Unmark separates the marks of the receiving value from the value itself,
-// removing a new unmarked value and a map (representing a set) of the marks.
-//
-// If the receiver isn't marked, Unmark returns it verbatim along with a nil
-// map of marks.
-func (val Value) Unmark() (Value, ValueMarks) {
- if !val.IsMarked() {
- return val, nil
- }
- mr := val.v.(marker)
- marks := val.Marks() // copy so that the caller can't mutate our internals
- return Value{
- ty: val.ty,
- v: mr.realV,
- }, marks
-}
-
-// UnmarkDeep is similar to Unmark, but it works with an entire nested structure
-// rather than just the given value directly.
-//
-// The result is guaranteed to contain no nested values that are marked, and
-// the returned marks set includes the superset of all of the marks encountered
-// during the operation.
-func (val Value) UnmarkDeep() (Value, ValueMarks) {
- marks := make(ValueMarks)
- ret, _ := Transform(val, func(_ Path, v Value) (Value, error) {
- unmarkedV, valueMarks := v.Unmark()
- for m, s := range valueMarks {
- marks[m] = s
- }
- return unmarkedV, nil
- })
- return ret, marks
-}
-
-func (val Value) unmarkForce() Value {
- unw, _ := val.Unmark()
- return unw
-}
-
-// WithMarks returns a new value that has the same type and underlying value
-// as the receiver and also has the marks from the given maps (representing
-// sets).
-func (val Value) WithMarks(marks ...ValueMarks) Value {
- if len(marks) == 0 {
- return val
- }
- ownMarks := val.Marks()
- markCount := len(ownMarks)
- for _, s := range marks {
- markCount += len(s)
- }
- if markCount == 0 {
- return val
- }
- newMarks := make(ValueMarks, markCount)
- for m := range ownMarks {
- newMarks[m] = struct{}{}
- }
- for _, s := range marks {
- for m := range s {
- newMarks[m] = struct{}{}
- }
- }
- v := val.v
- if mr, ok := v.(marker); ok {
- v = mr.realV
- }
- return Value{
- ty: val.ty,
- v: marker{
- realV: v,
- marks: newMarks,
- },
- }
-}
-
-// WithSameMarks returns a new value that has the same type and underlying
-// value as the receiver and also has the marks from the given source values.
-//
-// Use this if you are implementing your own higher-level operations against
-// cty using the integration methods, to re-introduce the marks from the
-// source values of the operation.
-func (val Value) WithSameMarks(srcs ...Value) Value {
- if len(srcs) == 0 {
- return val
- }
- ownMarks := val.Marks()
- markCount := len(ownMarks)
- for _, sv := range srcs {
- if mr, ok := sv.v.(marker); ok {
- markCount += len(mr.marks)
- }
- }
- if markCount == 0 {
- return val
- }
- newMarks := make(ValueMarks, markCount)
- for m := range ownMarks {
- newMarks[m] = struct{}{}
- }
- for _, sv := range srcs {
- if mr, ok := sv.v.(marker); ok {
- for m := range mr.marks {
- newMarks[m] = struct{}{}
- }
- }
- }
- v := val.v
- if mr, ok := v.(marker); ok {
- v = mr.realV
- }
- return Value{
- ty: val.ty,
- v: marker{
- realV: v,
- marks: newMarks,
- },
- }
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/msgpack/doc.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/doc.go
deleted file mode 100644
index 1eb99f28..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/msgpack/doc.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Package msgpack provides functions for serializing cty values in the
-// msgpack encoding, and decoding them again.
-//
-// If the same type information is provided both at encoding and decoding time
-// then values can be round-tripped without loss, except for capsule types
-// which are not currently supported.
-//
-// If any unknown values are passed to Marshal then they will be represented
-// using a msgpack extension with type code zero, which is understood by
-// the Unmarshal function within this package but will not be understood by
-// a generic (non-cty-aware) msgpack decoder. Ensure that no unknown values
-// are used if interoperability with other msgpack implementations is
-// required.
-package msgpack
diff --git a/vendor/github.com/hashicorp/go-cty/cty/msgpack/dynamic.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/dynamic.go
deleted file mode 100644
index ce59d9ff..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/msgpack/dynamic.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package msgpack
-
-import (
- "bytes"
-
- "github.com/hashicorp/go-cty/cty"
- "github.com/vmihailenco/msgpack"
-)
-
-type dynamicVal struct {
- Value cty.Value
- Path cty.Path
-}
-
-func (dv *dynamicVal) MarshalMsgpack() ([]byte, error) {
- // Rather than defining a msgpack-specific serialization of types,
- // instead we use the existing JSON serialization.
- typeJSON, err := dv.Value.Type().MarshalJSON()
- if err != nil {
- return nil, dv.Path.NewErrorf("failed to serialize type: %s", err)
- }
- var buf bytes.Buffer
- enc := msgpack.NewEncoder(&buf)
- enc.EncodeArrayLen(2)
- enc.EncodeBytes(typeJSON)
- err = marshal(dv.Value, dv.Value.Type(), dv.Path, enc)
- if err != nil {
- return nil, err
- }
- return buf.Bytes(), nil
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/msgpack/infinity.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/infinity.go
deleted file mode 100644
index 6db0815e..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/msgpack/infinity.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package msgpack
-
-import (
- "math"
-)
-
-var negativeInfinity = math.Inf(-1)
-var positiveInfinity = math.Inf(1)
diff --git a/vendor/github.com/hashicorp/go-cty/cty/msgpack/marshal.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/marshal.go
deleted file mode 100644
index 8a43c16a..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/msgpack/marshal.go
+++ /dev/null
@@ -1,211 +0,0 @@
-package msgpack
-
-import (
- "bytes"
- "math/big"
- "sort"
-
- "github.com/hashicorp/go-cty/cty"
- "github.com/hashicorp/go-cty/cty/convert"
- "github.com/vmihailenco/msgpack"
-)
-
-// Marshal produces a msgpack serialization of the given value that
-// can be decoded into the given type later using Unmarshal.
-//
-// The given value must conform to the given type, or an error will
-// be returned.
-func Marshal(val cty.Value, ty cty.Type) ([]byte, error) {
- errs := val.Type().TestConformance(ty)
- if errs != nil {
- // Attempt a conversion
- var err error
- val, err = convert.Convert(val, ty)
- if err != nil {
- return nil, err
- }
- }
-
- // From this point onward, val can be assumed to be conforming to t.
-
- var path cty.Path
- var buf bytes.Buffer
- enc := msgpack.NewEncoder(&buf)
-
- err := marshal(val, ty, path, enc)
- if err != nil {
- return nil, err
- }
-
- return buf.Bytes(), nil
-}
-
-func marshal(val cty.Value, ty cty.Type, path cty.Path, enc *msgpack.Encoder) error {
- if val.IsMarked() {
- return path.NewErrorf("value has marks, so it cannot be seralized")
- }
-
- // If we're going to decode as DynamicPseudoType then we need to save
- // dynamic type information to recover the real type.
- if ty == cty.DynamicPseudoType && val.Type() != cty.DynamicPseudoType {
- return marshalDynamic(val, path, enc)
- }
-
- if !val.IsKnown() {
- err := enc.Encode(unknownVal)
- if err != nil {
- return path.NewError(err)
- }
- return nil
- }
- if val.IsNull() {
- err := enc.EncodeNil()
- if err != nil {
- return path.NewError(err)
- }
- return nil
- }
-
- // The caller should've guaranteed that the given val is conformant with
- // the given type ty, so we'll proceed under that assumption here.
- switch {
- case ty.IsPrimitiveType():
- switch ty {
- case cty.String:
- err := enc.EncodeString(val.AsString())
- if err != nil {
- return path.NewError(err)
- }
- return nil
- case cty.Number:
- var err error
- switch {
- case val.RawEquals(cty.PositiveInfinity):
- err = enc.EncodeFloat64(positiveInfinity)
- case val.RawEquals(cty.NegativeInfinity):
- err = enc.EncodeFloat64(negativeInfinity)
- default:
- bf := val.AsBigFloat()
- if iv, acc := bf.Int64(); acc == big.Exact {
- err = enc.EncodeInt(iv)
- } else if fv, acc := bf.Float64(); acc == big.Exact {
- err = enc.EncodeFloat64(fv)
- } else {
- err = enc.EncodeString(bf.Text('f', -1))
- }
- }
- if err != nil {
- return path.NewError(err)
- }
- return nil
- case cty.Bool:
- err := enc.EncodeBool(val.True())
- if err != nil {
- return path.NewError(err)
- }
- return nil
- default:
- panic("unsupported primitive type")
- }
- case ty.IsListType(), ty.IsSetType():
- enc.EncodeArrayLen(val.LengthInt())
- ety := ty.ElementType()
- it := val.ElementIterator()
- path := append(path, nil) // local override of 'path' with extra element
- for it.Next() {
- ek, ev := it.Element()
- path[len(path)-1] = cty.IndexStep{
- Key: ek,
- }
- err := marshal(ev, ety, path, enc)
- if err != nil {
- return err
- }
- }
- return nil
- case ty.IsMapType():
- enc.EncodeMapLen(val.LengthInt())
- ety := ty.ElementType()
- it := val.ElementIterator()
- path := append(path, nil) // local override of 'path' with extra element
- for it.Next() {
- ek, ev := it.Element()
- path[len(path)-1] = cty.IndexStep{
- Key: ek,
- }
- var err error
- err = marshal(ek, ek.Type(), path, enc)
- if err != nil {
- return err
- }
- err = marshal(ev, ety, path, enc)
- if err != nil {
- return err
- }
- }
- return nil
- case ty.IsTupleType():
- etys := ty.TupleElementTypes()
- it := val.ElementIterator()
- path := append(path, nil) // local override of 'path' with extra element
- i := 0
- enc.EncodeArrayLen(len(etys))
- for it.Next() {
- ety := etys[i]
- ek, ev := it.Element()
- path[len(path)-1] = cty.IndexStep{
- Key: ek,
- }
- err := marshal(ev, ety, path, enc)
- if err != nil {
- return err
- }
- i++
- }
- return nil
- case ty.IsObjectType():
- atys := ty.AttributeTypes()
- path := append(path, nil) // local override of 'path' with extra element
-
- names := make([]string, 0, len(atys))
- for k := range atys {
- names = append(names, k)
- }
- sort.Strings(names)
-
- enc.EncodeMapLen(len(names))
-
- for _, k := range names {
- aty := atys[k]
- av := val.GetAttr(k)
- path[len(path)-1] = cty.GetAttrStep{
- Name: k,
- }
- var err error
- err = marshal(cty.StringVal(k), cty.String, path, enc)
- if err != nil {
- return err
- }
- err = marshal(av, aty, path, enc)
- if err != nil {
- return err
- }
- }
- return nil
- case ty.IsCapsuleType():
- return path.NewErrorf("capsule types not supported for msgpack encoding")
- default:
- // should never happen
- return path.NewErrorf("cannot msgpack-serialize %s", ty.FriendlyName())
- }
-}
-
-// marshalDynamic adds an extra wrapping object containing dynamic type
-// information for the given value.
-func marshalDynamic(val cty.Value, path cty.Path, enc *msgpack.Encoder) error {
- dv := dynamicVal{
- Value: val,
- Path: path,
- }
- return enc.Encode(&dv)
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/msgpack/type_implied.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/type_implied.go
deleted file mode 100644
index 86664bac..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/msgpack/type_implied.go
+++ /dev/null
@@ -1,167 +0,0 @@
-package msgpack
-
-import (
- "bytes"
- "fmt"
- "io"
-
- "github.com/hashicorp/go-cty/cty"
- "github.com/vmihailenco/msgpack"
- msgpackcodes "github.com/vmihailenco/msgpack/codes"
-)
-
-// ImpliedType returns the cty Type implied by the structure of the given
-// msgpack-compliant buffer. This function implements the default type mapping
-// behavior used when decoding arbitrary msgpack without explicit cty Type
-// information.
-//
-// The rules are as follows:
-//
-// msgpack strings, numbers and bools map to their equivalent primitive type in
-// cty.
-//
-// msgpack maps become cty object types, with the attributes defined by the
-// map keys and the types of their values.
-//
-// msgpack arrays become cty tuple types, with the elements defined by the
-// types of the array members.
-//
-// Any nulls are typed as DynamicPseudoType, so callers of this function
-// must be prepared to deal with this. Callers that do not wish to deal with
-// dynamic typing should not use this function and should instead describe
-// their required types explicitly with a cty.Type instance when decoding.
-//
-// Any unknown values are similarly typed as DynamicPseudoType, because these
-// do not carry type information on the wire.
-//
-// Any parse errors will be returned as an error, and the type will be the
-// invalid value cty.NilType.
-func ImpliedType(buf []byte) (cty.Type, error) {
- r := bytes.NewReader(buf)
- dec := msgpack.NewDecoder(r)
-
- ty, err := impliedType(dec)
- if err != nil {
- return cty.NilType, err
- }
-
- // We must now be at the end of the buffer
- err = dec.Skip()
- if err != io.EOF {
- return ty, fmt.Errorf("extra bytes after msgpack value")
- }
-
- return ty, nil
-}
-
-func impliedType(dec *msgpack.Decoder) (cty.Type, error) {
- // If this function returns with a nil error then it must have already
- // consumed the next value from the decoder, since when called recursively
- // the caller will be expecting to find a following value here.
-
- code, err := dec.PeekCode()
- if err != nil {
- return cty.NilType, err
- }
-
- switch {
-
- case code == msgpackcodes.Nil || msgpackcodes.IsExt(code):
- err := dec.Skip()
- return cty.DynamicPseudoType, err
-
- case code == msgpackcodes.True || code == msgpackcodes.False:
- _, err := dec.DecodeBool()
- return cty.Bool, err
-
- case msgpackcodes.IsFixedNum(code):
- _, err := dec.DecodeInt64()
- return cty.Number, err
-
- case code == msgpackcodes.Int8 || code == msgpackcodes.Int16 || code == msgpackcodes.Int32 || code == msgpackcodes.Int64:
- _, err := dec.DecodeInt64()
- return cty.Number, err
-
- case code == msgpackcodes.Uint8 || code == msgpackcodes.Uint16 || code == msgpackcodes.Uint32 || code == msgpackcodes.Uint64:
- _, err := dec.DecodeUint64()
- return cty.Number, err
-
- case code == msgpackcodes.Float || code == msgpackcodes.Double:
- _, err := dec.DecodeFloat64()
- return cty.Number, err
-
- case msgpackcodes.IsString(code):
- _, err := dec.DecodeString()
- return cty.String, err
-
- case msgpackcodes.IsFixedMap(code) || code == msgpackcodes.Map16 || code == msgpackcodes.Map32:
- return impliedObjectType(dec)
-
- case msgpackcodes.IsFixedArray(code) || code == msgpackcodes.Array16 || code == msgpackcodes.Array32:
- return impliedTupleType(dec)
-
- default:
- return cty.NilType, fmt.Errorf("unsupported msgpack code %#v", code)
- }
-}
-
-func impliedObjectType(dec *msgpack.Decoder) (cty.Type, error) {
- // If we get in here then we've already peeked the next code and know
- // it's some sort of map.
- l, err := dec.DecodeMapLen()
- if err != nil {
- return cty.DynamicPseudoType, nil
- }
-
- var atys map[string]cty.Type
-
- for i := 0; i < l; i++ {
- // Read the map key first. We require maps to be strings, but msgpack
- // doesn't so we're prepared to error here if not.
- k, err := dec.DecodeString()
- if err != nil {
- return cty.DynamicPseudoType, err
- }
-
- aty, err := impliedType(dec)
- if err != nil {
- return cty.DynamicPseudoType, err
- }
-
- if atys == nil {
- atys = make(map[string]cty.Type)
- }
- atys[k] = aty
- }
-
- if len(atys) == 0 {
- return cty.EmptyObject, nil
- }
-
- return cty.Object(atys), nil
-}
-
-func impliedTupleType(dec *msgpack.Decoder) (cty.Type, error) {
- // If we get in here then we've already peeked the next code and know
- // it's some sort of array.
- l, err := dec.DecodeArrayLen()
- if err != nil {
- return cty.DynamicPseudoType, nil
- }
-
- if l == 0 {
- return cty.EmptyTuple, nil
- }
-
- etys := make([]cty.Type, l)
-
- for i := 0; i < l; i++ {
- ety, err := impliedType(dec)
- if err != nil {
- return cty.DynamicPseudoType, err
- }
- etys[i] = ety
- }
-
- return cty.Tuple(etys), nil
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/msgpack/unknown.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/unknown.go
deleted file mode 100644
index 6507bc4b..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/msgpack/unknown.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package msgpack
-
-type unknownType struct{}
-
-var unknownVal = unknownType{}
-
-// unknownValBytes is the raw bytes of the msgpack fixext1 value we
-// write to represent an unknown value. It's an extension value of
-// type zero whose value is irrelevant. Since it's irrelevant, we
-// set it to a single byte whose value is also zero, since that's
-// the most compact possible representation.
-var unknownValBytes = []byte{0xd4, 0, 0}
-
-func (uv unknownType) MarshalMsgpack() ([]byte, error) {
- return unknownValBytes, nil
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/msgpack/unmarshal.go b/vendor/github.com/hashicorp/go-cty/cty/msgpack/unmarshal.go
deleted file mode 100644
index 67f4c9a4..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/msgpack/unmarshal.go
+++ /dev/null
@@ -1,334 +0,0 @@
-package msgpack
-
-import (
- "bytes"
-
- "github.com/hashicorp/go-cty/cty"
- "github.com/vmihailenco/msgpack"
- msgpackCodes "github.com/vmihailenco/msgpack/codes"
-)
-
-// Unmarshal interprets the given bytes as a msgpack-encoded cty Value of
-// the given type, returning the result.
-//
-// If an error is returned, the error is written with a hypothetical
-// end-user that wrote the msgpack file as its audience, using cty type
-// system concepts rather than Go type system concepts.
-func Unmarshal(b []byte, ty cty.Type) (cty.Value, error) {
- r := bytes.NewReader(b)
- dec := msgpack.NewDecoder(r)
-
- var path cty.Path
- return unmarshal(dec, ty, path)
-}
-
-func unmarshal(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) {
- peek, err := dec.PeekCode()
- if err != nil {
- return cty.DynamicVal, path.NewError(err)
- }
- if msgpackCodes.IsExt(peek) {
- // We just assume _all_ extensions are unknown values,
- // since we don't have any other extensions.
- dec.Skip() // skip what we've peeked
- return cty.UnknownVal(ty), nil
- }
- if ty == cty.DynamicPseudoType {
- return unmarshalDynamic(dec, path)
- }
- if peek == msgpackCodes.Nil {
- dec.Skip() // skip what we've peeked
- return cty.NullVal(ty), nil
- }
-
- switch {
- case ty.IsPrimitiveType():
- val, err := unmarshalPrimitive(dec, ty, path)
- if err != nil {
- return cty.NilVal, err
- }
- return val, nil
- case ty.IsListType():
- return unmarshalList(dec, ty.ElementType(), path)
- case ty.IsSetType():
- return unmarshalSet(dec, ty.ElementType(), path)
- case ty.IsMapType():
- return unmarshalMap(dec, ty.ElementType(), path)
- case ty.IsTupleType():
- return unmarshalTuple(dec, ty.TupleElementTypes(), path)
- case ty.IsObjectType():
- return unmarshalObject(dec, ty.AttributeTypes(), path)
- default:
- return cty.NilVal, path.NewErrorf("unsupported type %s", ty.FriendlyName())
- }
-}
-
-func unmarshalPrimitive(dec *msgpack.Decoder, ty cty.Type, path cty.Path) (cty.Value, error) {
- switch ty {
- case cty.Bool:
- rv, err := dec.DecodeBool()
- if err != nil {
- return cty.DynamicVal, path.NewErrorf("bool is required")
- }
- return cty.BoolVal(rv), nil
- case cty.Number:
- // Marshal will try int and float first, if the value can be
- // losslessly represented in these encodings, and then fall
- // back on a string if the number is too large or too precise.
- peek, err := dec.PeekCode()
- if err != nil {
- return cty.DynamicVal, path.NewErrorf("number is required")
- }
-
- if msgpackCodes.IsFixedNum(peek) {
- rv, err := dec.DecodeInt64()
- if err != nil {
- return cty.DynamicVal, path.NewErrorf("number is required")
- }
- return cty.NumberIntVal(rv), nil
- }
-
- switch peek {
- case msgpackCodes.Int8, msgpackCodes.Int16, msgpackCodes.Int32, msgpackCodes.Int64:
- rv, err := dec.DecodeInt64()
- if err != nil {
- return cty.DynamicVal, path.NewErrorf("number is required")
- }
- return cty.NumberIntVal(rv), nil
- case msgpackCodes.Uint8, msgpackCodes.Uint16, msgpackCodes.Uint32, msgpackCodes.Uint64:
- rv, err := dec.DecodeUint64()
- if err != nil {
- return cty.DynamicVal, path.NewErrorf("number is required")
- }
- return cty.NumberUIntVal(rv), nil
- case msgpackCodes.Float, msgpackCodes.Double:
- rv, err := dec.DecodeFloat64()
- if err != nil {
- return cty.DynamicVal, path.NewErrorf("number is required")
- }
- return cty.NumberFloatVal(rv), nil
- default:
- rv, err := dec.DecodeString()
- if err != nil {
- return cty.DynamicVal, path.NewErrorf("number is required")
- }
- v, err := cty.ParseNumberVal(rv)
- if err != nil {
- return cty.DynamicVal, path.NewErrorf("number is required")
- }
- return v, nil
- }
- case cty.String:
- rv, err := dec.DecodeString()
- if err != nil {
- return cty.DynamicVal, path.NewErrorf("string is required")
- }
- return cty.StringVal(rv), nil
- default:
- // should never happen
- panic("unsupported primitive type")
- }
-}
-
-func unmarshalList(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) {
- length, err := dec.DecodeArrayLen()
- if err != nil {
- return cty.DynamicVal, path.NewErrorf("a list is required")
- }
-
- switch {
- case length < 0:
- return cty.NullVal(cty.List(ety)), nil
- case length == 0:
- return cty.ListValEmpty(ety), nil
- }
-
- vals := make([]cty.Value, 0, length)
- path = append(path, nil)
- for i := 0; i < length; i++ {
- path[len(path)-1] = cty.IndexStep{
- Key: cty.NumberIntVal(int64(i)),
- }
-
- val, err := unmarshal(dec, ety, path)
- if err != nil {
- return cty.DynamicVal, err
- }
-
- vals = append(vals, val)
- }
-
- return cty.ListVal(vals), nil
-}
-
-func unmarshalSet(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) {
- length, err := dec.DecodeArrayLen()
- if err != nil {
- return cty.DynamicVal, path.NewErrorf("a set is required")
- }
-
- switch {
- case length < 0:
- return cty.NullVal(cty.Set(ety)), nil
- case length == 0:
- return cty.SetValEmpty(ety), nil
- }
-
- vals := make([]cty.Value, 0, length)
- path = append(path, nil)
- for i := 0; i < length; i++ {
- path[len(path)-1] = cty.IndexStep{
- Key: cty.NumberIntVal(int64(i)),
- }
-
- val, err := unmarshal(dec, ety, path)
- if err != nil {
- return cty.DynamicVal, err
- }
-
- vals = append(vals, val)
- }
-
- return cty.SetVal(vals), nil
-}
-
-func unmarshalMap(dec *msgpack.Decoder, ety cty.Type, path cty.Path) (cty.Value, error) {
- length, err := dec.DecodeMapLen()
- if err != nil {
- return cty.DynamicVal, path.NewErrorf("a map is required")
- }
-
- switch {
- case length < 0:
- return cty.NullVal(cty.Map(ety)), nil
- case length == 0:
- return cty.MapValEmpty(ety), nil
- }
-
- vals := make(map[string]cty.Value, length)
- path = append(path, nil)
- for i := 0; i < length; i++ {
- key, err := dec.DecodeString()
- if err != nil {
- path[:len(path)-1].NewErrorf("non-string key in map")
- }
-
- path[len(path)-1] = cty.IndexStep{
- Key: cty.StringVal(key),
- }
-
- val, err := unmarshal(dec, ety, path)
- if err != nil {
- return cty.DynamicVal, err
- }
-
- vals[key] = val
- }
-
- return cty.MapVal(vals), nil
-}
-
-func unmarshalTuple(dec *msgpack.Decoder, etys []cty.Type, path cty.Path) (cty.Value, error) {
- length, err := dec.DecodeArrayLen()
- if err != nil {
- return cty.DynamicVal, path.NewErrorf("a tuple is required")
- }
-
- switch {
- case length < 0:
- return cty.NullVal(cty.Tuple(etys)), nil
- case length == 0:
- return cty.TupleVal(nil), nil
- case length != len(etys):
- return cty.DynamicVal, path.NewErrorf("a tuple of length %d is required", len(etys))
- }
-
- vals := make([]cty.Value, 0, length)
- path = append(path, nil)
- for i := 0; i < length; i++ {
- path[len(path)-1] = cty.IndexStep{
- Key: cty.NumberIntVal(int64(i)),
- }
- ety := etys[i]
-
- val, err := unmarshal(dec, ety, path)
- if err != nil {
- return cty.DynamicVal, err
- }
-
- vals = append(vals, val)
- }
-
- return cty.TupleVal(vals), nil
-}
-
-func unmarshalObject(dec *msgpack.Decoder, atys map[string]cty.Type, path cty.Path) (cty.Value, error) {
- length, err := dec.DecodeMapLen()
- if err != nil {
- return cty.DynamicVal, path.NewErrorf("an object is required")
- }
-
- switch {
- case length < 0:
- return cty.NullVal(cty.Object(atys)), nil
- case length == 0:
- return cty.ObjectVal(nil), nil
- case length != len(atys):
- return cty.DynamicVal, path.NewErrorf("an object with %d attributes is required (%d given)",
- len(atys), length)
- }
-
- vals := make(map[string]cty.Value, length)
- path = append(path, nil)
- for i := 0; i < length; i++ {
- key, err := dec.DecodeString()
- if err != nil {
- return cty.DynamicVal, path[:len(path)-1].NewErrorf("all keys must be strings")
- }
-
- path[len(path)-1] = cty.IndexStep{
- Key: cty.StringVal(key),
- }
- aty, exists := atys[key]
- if !exists {
- return cty.DynamicVal, path.NewErrorf("unsupported attribute")
- }
-
- val, err := unmarshal(dec, aty, path)
- if err != nil {
- return cty.DynamicVal, err
- }
-
- vals[key] = val
- }
-
- return cty.ObjectVal(vals), nil
-}
-
-func unmarshalDynamic(dec *msgpack.Decoder, path cty.Path) (cty.Value, error) {
- length, err := dec.DecodeArrayLen()
- if err != nil {
- return cty.DynamicVal, path.NewError(err)
- }
-
- switch {
- case length == -1:
- return cty.NullVal(cty.DynamicPseudoType), nil
- case length != 2:
- return cty.DynamicVal, path.NewErrorf(
- "dynamic value array must have exactly two elements",
- )
- }
-
- typeJSON, err := dec.DecodeBytes()
- if err != nil {
- return cty.DynamicVal, path.NewError(err)
- }
- var ty cty.Type
- err = (&ty).UnmarshalJSON(typeJSON)
- if err != nil {
- return cty.DynamicVal, path.NewError(err)
- }
-
- return unmarshal(dec, ty, path)
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/null.go b/vendor/github.com/hashicorp/go-cty/cty/null.go
deleted file mode 100644
index d58d0287..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/null.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package cty
-
-// NullVal returns a null value of the given type. A null can be created of any
-// type, but operations on such values will always panic. Calling applications
-// are encouraged to use nulls only sparingly, particularly when user-provided
-// expressions are to be evaluated, since the precence of nulls creates a
-// much higher chance of evaluation errors that can't be caught by a type
-// checker.
-func NullVal(t Type) Value {
- return Value{
- ty: t,
- v: nil,
- }
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/object_type.go b/vendor/github.com/hashicorp/go-cty/cty/object_type.go
deleted file mode 100644
index 187d3875..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/object_type.go
+++ /dev/null
@@ -1,135 +0,0 @@
-package cty
-
-import (
- "fmt"
-)
-
-type typeObject struct {
- typeImplSigil
- AttrTypes map[string]Type
-}
-
-// Object creates an object type with the given attribute types.
-//
-// After a map is passed to this function the caller must no longer access it,
-// since ownership is transferred to this library.
-func Object(attrTypes map[string]Type) Type {
- attrTypesNorm := make(map[string]Type, len(attrTypes))
- for k, v := range attrTypes {
- attrTypesNorm[NormalizeString(k)] = v
- }
-
- return Type{
- typeObject{
- AttrTypes: attrTypesNorm,
- },
- }
-}
-
-func (t typeObject) Equals(other Type) bool {
- if ot, ok := other.typeImpl.(typeObject); ok {
- if len(t.AttrTypes) != len(ot.AttrTypes) {
- // Fast path: if we don't have the same number of attributes
- // then we can't possibly be equal. This also avoids the need
- // to test attributes in both directions below, since we know
- // there can't be extras in "other".
- return false
- }
-
- for attr, ty := range t.AttrTypes {
- oty, ok := ot.AttrTypes[attr]
- if !ok {
- return false
- }
- if !oty.Equals(ty) {
- return false
- }
- }
-
- return true
- }
- return false
-}
-
-func (t typeObject) FriendlyName(mode friendlyTypeNameMode) string {
- // There isn't really a friendly way to write an object type due to its
- // complexity, so we'll just do something English-ish. Callers will
- // probably want to make some extra effort to avoid ever printing out
- // an object type FriendlyName in its entirety. For example, could
- // produce an error message by diffing two object types and saying
- // something like "Expected attribute foo to be string, but got number".
- // TODO: Finish this
- return "object"
-}
-
-func (t typeObject) GoString() string {
- if len(t.AttrTypes) == 0 {
- return "cty.EmptyObject"
- }
- return fmt.Sprintf("cty.Object(%#v)", t.AttrTypes)
-}
-
-// EmptyObject is a shorthand for Object(map[string]Type{}), to more
-// easily talk about the empty object type.
-var EmptyObject Type
-
-// EmptyObjectVal is the only possible non-null, non-unknown value of type
-// EmptyObject.
-var EmptyObjectVal Value
-
-func init() {
- EmptyObject = Object(map[string]Type{})
- EmptyObjectVal = Value{
- ty: EmptyObject,
- v: map[string]interface{}{},
- }
-}
-
-// IsObjectType returns true if the given type is an object type, regardless
-// of its element type.
-func (t Type) IsObjectType() bool {
- _, ok := t.typeImpl.(typeObject)
- return ok
-}
-
-// HasAttribute returns true if the receiver has an attribute with the given
-// name, regardless of its type. Will panic if the reciever isn't an object
-// type; use IsObjectType to determine whether this operation will succeed.
-func (t Type) HasAttribute(name string) bool {
- name = NormalizeString(name)
- if ot, ok := t.typeImpl.(typeObject); ok {
- _, hasAttr := ot.AttrTypes[name]
- return hasAttr
- }
- panic("HasAttribute on non-object Type")
-}
-
-// AttributeType returns the type of the attribute with the given name. Will
-// panic if the receiver is not an object type (use IsObjectType to confirm)
-// or if the object type has no such attribute (use HasAttribute to confirm).
-func (t Type) AttributeType(name string) Type {
- name = NormalizeString(name)
- if ot, ok := t.typeImpl.(typeObject); ok {
- aty, hasAttr := ot.AttrTypes[name]
- if !hasAttr {
- panic("no such attribute")
- }
- return aty
- }
- panic("AttributeType on non-object Type")
-}
-
-// AttributeTypes returns a map from attribute names to their associated
-// types. Will panic if the receiver is not an object type (use IsObjectType
-// to confirm).
-//
-// The returned map is part of the internal state of the type, and is provided
-// for read access only. It is forbidden for any caller to modify the returned
-// map. For many purposes the attribute-related methods of Value are more
-// appropriate and more convenient to use.
-func (t Type) AttributeTypes() map[string]Type {
- if ot, ok := t.typeImpl.(typeObject); ok {
- return ot.AttrTypes
- }
- panic("AttributeTypes on non-object Type")
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/path.go b/vendor/github.com/hashicorp/go-cty/cty/path.go
deleted file mode 100644
index 636e68c6..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/path.go
+++ /dev/null
@@ -1,270 +0,0 @@
-package cty
-
-import (
- "errors"
- "fmt"
-)
-
-// A Path is a sequence of operations to locate a nested value within a
-// data structure.
-//
-// The empty Path represents the given item. Any PathSteps within represent
-// taking a single step down into a data structure.
-//
-// Path has some convenience methods for gradually constructing a path,
-// but callers can also feel free to just produce a slice of PathStep manually
-// and convert to this type, which may be more appropriate in environments
-// where memory pressure is a concern.
-//
-// Although a Path is technically mutable, by convention callers should not
-// mutate a path once it has been built and passed to some other subsystem.
-// Instead, use Copy and then mutate the copy before using it.
-type Path []PathStep
-
-// PathStep represents a single step down into a data structure, as part
-// of a Path. PathStep is a closed interface, meaning that the only
-// permitted implementations are those within this package.
-type PathStep interface {
- pathStepSigil() pathStepImpl
- Apply(Value) (Value, error)
-}
-
-// embed pathImpl into a struct to declare it a PathStep implementation
-type pathStepImpl struct{}
-
-func (p pathStepImpl) pathStepSigil() pathStepImpl {
- return p
-}
-
-// Index returns a new Path that is the reciever with an IndexStep appended
-// to the end.
-//
-// This is provided as a convenient way to construct paths, but each call
-// will create garbage so it should not be used where memory pressure is a
-// concern.
-func (p Path) Index(v Value) Path {
- ret := make(Path, len(p)+1)
- copy(ret, p)
- ret[len(p)] = IndexStep{
- Key: v,
- }
- return ret
-}
-
-// IndexInt is a typed convenience method for Index.
-func (p Path) IndexInt(v int) Path {
- return p.Index(NumberIntVal(int64(v)))
-}
-
-// IndexString is a typed convenience method for Index.
-func (p Path) IndexString(v string) Path {
- return p.Index(StringVal(v))
-}
-
-// IndexPath is a convenience method to start a new Path with an IndexStep.
-func IndexPath(v Value) Path {
- return Path{}.Index(v)
-}
-
-// IndexIntPath is a typed convenience method for IndexPath.
-func IndexIntPath(v int) Path {
- return IndexPath(NumberIntVal(int64(v)))
-}
-
-// IndexStringPath is a typed convenience method for IndexPath.
-func IndexStringPath(v string) Path {
- return IndexPath(StringVal(v))
-}
-
-// GetAttr returns a new Path that is the reciever with a GetAttrStep appended
-// to the end.
-//
-// This is provided as a convenient way to construct paths, but each call
-// will create garbage so it should not be used where memory pressure is a
-// concern.
-func (p Path) GetAttr(name string) Path {
- ret := make(Path, len(p)+1)
- copy(ret, p)
- ret[len(p)] = GetAttrStep{
- Name: name,
- }
- return ret
-}
-
-// Equals compares 2 Paths for exact equality.
-func (p Path) Equals(other Path) bool {
- if len(p) != len(other) {
- return false
- }
-
- for i := range p {
- pv := p[i]
- switch pv := pv.(type) {
- case GetAttrStep:
- ov, ok := other[i].(GetAttrStep)
- if !ok || pv != ov {
- return false
- }
- case IndexStep:
- ov, ok := other[i].(IndexStep)
- if !ok {
- return false
- }
-
- if !pv.Key.RawEquals(ov.Key) {
- return false
- }
- default:
- // Any invalid steps default to evaluating false.
- return false
- }
- }
-
- return true
-
-}
-
-// HasPrefix determines if the path p contains the provided prefix.
-func (p Path) HasPrefix(prefix Path) bool {
- if len(prefix) > len(p) {
- return false
- }
-
- return p[:len(prefix)].Equals(prefix)
-}
-
-// GetAttrPath is a convenience method to start a new Path with a GetAttrStep.
-func GetAttrPath(name string) Path {
- return Path{}.GetAttr(name)
-}
-
-// Apply applies each of the steps in turn to successive values starting with
-// the given value, and returns the result. If any step returns an error,
-// the whole operation returns an error.
-func (p Path) Apply(val Value) (Value, error) {
- var err error
- for i, step := range p {
- val, err = step.Apply(val)
- if err != nil {
- return NilVal, fmt.Errorf("at step %d: %s", i, err)
- }
- }
- return val, nil
-}
-
-// LastStep applies the given path up to the last step and then returns
-// the resulting value and the final step.
-//
-// This is useful when dealing with assignment operations, since in that
-// case the *value* of the last step is not important (and may not, in fact,
-// present at all) and we care only about its location.
-//
-// Since LastStep applies all steps except the last, it will return errors
-// for those steps in the same way as Apply does.
-//
-// If the path has *no* steps then the returned PathStep will be nil,
-// representing that any operation should be applied directly to the
-// given value.
-func (p Path) LastStep(val Value) (Value, PathStep, error) {
- var err error
-
- if len(p) == 0 {
- return val, nil, nil
- }
-
- journey := p[:len(p)-1]
- val, err = journey.Apply(val)
- if err != nil {
- return NilVal, nil, err
- }
- return val, p[len(p)-1], nil
-}
-
-// Copy makes a shallow copy of the receiver. Often when paths are passed to
-// caller code they come with the constraint that they are valid only until
-// the caller returns, due to how they are constructed internally. Callers
-// can use Copy to conveniently produce a copy of the value that _they_ control
-// the validity of.
-func (p Path) Copy() Path {
- ret := make(Path, len(p))
- copy(ret, p)
- return ret
-}
-
-// IndexStep is a Step implementation representing applying the index operation
-// to a value, which must be of either a list, map, or set type.
-//
-// When describing a path through a *type* rather than a concrete value,
-// the Key may be an unknown value, indicating that the step applies to
-// *any* key of the given type.
-//
-// When indexing into a set, the Key is actually the element being accessed
-// itself, since in sets elements are their own identity.
-type IndexStep struct {
- pathStepImpl
- Key Value
-}
-
-// Apply returns the value resulting from indexing the given value with
-// our key value.
-func (s IndexStep) Apply(val Value) (Value, error) {
- if val == NilVal || val.IsNull() {
- return NilVal, errors.New("cannot index a null value")
- }
-
- switch s.Key.Type() {
- case Number:
- if !(val.Type().IsListType() || val.Type().IsTupleType()) {
- return NilVal, errors.New("not a list type")
- }
- case String:
- if !val.Type().IsMapType() {
- return NilVal, errors.New("not a map type")
- }
- default:
- return NilVal, errors.New("key value not number or string")
- }
-
- has := val.HasIndex(s.Key)
- if !has.IsKnown() {
- return UnknownVal(val.Type().ElementType()), nil
- }
- if !has.True() {
- return NilVal, errors.New("value does not have given index key")
- }
-
- return val.Index(s.Key), nil
-}
-
-func (s IndexStep) GoString() string {
- return fmt.Sprintf("cty.IndexStep{Key:%#v}", s.Key)
-}
-
-// GetAttrStep is a Step implementation representing retrieving an attribute
-// from a value, which must be of an object type.
-type GetAttrStep struct {
- pathStepImpl
- Name string
-}
-
-// Apply returns the value of our named attribute from the given value, which
-// must be of an object type that has a value of that name.
-func (s GetAttrStep) Apply(val Value) (Value, error) {
- if val == NilVal || val.IsNull() {
- return NilVal, errors.New("cannot access attributes on a null value")
- }
-
- if !val.Type().IsObjectType() {
- return NilVal, errors.New("not an object type")
- }
-
- if !val.Type().HasAttribute(s.Name) {
- return NilVal, fmt.Errorf("object has no attribute %q", s.Name)
- }
-
- return val.GetAttr(s.Name), nil
-}
-
-func (s GetAttrStep) GoString() string {
- return fmt.Sprintf("cty.GetAttrStep{Name:%q}", s.Name)
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/path_set.go b/vendor/github.com/hashicorp/go-cty/cty/path_set.go
deleted file mode 100644
index 977523de..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/path_set.go
+++ /dev/null
@@ -1,198 +0,0 @@
-package cty
-
-import (
- "fmt"
- "hash/crc64"
-
- "github.com/hashicorp/go-cty/cty/set"
-)
-
-// PathSet represents a set of Path objects. This can be used, for example,
-// to talk about a subset of paths within a value that meet some criteria,
-// without directly modifying the values at those paths.
-type PathSet struct {
- set set.Set
-}
-
-// NewPathSet creates and returns a PathSet, with initial contents optionally
-// set by the given arguments.
-func NewPathSet(paths ...Path) PathSet {
- ret := PathSet{
- set: set.NewSet(pathSetRules{}),
- }
-
- for _, path := range paths {
- ret.Add(path)
- }
-
- return ret
-}
-
-// Add inserts a single given path into the set.
-//
-// Paths are immutable after construction by convention. It is particularly
-// important not to mutate a path after it has been placed into a PathSet.
-// If a Path is mutated while in a set, behavior is undefined.
-func (s PathSet) Add(path Path) {
- s.set.Add(path)
-}
-
-// AddAllSteps is like Add but it also adds all of the steps leading to
-// the given path.
-//
-// For example, if given a path representing "foo.bar", it will add both
-// "foo" and "bar".
-func (s PathSet) AddAllSteps(path Path) {
- for i := 1; i <= len(path); i++ {
- s.Add(path[:i])
- }
-}
-
-// Has returns true if the given path is in the receiving set.
-func (s PathSet) Has(path Path) bool {
- return s.set.Has(path)
-}
-
-// List makes and returns a slice of all of the paths in the receiving set,
-// in an undefined but consistent order.
-func (s PathSet) List() []Path {
- if s.Empty() {
- return nil
- }
- ret := make([]Path, 0, s.set.Length())
- for it := s.set.Iterator(); it.Next(); {
- ret = append(ret, it.Value().(Path))
- }
- return ret
-}
-
-// Remove modifies the receving set to no longer include the given path.
-// If the given path was already absent, this is a no-op.
-func (s PathSet) Remove(path Path) {
- s.set.Remove(path)
-}
-
-// Empty returns true if the length of the receiving set is zero.
-func (s PathSet) Empty() bool {
- return s.set.Length() == 0
-}
-
-// Union returns a new set whose contents are the union of the receiver and
-// the given other set.
-func (s PathSet) Union(other PathSet) PathSet {
- return PathSet{
- set: s.set.Union(other.set),
- }
-}
-
-// Intersection returns a new set whose contents are the intersection of the
-// receiver and the given other set.
-func (s PathSet) Intersection(other PathSet) PathSet {
- return PathSet{
- set: s.set.Intersection(other.set),
- }
-}
-
-// Subtract returns a new set whose contents are those from the receiver with
-// any elements of the other given set subtracted.
-func (s PathSet) Subtract(other PathSet) PathSet {
- return PathSet{
- set: s.set.Subtract(other.set),
- }
-}
-
-// SymmetricDifference returns a new set whose contents are the symmetric
-// difference of the receiver and the given other set.
-func (s PathSet) SymmetricDifference(other PathSet) PathSet {
- return PathSet{
- set: s.set.SymmetricDifference(other.set),
- }
-}
-
-// Equal returns true if and only if both the receiver and the given other
-// set contain exactly the same paths.
-func (s PathSet) Equal(other PathSet) bool {
- if s.set.Length() != other.set.Length() {
- return false
- }
- // Now we know the lengths are the same we only need to test in one
- // direction whether everything in one is in the other.
- for it := s.set.Iterator(); it.Next(); {
- if !other.set.Has(it.Value()) {
- return false
- }
- }
- return true
-}
-
-var crc64Table = crc64.MakeTable(crc64.ISO)
-
-var indexStepPlaceholder = []byte("#")
-
-// pathSetRules is an implementation of set.Rules from the set package,
-// used internally within PathSet.
-type pathSetRules struct {
-}
-
-func (r pathSetRules) Hash(v interface{}) int {
- path := v.(Path)
- hash := crc64.New(crc64Table)
-
- for _, rawStep := range path {
- switch step := rawStep.(type) {
- case GetAttrStep:
- // (this creates some garbage converting the string name to a
- // []byte, but that's okay since cty is not designed to be
- // used in tight loops under memory pressure.)
- hash.Write([]byte(step.Name))
- default:
- // For any other step type we just append a predefined value,
- // which means that e.g. all indexes into a given collection will
- // hash to the same value but we assume that collections are
- // small and thus this won't hurt too much.
- hash.Write(indexStepPlaceholder)
- }
- }
-
- // We discard half of the hash on 32-bit platforms; collisions just make
- // our lookups take marginally longer, so not a big deal.
- return int(hash.Sum64())
-}
-
-func (r pathSetRules) Equivalent(a, b interface{}) bool {
- aPath := a.(Path)
- bPath := b.(Path)
-
- if len(aPath) != len(bPath) {
- return false
- }
-
- for i := range aPath {
- switch aStep := aPath[i].(type) {
- case GetAttrStep:
- bStep, ok := bPath[i].(GetAttrStep)
- if !ok {
- return false
- }
-
- if aStep.Name != bStep.Name {
- return false
- }
- case IndexStep:
- bStep, ok := bPath[i].(IndexStep)
- if !ok {
- return false
- }
-
- eq := aStep.Key.Equals(bStep.Key)
- if !eq.IsKnown() || eq.False() {
- return false
- }
- default:
- // Should never happen, since we document PathStep as a closed type.
- panic(fmt.Errorf("unsupported step type %T", aStep))
- }
- }
-
- return true
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/primitive_type.go b/vendor/github.com/hashicorp/go-cty/cty/primitive_type.go
deleted file mode 100644
index 7b3d1196..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/primitive_type.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package cty
-
-import "math/big"
-
-// primitiveType is the hidden implementation of the various primitive types
-// that are exposed as variables in this package.
-type primitiveType struct {
- typeImplSigil
- Kind primitiveTypeKind
-}
-
-type primitiveTypeKind byte
-
-const (
- primitiveTypeBool primitiveTypeKind = 'B'
- primitiveTypeNumber primitiveTypeKind = 'N'
- primitiveTypeString primitiveTypeKind = 'S'
-)
-
-func (t primitiveType) Equals(other Type) bool {
- if otherP, ok := other.typeImpl.(primitiveType); ok {
- return otherP.Kind == t.Kind
- }
- return false
-}
-
-func (t primitiveType) FriendlyName(mode friendlyTypeNameMode) string {
- switch t.Kind {
- case primitiveTypeBool:
- return "bool"
- case primitiveTypeNumber:
- return "number"
- case primitiveTypeString:
- return "string"
- default:
- // should never happen
- panic("invalid primitive type")
- }
-}
-
-func (t primitiveType) GoString() string {
- switch t.Kind {
- case primitiveTypeBool:
- return "cty.Bool"
- case primitiveTypeNumber:
- return "cty.Number"
- case primitiveTypeString:
- return "cty.String"
- default:
- // should never happen
- panic("invalid primitive type")
- }
-}
-
-// Number is the numeric type. Number values are arbitrary-precision
-// decimal numbers, which can then be converted into Go's various numeric
-// types only if they are in the appropriate range.
-var Number Type
-
-// String is the string type. String values are sequences of unicode codepoints
-// encoded internally as UTF-8.
-var String Type
-
-// Bool is the boolean type. The two values of this type are True and False.
-var Bool Type
-
-// True is the truthy value of type Bool
-var True Value
-
-// False is the falsey value of type Bool
-var False Value
-
-// Zero is a number value representing exactly zero.
-var Zero Value
-
-// PositiveInfinity is a Number value representing positive infinity
-var PositiveInfinity Value
-
-// NegativeInfinity is a Number value representing negative infinity
-var NegativeInfinity Value
-
-func init() {
- Number = Type{
- primitiveType{Kind: primitiveTypeNumber},
- }
- String = Type{
- primitiveType{Kind: primitiveTypeString},
- }
- Bool = Type{
- primitiveType{Kind: primitiveTypeBool},
- }
- True = Value{
- ty: Bool,
- v: true,
- }
- False = Value{
- ty: Bool,
- v: false,
- }
- Zero = Value{
- ty: Number,
- v: big.NewFloat(0),
- }
- PositiveInfinity = Value{
- ty: Number,
- v: (&big.Float{}).SetInf(false),
- }
- NegativeInfinity = Value{
- ty: Number,
- v: (&big.Float{}).SetInf(true),
- }
-}
-
-// IsPrimitiveType returns true if and only if the reciever is a primitive
-// type, which means it's either number, string, or bool. Any two primitive
-// types can be safely compared for equality using the standard == operator
-// without panic, which is not a guarantee that holds for all types. Primitive
-// types can therefore also be used in switch statements.
-func (t Type) IsPrimitiveType() bool {
- _, ok := t.typeImpl.(primitiveType)
- return ok
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/gob.go b/vendor/github.com/hashicorp/go-cty/cty/set/gob.go
deleted file mode 100644
index da2978f6..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/set/gob.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package set
-
-import (
- "bytes"
- "encoding/gob"
- "fmt"
-)
-
-// GobEncode is an implementation of the interface gob.GobEncoder, allowing
-// sets to be included in structures encoded via gob.
-//
-// The set rules are included in the serialized value, so the caller must
-// register its concrete rules type with gob.Register before using a
-// set in a gob, and possibly also implement GobEncode/GobDecode to customize
-// how any parameters are persisted.
-//
-// The set elements are also included, so if they are of non-primitive types
-// they too must be registered with gob.
-//
-// If the produced gob values will persist for a long time, the caller must
-// ensure compatibility of the rules implementation. In particular, if the
-// definition of element equivalence changes between encoding and decoding
-// then two distinct stored elements may be considered equivalent on decoding,
-// causing the recovered set to have fewer elements than when it was stored.
-func (s Set) GobEncode() ([]byte, error) {
- gs := gobSet{
- Version: 0,
- Rules: s.rules,
- Values: s.Values(),
- }
-
- buf := &bytes.Buffer{}
- enc := gob.NewEncoder(buf)
- err := enc.Encode(gs)
- if err != nil {
- return nil, fmt.Errorf("error encoding set.Set: %s", err)
- }
-
- return buf.Bytes(), nil
-}
-
-// GobDecode is the opposite of GobEncode. See GobEncode for information
-// on the requirements for and caveats of including set values in gobs.
-func (s *Set) GobDecode(buf []byte) error {
- r := bytes.NewReader(buf)
- dec := gob.NewDecoder(r)
-
- var gs gobSet
- err := dec.Decode(&gs)
- if err != nil {
- return fmt.Errorf("error decoding set.Set: %s", err)
- }
- if gs.Version != 0 {
- return fmt.Errorf("unsupported set.Set encoding version %d; need 0", gs.Version)
- }
-
- victim := NewSetFromSlice(gs.Rules, gs.Values)
- s.vals = victim.vals
- s.rules = victim.rules
- return nil
-}
-
-type gobSet struct {
- Version int
- Rules Rules
-
- // The bucket-based representation is for efficient in-memory access, but
- // for serialization it's enough to just retain the values themselves,
- // which we can re-bucket using the rules (which may have changed!) when
- // we re-inflate.
- Values []interface{}
-}
-
-func init() {
- gob.Register([]interface{}(nil))
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/iterator.go b/vendor/github.com/hashicorp/go-cty/cty/set/iterator.go
deleted file mode 100644
index 4a60494f..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/set/iterator.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package set
-
-type Iterator struct {
- vals []interface{}
- idx int
-}
-
-func (it *Iterator) Value() interface{} {
- return it.vals[it.idx]
-}
-
-func (it *Iterator) Next() bool {
- it.idx++
- return it.idx < len(it.vals)
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/ops.go b/vendor/github.com/hashicorp/go-cty/cty/set/ops.go
deleted file mode 100644
index fd1555f2..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/set/ops.go
+++ /dev/null
@@ -1,210 +0,0 @@
-package set
-
-import (
- "sort"
-)
-
-// Add inserts the given value into the receiving Set.
-//
-// This mutates the set in-place. This operation is not thread-safe.
-func (s Set) Add(val interface{}) {
- hv := s.rules.Hash(val)
- if _, ok := s.vals[hv]; !ok {
- s.vals[hv] = make([]interface{}, 0, 1)
- }
- bucket := s.vals[hv]
-
- // See if an equivalent value is already present
- for _, ev := range bucket {
- if s.rules.Equivalent(val, ev) {
- return
- }
- }
-
- s.vals[hv] = append(bucket, val)
-}
-
-// Remove deletes the given value from the receiving set, if indeed it was
-// there in the first place. If the value is not present, this is a no-op.
-func (s Set) Remove(val interface{}) {
- hv := s.rules.Hash(val)
- bucket, ok := s.vals[hv]
- if !ok {
- return
- }
-
- for i, ev := range bucket {
- if s.rules.Equivalent(val, ev) {
- newBucket := make([]interface{}, 0, len(bucket)-1)
- newBucket = append(newBucket, bucket[:i]...)
- newBucket = append(newBucket, bucket[i+1:]...)
- if len(newBucket) > 0 {
- s.vals[hv] = newBucket
- } else {
- delete(s.vals, hv)
- }
- return
- }
- }
-}
-
-// Has returns true if the given value is in the receiving set, or false if
-// it is not.
-func (s Set) Has(val interface{}) bool {
- hv := s.rules.Hash(val)
- bucket, ok := s.vals[hv]
- if !ok {
- return false
- }
-
- for _, ev := range bucket {
- if s.rules.Equivalent(val, ev) {
- return true
- }
- }
- return false
-}
-
-// Copy performs a shallow copy of the receiving set, returning a new set
-// with the same rules and elements.
-func (s Set) Copy() Set {
- ret := NewSet(s.rules)
- for k, v := range s.vals {
- ret.vals[k] = v
- }
- return ret
-}
-
-// Iterator returns an iterator over values in the set. If the set's rules
-// implement OrderedRules then the result is ordered per those rules. If
-// no order is provided, or if it is not a total order, then the iteration
-// order is undefined but consistent for a particular version of cty. Do not
-// rely on specific ordering between cty releases unless the rules order is a
-// total order.
-//
-// The pattern for using the returned iterator is:
-//
-// it := set.Iterator()
-// for it.Next() {
-// val := it.Value()
-// // ...
-// }
-//
-// Once an iterator has been created for a set, the set *must not* be mutated
-// until the iterator is no longer in use.
-func (s Set) Iterator() *Iterator {
- vals := s.Values()
-
- return &Iterator{
- vals: vals,
- idx: -1,
- }
-}
-
-// EachValue calls the given callback once for each value in the set, in an
-// undefined order that callers should not depend on.
-func (s Set) EachValue(cb func(interface{})) {
- it := s.Iterator()
- for it.Next() {
- cb(it.Value())
- }
-}
-
-// Values returns a slice of all the values in the set. If the set rules have
-// an order then the result is in that order. If no order is provided or if
-// it is not a total order then the result order is undefined, but consistent
-// for a particular set value within a specific release of cty.
-func (s Set) Values() []interface{} {
- var ret []interface{}
- // Sort the bucketIds to ensure that we always traverse in a
- // consistent order.
- bucketIDs := make([]int, 0, len(s.vals))
- for id := range s.vals {
- bucketIDs = append(bucketIDs, id)
- }
- sort.Ints(bucketIDs)
-
- for _, bucketID := range bucketIDs {
- ret = append(ret, s.vals[bucketID]...)
- }
-
- if orderRules, ok := s.rules.(OrderedRules); ok {
- sort.SliceStable(ret, func(i, j int) bool {
- return orderRules.Less(ret[i], ret[j])
- })
- }
-
- return ret
-}
-
-// Length returns the number of values in the set.
-func (s Set) Length() int {
- var count int
- for _, bucket := range s.vals {
- count = count + len(bucket)
- }
- return count
-}
-
-// Union returns a new set that contains all of the members of both the
-// receiving set and the given set. Both sets must have the same rules, or
-// else this function will panic.
-func (s1 Set) Union(s2 Set) Set {
- mustHaveSameRules(s1, s2)
- rs := NewSet(s1.rules)
- s1.EachValue(func(v interface{}) {
- rs.Add(v)
- })
- s2.EachValue(func(v interface{}) {
- rs.Add(v)
- })
- return rs
-}
-
-// Intersection returns a new set that contains the values that both the
-// receiver and given sets have in common. Both sets must have the same rules,
-// or else this function will panic.
-func (s1 Set) Intersection(s2 Set) Set {
- mustHaveSameRules(s1, s2)
- rs := NewSet(s1.rules)
- s1.EachValue(func(v interface{}) {
- if s2.Has(v) {
- rs.Add(v)
- }
- })
- return rs
-}
-
-// Subtract returns a new set that contains all of the values from the receiver
-// that are not also in the given set. Both sets must have the same rules,
-// or else this function will panic.
-func (s1 Set) Subtract(s2 Set) Set {
- mustHaveSameRules(s1, s2)
- rs := NewSet(s1.rules)
- s1.EachValue(func(v interface{}) {
- if !s2.Has(v) {
- rs.Add(v)
- }
- })
- return rs
-}
-
-// SymmetricDifference returns a new set that contains all of the values from
-// both the receiver and given sets, except those that both sets have in
-// common. Both sets must have the same rules, or else this function will
-// panic.
-func (s1 Set) SymmetricDifference(s2 Set) Set {
- mustHaveSameRules(s1, s2)
- rs := NewSet(s1.rules)
- s1.EachValue(func(v interface{}) {
- if !s2.Has(v) {
- rs.Add(v)
- }
- })
- s2.EachValue(func(v interface{}) {
- if !s1.Has(v) {
- rs.Add(v)
- }
- })
- return rs
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/rules.go b/vendor/github.com/hashicorp/go-cty/cty/set/rules.go
deleted file mode 100644
index 51f744b5..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/set/rules.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package set
-
-// Rules represents the operations that define membership for a Set.
-//
-// Each Set has a Rules instance, whose methods must satisfy the interface
-// contracts given below for any value that will be added to the set.
-type Rules interface {
- // Hash returns an int that somewhat-uniquely identifies the given value.
- //
- // A good hash function will minimize collisions for values that will be
- // added to the set, though collisions *are* permitted. Collisions will
- // simply reduce the efficiency of operations on the set.
- Hash(interface{}) int
-
- // Equivalent returns true if and only if the two values are considered
- // equivalent for the sake of set membership. Two values that are
- // equivalent cannot exist in the set at the same time, and if two
- // equivalent values are added it is undefined which one will be
- // returned when enumerating all of the set members.
- //
- // Two values that are equivalent *must* result in the same hash value,
- // though it is *not* required that two values with the same hash value
- // be equivalent.
- Equivalent(interface{}, interface{}) bool
-}
-
-// OrderedRules is an extension of Rules that can apply a partial order to
-// element values. When a set's Rules implements OrderedRules an iterator
-// over the set will return items in the order described by the rules.
-//
-// If the given order is not a total order (that is, some pairs of non-equivalent
-// elements do not have a defined order) then the resulting iteration order
-// is undefined but consistent for a particular version of cty. The exact
-// order in that case is not part of the contract and is subject to change
-// between versions.
-type OrderedRules interface {
- Rules
-
- // Less returns true if and only if the first argument should sort before
- // the second argument. If the second argument should sort before the first
- // or if there is no defined order for the values, return false.
- Less(interface{}, interface{}) bool
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/set/set.go b/vendor/github.com/hashicorp/go-cty/cty/set/set.go
deleted file mode 100644
index b4fb316f..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/set/set.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package set
-
-import (
- "fmt"
-)
-
-// Set is an implementation of the concept of a set: a collection where all
-// values are conceptually either in or out of the set, but the members are
-// not ordered.
-//
-// This type primarily exists to be the internal type of sets in cty, but
-// it is considered to be at the same level of abstraction as Go's built in
-// slice and map collection types, and so should make no cty-specific
-// assumptions.
-//
-// Set operations are not thread safe. It is the caller's responsibility to
-// provide mutex guarantees where necessary.
-//
-// Set operations are not optimized to minimize memory pressure. Mutating
-// a set will generally create garbage and so should perhaps be avoided in
-// tight loops where memory pressure is a concern.
-type Set struct {
- vals map[int][]interface{}
- rules Rules
-}
-
-// NewSet returns an empty set with the membership rules given.
-func NewSet(rules Rules) Set {
- return Set{
- vals: map[int][]interface{}{},
- rules: rules,
- }
-}
-
-func NewSetFromSlice(rules Rules, vals []interface{}) Set {
- s := NewSet(rules)
- for _, v := range vals {
- s.Add(v)
- }
- return s
-}
-
-func sameRules(s1 Set, s2 Set) bool {
- return s1.rules == s2.rules
-}
-
-func mustHaveSameRules(s1 Set, s2 Set) {
- if !sameRules(s1, s2) {
- panic(fmt.Errorf("incompatible set rules: %#v, %#v", s1.rules, s2.rules))
- }
-}
-
-// HasRules returns true if and only if the receiving set has the given rules
-// instance as its rules.
-func (s Set) HasRules(rules Rules) bool {
- return s.rules == rules
-}
-
-// Rules returns the receiving set's rules instance.
-func (s Set) Rules() Rules {
- return s.rules
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/set_helper.go b/vendor/github.com/hashicorp/go-cty/cty/set_helper.go
deleted file mode 100644
index 31622842..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/set_helper.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package cty
-
-import (
- "fmt"
-
- "github.com/hashicorp/go-cty/cty/set"
-)
-
-// ValueSet is to cty.Set what []cty.Value is to cty.List and
-// map[string]cty.Value is to cty.Map. It's provided to allow callers a
-// convenient interface for manipulating sets before wrapping them in cty.Set
-// values using cty.SetValFromValueSet.
-//
-// Unlike value slices and value maps, ValueSet instances have a single
-// homogenous element type because that is a requirement of the underlying
-// set implementation, which uses the element type to select a suitable
-// hashing function.
-//
-// Set mutations are not concurrency-safe.
-type ValueSet struct {
- // ValueSet is just a thin wrapper around a set.Set with our value-oriented
- // "rules" applied. We do this so that the caller can work in terms of
- // cty.Value objects even though the set internals use the raw values.
- s set.Set
-}
-
-// NewValueSet creates and returns a new ValueSet with the given element type.
-func NewValueSet(ety Type) ValueSet {
- return newValueSet(set.NewSet(setRules{Type: ety}))
-}
-
-func newValueSet(s set.Set) ValueSet {
- return ValueSet{
- s: s,
- }
-}
-
-// ElementType returns the element type for the receiving ValueSet.
-func (s ValueSet) ElementType() Type {
- return s.s.Rules().(setRules).Type
-}
-
-// Add inserts the given value into the receiving set.
-func (s ValueSet) Add(v Value) {
- s.requireElementType(v)
- s.s.Add(v.v)
-}
-
-// Remove deletes the given value from the receiving set, if indeed it was
-// there in the first place. If the value is not present, this is a no-op.
-func (s ValueSet) Remove(v Value) {
- s.requireElementType(v)
- s.s.Remove(v.v)
-}
-
-// Has returns true if the given value is in the receiving set, or false if
-// it is not.
-func (s ValueSet) Has(v Value) bool {
- s.requireElementType(v)
- return s.s.Has(v.v)
-}
-
-// Copy performs a shallow copy of the receiving set, returning a new set
-// with the same rules and elements.
-func (s ValueSet) Copy() ValueSet {
- return newValueSet(s.s.Copy())
-}
-
-// Length returns the number of values in the set.
-func (s ValueSet) Length() int {
- return s.s.Length()
-}
-
-// Values returns a slice of all of the values in the set in no particular
-// order.
-func (s ValueSet) Values() []Value {
- l := s.s.Length()
- if l == 0 {
- return nil
- }
- ret := make([]Value, 0, l)
- ety := s.ElementType()
- for it := s.s.Iterator(); it.Next(); {
- ret = append(ret, Value{
- ty: ety,
- v: it.Value(),
- })
- }
- return ret
-}
-
-// Union returns a new set that contains all of the members of both the
-// receiving set and the given set. Both sets must have the same element type,
-// or else this function will panic.
-func (s ValueSet) Union(other ValueSet) ValueSet {
- return newValueSet(s.s.Union(other.s))
-}
-
-// Intersection returns a new set that contains the values that both the
-// receiver and given sets have in common. Both sets must have the same element
-// type, or else this function will panic.
-func (s ValueSet) Intersection(other ValueSet) ValueSet {
- return newValueSet(s.s.Intersection(other.s))
-}
-
-// Subtract returns a new set that contains all of the values from the receiver
-// that are not also in the given set. Both sets must have the same element
-// type, or else this function will panic.
-func (s ValueSet) Subtract(other ValueSet) ValueSet {
- return newValueSet(s.s.Subtract(other.s))
-}
-
-// SymmetricDifference returns a new set that contains all of the values from
-// both the receiver and given sets, except those that both sets have in
-// common. Both sets must have the same element type, or else this function
-// will panic.
-func (s ValueSet) SymmetricDifference(other ValueSet) ValueSet {
- return newValueSet(s.s.SymmetricDifference(other.s))
-}
-
-// requireElementType panics if the given value is not of the set's element type.
-//
-// It also panics if the given value is marked, because marked values cannot
-// be stored in sets.
-func (s ValueSet) requireElementType(v Value) {
- if v.IsMarked() {
- panic("cannot store marked value directly in a set (make the set itself unknown instead)")
- }
- if !v.Type().Equals(s.ElementType()) {
- panic(fmt.Errorf("attempt to use %#v value with set of %#v", v.Type(), s.ElementType()))
- }
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/set_internals.go b/vendor/github.com/hashicorp/go-cty/cty/set_internals.go
deleted file mode 100644
index 40801980..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/set_internals.go
+++ /dev/null
@@ -1,244 +0,0 @@
-package cty
-
-import (
- "bytes"
- "fmt"
- "hash/crc32"
- "math/big"
- "sort"
-
- "github.com/hashicorp/go-cty/cty/set"
-)
-
-// setRules provides a Rules implementation for the ./set package that
-// respects the equality rules for cty values of the given type.
-//
-// This implementation expects that values added to the set will be
-// valid internal values for the given Type, which is to say that wrapping
-// the given value in a Value struct along with the ruleset's type should
-// produce a valid, working Value.
-type setRules struct {
- Type Type
-}
-
-var _ set.OrderedRules = setRules{}
-
-// Hash returns a hash value for the receiver that can be used for equality
-// checks where some inaccuracy is tolerable.
-//
-// The hash function is value-type-specific, so it is not meaningful to compare
-// hash results for values of different types.
-//
-// This function is not safe to use for security-related applications, since
-// the hash used is not strong enough.
-func (val Value) Hash() int {
- hashBytes, marks := makeSetHashBytes(val)
- if len(marks) > 0 {
- panic("can't take hash of value that has marks or has embedded values that have marks")
- }
- return int(crc32.ChecksumIEEE(hashBytes))
-}
-
-func (r setRules) Hash(v interface{}) int {
- return Value{
- ty: r.Type,
- v: v,
- }.Hash()
-}
-
-func (r setRules) Equivalent(v1 interface{}, v2 interface{}) bool {
- v1v := Value{
- ty: r.Type,
- v: v1,
- }
- v2v := Value{
- ty: r.Type,
- v: v2,
- }
-
- eqv := v1v.Equals(v2v)
-
- // By comparing the result to true we ensure that an Unknown result,
- // which will result if either value is unknown, will be considered
- // as non-equivalent. Two unknown values are not equivalent for the
- // sake of set membership.
- return eqv.v == true
-}
-
-// Less is an implementation of set.OrderedRules so that we can iterate over
-// set elements in a consistent order, where such an order is possible.
-func (r setRules) Less(v1, v2 interface{}) bool {
- v1v := Value{
- ty: r.Type,
- v: v1,
- }
- v2v := Value{
- ty: r.Type,
- v: v2,
- }
-
- if v1v.RawEquals(v2v) { // Easy case: if they are equal then v1 can't be less
- return false
- }
-
- // Null values always sort after non-null values
- if v2v.IsNull() && !v1v.IsNull() {
- return true
- } else if v1v.IsNull() {
- return false
- }
- // Unknown values always sort after known values
- if v1v.IsKnown() && !v2v.IsKnown() {
- return true
- } else if !v1v.IsKnown() {
- return false
- }
-
- switch r.Type {
- case String:
- // String values sort lexicographically
- return v1v.AsString() < v2v.AsString()
- case Bool:
- // Weird to have a set of bools, but if we do then false sorts before true.
- if v2v.True() || !v1v.True() {
- return true
- }
- return false
- case Number:
- v1f := v1v.AsBigFloat()
- v2f := v2v.AsBigFloat()
- return v1f.Cmp(v2f) < 0
- default:
- // No other types have a well-defined ordering, so we just produce a
- // default consistent-but-undefined ordering then. This situation is
- // not considered a compatibility constraint; callers should rely only
- // on the ordering rules for primitive values.
- v1h, _ := makeSetHashBytes(v1v)
- v2h, _ := makeSetHashBytes(v2v)
- return bytes.Compare(v1h, v2h) < 0
- }
-}
-
-func makeSetHashBytes(val Value) ([]byte, ValueMarks) {
- var buf bytes.Buffer
- marks := make(ValueMarks)
- appendSetHashBytes(val, &buf, marks)
- return buf.Bytes(), marks
-}
-
-func appendSetHashBytes(val Value, buf *bytes.Buffer, marks ValueMarks) {
- // Exactly what bytes we generate here don't matter as long as the following
- // constraints hold:
- // - Unknown and null values all generate distinct strings from
- // each other and from any normal value of the given type.
- // - The delimiter used to separate items in a compound structure can
- // never appear literally in any of its elements.
- // Since we don't support hetrogenous lists we don't need to worry about
- // collisions between values of different types, apart from
- // PseudoTypeDynamic.
- // If in practice we *do* get a collision then it's not a big deal because
- // the Equivalent function will still distinguish values, but set
- // performance will be best if we are able to produce a distinct string
- // for each distinct value, unknown values notwithstanding.
-
- // Marks aren't considered part of a value for equality-testing purposes,
- // so we'll unmark our value before we work with it but we'll remember
- // the marks in case the caller needs to re-apply them to a derived
- // value.
- if val.IsMarked() {
- unmarkedVal, valMarks := val.Unmark()
- for m := range valMarks {
- marks[m] = struct{}{}
- }
- val = unmarkedVal
- }
-
- if !val.IsKnown() {
- buf.WriteRune('?')
- return
- }
- if val.IsNull() {
- buf.WriteRune('~')
- return
- }
-
- switch val.ty {
- case Number:
- // Due to an unfortunate quirk of gob encoding for big.Float, we end up
- // with non-pointer values immediately after a gob round-trip, and
- // we end up in here before we've had a chance to run
- // gobDecodeFixNumberPtr on the inner values of a gob-encoded set,
- // and so sadly we must make a special effort to handle that situation
- // here just so that we can get far enough along to fix it up for
- // everything else in this package.
- if bf, ok := val.v.(big.Float); ok {
- buf.WriteString(bf.String())
- return
- }
- buf.WriteString(val.v.(*big.Float).String())
- return
- case Bool:
- if val.v.(bool) {
- buf.WriteRune('T')
- } else {
- buf.WriteRune('F')
- }
- return
- case String:
- buf.WriteString(fmt.Sprintf("%q", val.v.(string)))
- return
- }
-
- if val.ty.IsMapType() {
- buf.WriteRune('{')
- val.ForEachElement(func(keyVal, elementVal Value) bool {
- appendSetHashBytes(keyVal, buf, marks)
- buf.WriteRune(':')
- appendSetHashBytes(elementVal, buf, marks)
- buf.WriteRune(';')
- return false
- })
- buf.WriteRune('}')
- return
- }
-
- if val.ty.IsListType() || val.ty.IsSetType() {
- buf.WriteRune('[')
- val.ForEachElement(func(keyVal, elementVal Value) bool {
- appendSetHashBytes(elementVal, buf, marks)
- buf.WriteRune(';')
- return false
- })
- buf.WriteRune(']')
- return
- }
-
- if val.ty.IsObjectType() {
- buf.WriteRune('<')
- attrNames := make([]string, 0, len(val.ty.AttributeTypes()))
- for attrName := range val.ty.AttributeTypes() {
- attrNames = append(attrNames, attrName)
- }
- sort.Strings(attrNames)
- for _, attrName := range attrNames {
- appendSetHashBytes(val.GetAttr(attrName), buf, marks)
- buf.WriteRune(';')
- }
- buf.WriteRune('>')
- return
- }
-
- if val.ty.IsTupleType() {
- buf.WriteRune('<')
- val.ForEachElement(func(keyVal, elementVal Value) bool {
- appendSetHashBytes(elementVal, buf, marks)
- buf.WriteRune(';')
- return false
- })
- buf.WriteRune('>')
- return
- }
-
- // should never get down here
- panic("unsupported type in set hash")
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/set_type.go b/vendor/github.com/hashicorp/go-cty/cty/set_type.go
deleted file mode 100644
index cbc3706f..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/set_type.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package cty
-
-import (
- "fmt"
-)
-
-type typeSet struct {
- typeImplSigil
- ElementTypeT Type
-}
-
-// Set creates a set type with the given element Type.
-//
-// Set types are CollectionType implementations.
-func Set(elem Type) Type {
- return Type{
- typeSet{
- ElementTypeT: elem,
- },
- }
-}
-
-// Equals returns true if the other Type is a set whose element type is
-// equal to that of the receiver.
-func (t typeSet) Equals(other Type) bool {
- ot, isSet := other.typeImpl.(typeSet)
- if !isSet {
- return false
- }
-
- return t.ElementTypeT.Equals(ot.ElementTypeT)
-}
-
-func (t typeSet) FriendlyName(mode friendlyTypeNameMode) string {
- elemName := t.ElementTypeT.friendlyNameMode(mode)
- if mode == friendlyTypeConstraintName {
- if t.ElementTypeT == DynamicPseudoType {
- elemName = "any single type"
- }
- }
- return "set of " + elemName
-}
-
-func (t typeSet) ElementType() Type {
- return t.ElementTypeT
-}
-
-func (t typeSet) GoString() string {
- return fmt.Sprintf("cty.Set(%#v)", t.ElementTypeT)
-}
-
-// IsSetType returns true if the given type is a list type, regardless of its
-// element type.
-func (t Type) IsSetType() bool {
- _, ok := t.typeImpl.(typeSet)
- return ok
-}
-
-// SetElementType is a convenience method that checks if the given type is
-// a set type, returning a pointer to its element type if so and nil
-// otherwise. This is intended to allow convenient conditional branches,
-// like so:
-//
-// if et := t.SetElementType(); et != nil {
-// // Do something with *et
-// }
-func (t Type) SetElementType() *Type {
- if lt, ok := t.typeImpl.(typeSet); ok {
- return &lt.ElementTypeT
- }
- return nil
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/tuple_type.go b/vendor/github.com/hashicorp/go-cty/cty/tuple_type.go
deleted file mode 100644
index 798cacd6..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/tuple_type.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package cty
-
-import (
- "fmt"
-)
-
-type typeTuple struct {
- typeImplSigil
- ElemTypes []Type
-}
-
-// Tuple creates a tuple type with the given element types.
-//
-// After a slice is passed to this function the caller must no longer access
-// the underlying array, since ownership is transferred to this library.
-func Tuple(elemTypes []Type) Type {
- return Type{
- typeTuple{
- ElemTypes: elemTypes,
- },
- }
-}
-
-func (t typeTuple) Equals(other Type) bool {
- if ot, ok := other.typeImpl.(typeTuple); ok {
- if len(t.ElemTypes) != len(ot.ElemTypes) {
- // Fast path: if we don't have the same number of elements
- // then we can't possibly be equal.
- return false
- }
-
- for i, ty := range t.ElemTypes {
- oty := ot.ElemTypes[i]
- if !ok {
- return false
- }
- if !oty.Equals(ty) {
- return false
- }
- }
-
- return true
- }
- return false
-}
-
-func (t typeTuple) FriendlyName(mode friendlyTypeNameMode) string {
- // There isn't really a friendly way to write a tuple type due to its
- // complexity, so we'll just do something English-ish. Callers will
- // probably want to make some extra effort to avoid ever printing out
- // a tuple type FriendlyName in its entirety. For example, could
- // produce an error message by diffing two object types and saying
- // something like "Expected attribute foo to be string, but got number".
- // TODO: Finish this
- return "tuple"
-}
-
-func (t typeTuple) GoString() string {
- if len(t.ElemTypes) == 0 {
- return "cty.EmptyTuple"
- }
- return fmt.Sprintf("cty.Tuple(%#v)", t.ElemTypes)
-}
-
-// EmptyTuple is a shorthand for Tuple([]Type{}), to more easily talk about
-// the empty tuple type.
-var EmptyTuple Type
-
-// EmptyTupleVal is the only possible non-null, non-unknown value of type
-// EmptyTuple.
-var EmptyTupleVal Value
-
-func init() {
- EmptyTuple = Tuple([]Type{})
- EmptyTupleVal = Value{
- ty: EmptyTuple,
- v: []interface{}{},
- }
-}
-
-// IsTupleType returns true if the given type is an object type, regardless
-// of its element type.
-func (t Type) IsTupleType() bool {
- _, ok := t.typeImpl.(typeTuple)
- return ok
-}
-
-// Length returns the number of elements of the receiving tuple type.
-// Will panic if the reciever isn't a tuple type; use IsTupleType to determine
-// whether this operation will succeed.
-func (t Type) Length() int {
- if ot, ok := t.typeImpl.(typeTuple); ok {
- return len(ot.ElemTypes)
- }
- panic("Length on non-tuple Type")
-}
-
-// TupleElementType returns the type of the element with the given index. Will
-// panic if the receiver is not a tuple type (use IsTupleType to confirm)
-// or if the index is out of range (use Length to confirm).
-func (t Type) TupleElementType(idx int) Type {
- if ot, ok := t.typeImpl.(typeTuple); ok {
- return ot.ElemTypes[idx]
- }
- panic("TupleElementType on non-tuple Type")
-}
-
-// TupleElementTypes returns a slice of the recieving tuple type's element
-// types. Will panic if the receiver is not a tuple type (use IsTupleType
-// to confirm).
-//
-// The returned slice is part of the internal state of the type, and is provided
-// for read access only. It is forbidden for any caller to modify the
-// underlying array. For many purposes the element-related methods of Value
-// are more appropriate and more convenient to use.
-func (t Type) TupleElementTypes() []Type {
- if ot, ok := t.typeImpl.(typeTuple); ok {
- return ot.ElemTypes
- }
- panic("TupleElementTypes on non-tuple Type")
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/type.go b/vendor/github.com/hashicorp/go-cty/cty/type.go
deleted file mode 100644
index 730cb986..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/type.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package cty
-
-// Type represents value types within the type system.
-//
-// This is a closed interface type, meaning that only the concrete
-// implementations provided within this package are considered valid.
-type Type struct {
- typeImpl
-}
-
-type typeImpl interface {
- // isTypeImpl is a do-nothing method that exists only to express
- // that a type is an implementation of typeImpl.
- isTypeImpl() typeImplSigil
-
- // Equals returns true if the other given Type exactly equals the
- // receiver Type.
- Equals(other Type) bool
-
- // FriendlyName returns a human-friendly *English* name for the given
- // type.
- FriendlyName(mode friendlyTypeNameMode) string
-
- // GoString implements the GoStringer interface from package fmt.
- GoString() string
-}
-
-// Base implementation of Type to embed into concrete implementations
-// to signal that they are implementations of Type.
-type typeImplSigil struct{}
-
-func (t typeImplSigil) isTypeImpl() typeImplSigil {
- return typeImplSigil{}
-}
-
-// Equals returns true if the other given Type exactly equals the receiver
-// type.
-func (t Type) Equals(other Type) bool {
- return t.typeImpl.Equals(other)
-}
-
-// FriendlyName returns a human-friendly *English* name for the given type.
-func (t Type) FriendlyName() string {
- return t.typeImpl.FriendlyName(friendlyTypeName)
-}
-
-// FriendlyNameForConstraint is similar to FriendlyName except that the
-// result is specialized for describing type _constraints_ rather than types
-// themselves. This is more appropriate when reporting that a particular value
-// does not conform to an expected type constraint.
-//
-// In particular, this function uses the term "any type" to refer to
-// cty.DynamicPseudoType, rather than "dynamic" as returned by FriendlyName.
-func (t Type) FriendlyNameForConstraint() string {
- return t.typeImpl.FriendlyName(friendlyTypeConstraintName)
-}
-
-// friendlyNameMode is an internal combination of the various FriendlyName*
-// variants that just directly takes a mode, for easy passthrough for
-// recursive name construction.
-func (t Type) friendlyNameMode(mode friendlyTypeNameMode) string {
- return t.typeImpl.FriendlyName(mode)
-}
-
-// GoString returns a string approximating how the receiver type would be
-// expressed in Go source code.
-func (t Type) GoString() string {
- if t.typeImpl == nil {
- return "cty.NilType"
- }
-
- return t.typeImpl.GoString()
-}
-
-// NilType is an invalid type used when a function is returning an error
-// and has no useful type to return. It should not be used and any methods
-// called on it will panic.
-var NilType = Type{}
-
-// HasDynamicTypes returns true either if the receiver is itself
-// DynamicPseudoType or if it is a compound type whose descendent elements
-// are DynamicPseudoType.
-func (t Type) HasDynamicTypes() bool {
- switch {
- case t == DynamicPseudoType:
- return true
- case t.IsPrimitiveType():
- return false
- case t.IsCollectionType():
- return false
- case t.IsObjectType():
- attrTypes := t.AttributeTypes()
- for _, at := range attrTypes {
- if at.HasDynamicTypes() {
- return true
- }
- }
- return false
- case t.IsTupleType():
- elemTypes := t.TupleElementTypes()
- for _, et := range elemTypes {
- if et.HasDynamicTypes() {
- return true
- }
- }
- return false
- case t.IsCapsuleType():
- return false
- default:
- // Should never happen, since above should be exhaustive
- panic("HasDynamicTypes does not support the given type")
- }
-}
-
-type friendlyTypeNameMode rune
-
-const (
- friendlyTypeName friendlyTypeNameMode = 'N'
- friendlyTypeConstraintName friendlyTypeNameMode = 'C'
-)
diff --git a/vendor/github.com/hashicorp/go-cty/cty/type_conform.go b/vendor/github.com/hashicorp/go-cty/cty/type_conform.go
deleted file mode 100644
index 476eeea8..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/type_conform.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package cty
-
-// TestConformance recursively walks the receiver and the given other type and
-// returns nil if the receiver *conforms* to the given type.
-//
-// Type conformance is similar to type equality but has one crucial difference:
-// PseudoTypeDynamic can be used within the given type to represent that
-// *any* type is allowed.
-//
-// If any non-conformities are found, the returned slice will be non-nil and
-// contain at least one error value. It will be nil if the type is entirely
-// conformant.
-//
-// Note that the special behavior of PseudoTypeDynamic is the *only* exception
-// to normal type equality. Calling applications may wish to apply their own
-// automatic conversion logic to the given data structure to create a more
-// liberal notion of conformance to a type.
-//
-// Returned errors are usually (but not always) PathError instances that
-// indicate where in the structure the error was found. If a returned error
-// is of that type then the error message is written for (English-speaking)
-// end-users working within the cty type system, not mentioning any Go-oriented
-// implementation details.
-func (t Type) TestConformance(other Type) []error {
- path := make(Path, 0)
- var errs []error
- testConformance(t, other, path, &errs)
- return errs
-}
-
-func testConformance(given Type, want Type, path Path, errs *[]error) {
- if want.Equals(DynamicPseudoType) {
- // anything goes!
- return
- }
-
- if given.Equals(want) {
- // Any equal types are always conformant
- return
- }
-
- // The remainder of this function is concerned with detecting
- // and reporting the specific non-conformance, since we wouldn't
- // have got here if the types were not divergent.
- // We treat compound structures as special so that we can report
- // specifically what is non-conforming, rather than simply returning
- // the entire type names and letting the user puzzle it out.
-
- if given.IsObjectType() && want.IsObjectType() {
- givenAttrs := given.AttributeTypes()
- wantAttrs := want.AttributeTypes()
-
- for k := range givenAttrs {
- if _, exists := wantAttrs[k]; !exists {
- *errs = append(
- *errs,
- errorf(path, "unsupported attribute %q", k),
- )
- }
- }
- for k := range wantAttrs {
- if _, exists := givenAttrs[k]; !exists {
- *errs = append(
- *errs,
- errorf(path, "missing required attribute %q", k),
- )
- }
- }
-
- path = append(path, nil)
- pathIdx := len(path) - 1
-
- for k, wantAttrType := range wantAttrs {
- if givenAttrType, exists := givenAttrs[k]; exists {
- path[pathIdx] = GetAttrStep{Name: k}
- testConformance(givenAttrType, wantAttrType, path, errs)
- }
- }
-
- path = path[0:pathIdx]
-
- return
- }
-
- if given.IsTupleType() && want.IsTupleType() {
- givenElems := given.TupleElementTypes()
- wantElems := want.TupleElementTypes()
-
- if len(givenElems) != len(wantElems) {
- *errs = append(
- *errs,
- errorf(path, "%d elements are required, but got %d", len(wantElems), len(givenElems)),
- )
- return
- }
-
- path = append(path, nil)
- pathIdx := len(path) - 1
-
- for i, wantElemType := range wantElems {
- givenElemType := givenElems[i]
- path[pathIdx] = IndexStep{Key: NumberIntVal(int64(i))}
- testConformance(givenElemType, wantElemType, path, errs)
- }
-
- path = path[0:pathIdx]
-
- return
- }
-
- if given.IsListType() && want.IsListType() {
- path = append(path, IndexStep{Key: UnknownVal(Number)})
- pathIdx := len(path) - 1
- testConformance(given.ElementType(), want.ElementType(), path, errs)
- path = path[0:pathIdx]
- return
- }
-
- if given.IsMapType() && want.IsMapType() {
- path = append(path, IndexStep{Key: UnknownVal(String)})
- pathIdx := len(path) - 1
- testConformance(given.ElementType(), want.ElementType(), path, errs)
- path = path[0:pathIdx]
- return
- }
-
- if given.IsSetType() && want.IsSetType() {
- path = append(path, IndexStep{Key: UnknownVal(given.ElementType())})
- pathIdx := len(path) - 1
- testConformance(given.ElementType(), want.ElementType(), path, errs)
- path = path[0:pathIdx]
- return
- }
-
- *errs = append(
- *errs,
- errorf(path, "%s required, but received %s", want.FriendlyName(), given.FriendlyName()),
- )
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/types_to_register.go b/vendor/github.com/hashicorp/go-cty/cty/types_to_register.go
deleted file mode 100644
index ec05bb18..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/types_to_register.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package cty
-
-import (
- "encoding/gob"
- "fmt"
- "math/big"
- "strings"
-
- "github.com/hashicorp/go-cty/cty/set"
-)
-
-// InternalTypesToRegister is a slice of values that covers all of the
-// internal types used in the representation of cty.Type and cty.Value
-// across all cty Types.
-//
-// This is intended to be used to register these types with encoding
-// packages that require registration of types used in interfaces, such as
-// encoding/gob, thus allowing cty types and values to be included in streams
-// created from those packages. However, registering with gob is not necessary
-// since that is done automatically as a side-effect of importing this package.
-//
-// Callers should not do anything with the values here except pass them on
-// verbatim to a registration function.
-//
-// If the calling application uses Capsule types that wrap local structs either
-// directly or indirectly, these structs may also need to be registered in
-// order to support encoding and decoding of values of these types. That is the
-// responsibility of the calling application.
-var InternalTypesToRegister []interface{}
-
-func init() {
- InternalTypesToRegister = []interface{}{
- primitiveType{},
- typeList{},
- typeMap{},
- typeObject{},
- typeSet{},
- setRules{},
- set.Set{},
- typeTuple{},
- big.Float{},
- capsuleType{},
- []interface{}(nil),
- map[string]interface{}(nil),
- }
-
- // Register these with gob here, rather than in gob.go, to ensure
- // that this will always happen after we build the above.
- for _, tv := range InternalTypesToRegister {
- typeName := fmt.Sprintf("%T", tv)
- if strings.HasPrefix(typeName, "cty.") {
- gob.RegisterName(fmt.Sprintf("github.com/hashicorp/go-cty/%s", typeName), tv)
- } else {
- gob.Register(tv)
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/unknown.go b/vendor/github.com/hashicorp/go-cty/cty/unknown.go
deleted file mode 100644
index e54179eb..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/unknown.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package cty
-
-// unknownType is the placeholder type used for the sigil value representing
-// "Unknown", to make it unambigiously distinct from any other possible value.
-type unknownType struct {
-}
-
-// Unknown is a special value that can be
-var unknown interface{} = &unknownType{}
-
-// UnknownVal returns an Value that represents an unknown value of the given
-// type. Unknown values can be used to represent a value that is
-// not yet known. Its meaning is undefined in cty, but it could be used by
-// an calling application to allow partial evaluation.
-//
-// Unknown values of any type can be created of any type. All operations on
-// Unknown values themselves return Unknown.
-func UnknownVal(t Type) Value {
- return Value{
- ty: t,
- v: unknown,
- }
-}
-
-func (t unknownType) GoString() string {
- // This is the stringification of our internal unknown marker. The
- // stringification of the public representation of unknowns is in
- // Value.GoString.
- return "cty.unknown"
-}
-
-type pseudoTypeDynamic struct {
- typeImplSigil
-}
-
-// DynamicPseudoType represents the dynamic pseudo-type.
-//
-// This type can represent situations where a type is not yet known. Its
-// meaning is undefined in cty, but it could be used by a calling
-// application to allow expression type checking with some types not yet known.
-// For example, the application might optimistically permit any operation on
-// values of this type in type checking, allowing a partial type-check result,
-// and then repeat the check when more information is known to get the
-// final, concrete type.
-//
-// It is a pseudo-type because it is used only as a sigil to the calling
-// application. "Unknown" is the only valid value of this pseudo-type, so
-// operations on values of this type will always short-circuit as per
-// the rules for that special value.
-var DynamicPseudoType Type
-
-func (t pseudoTypeDynamic) Equals(other Type) bool {
- _, ok := other.typeImpl.(pseudoTypeDynamic)
- return ok
-}
-
-func (t pseudoTypeDynamic) FriendlyName(mode friendlyTypeNameMode) string {
- switch mode {
- case friendlyTypeConstraintName:
- return "any type"
- default:
- return "dynamic"
- }
-}
-
-func (t pseudoTypeDynamic) GoString() string {
- return "cty.DynamicPseudoType"
-}
-
-// DynamicVal is the only valid value of the pseudo-type dynamic.
-// This value can be used as a placeholder where a value or expression's
-// type and value are both unknown, thus allowing partial evaluation. See
-// the docs for DynamicPseudoType for more information.
-var DynamicVal Value
-
-func init() {
- DynamicPseudoType = Type{
- pseudoTypeDynamic{},
- }
- DynamicVal = Value{
- ty: DynamicPseudoType,
- v: unknown,
- }
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/unknown_as_null.go b/vendor/github.com/hashicorp/go-cty/cty/unknown_as_null.go
deleted file mode 100644
index ba926475..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/unknown_as_null.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package cty
-
-// UnknownAsNull returns a value of the same type as the given value but
-// with any unknown values (including nested values) replaced with null
-// values of the same type.
-//
-// This can be useful if a result is to be serialized in a format that can't
-// represent unknowns, such as JSON, as long as the caller does not need to
-// retain the unknown value information.
-func UnknownAsNull(val Value) Value {
- ty := val.Type()
- switch {
- case val.IsNull():
- return val
- case !val.IsKnown():
- return NullVal(ty)
- case ty.IsListType() || ty.IsTupleType() || ty.IsSetType():
- length := val.LengthInt()
- if length == 0 {
- // If there are no elements then we can't have unknowns
- return val
- }
- vals := make([]Value, 0, length)
- it := val.ElementIterator()
- for it.Next() {
- _, v := it.Element()
- vals = append(vals, UnknownAsNull(v))
- }
- switch {
- case ty.IsListType():
- return ListVal(vals)
- case ty.IsTupleType():
- return TupleVal(vals)
- default:
- return SetVal(vals)
- }
- case ty.IsMapType() || ty.IsObjectType():
- var length int
- switch {
- case ty.IsMapType():
- length = val.LengthInt()
- default:
- length = len(val.Type().AttributeTypes())
- }
- if length == 0 {
- // If there are no elements then we can't have unknowns
- return val
- }
- vals := make(map[string]Value, length)
- it := val.ElementIterator()
- for it.Next() {
- k, v := it.Element()
- vals[k.AsString()] = UnknownAsNull(v)
- }
- switch {
- case ty.IsMapType():
- return MapVal(vals)
- default:
- return ObjectVal(vals)
- }
- }
-
- return val
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/value.go b/vendor/github.com/hashicorp/go-cty/cty/value.go
deleted file mode 100644
index 1025ba82..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/value.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package cty
-
-// Value represents a value of a particular type, and is the interface by
-// which operations are executed on typed values.
-//
-// Value has two different classes of method. Operation methods stay entirely
-// within the type system (methods accept and return Value instances) and
-// are intended for use in implementing a language in terms of cty, while
-// integration methods either enter or leave the type system, working with
-// native Go values. Operation methods are guaranteed to support all of the
-// expected short-circuit behavior for unknown and dynamic values, while
-// integration methods may not.
-//
-// The philosophy for the operations API is that it's the caller's
-// responsibility to ensure that the given types and values satisfy the
-// specified invariants during a separate type check, so that the caller is
-// able to return errors to its user from the application's own perspective.
-//
-// Consequently the design of these methods assumes such checks have already
-// been done and panics if any invariants turn out not to be satisfied. These
-// panic errors are not intended to be handled, but rather indicate a bug in
-// the calling application that should be fixed with more checks prior to
-// executing operations.
-//
-// A related consequence of this philosophy is that no automatic type
-// conversions are done. If a method specifies that its argument must be
-// number then it's the caller's responsibility to do that conversion before
-// the call, thus allowing the application to have more constrained conversion
-// rules than are offered by the built-in converter where necessary.
-type Value struct {
- ty Type
- v interface{}
-}
-
-// Type returns the type of the value.
-func (val Value) Type() Type {
- return val.ty
-}
-
-// IsKnown returns true if the value is known. That is, if it is not
-// the result of the unknown value constructor Unknown(...), and is not
-// the result of an operation on another unknown value.
-//
-// Unknown values are only produced either directly or as a result of
-// operating on other unknown values, and so an application that never
-// introduces Unknown values can be guaranteed to never receive any either.
-func (val Value) IsKnown() bool {
- if val.IsMarked() {
- return val.unmarkForce().IsKnown()
- }
- return val.v != unknown
-}
-
-// IsNull returns true if the value is null. Values of any type can be
-// null, but any operations on a null value will panic. No operation ever
-// produces null, so an application that never introduces Null values can
-// be guaranteed to never receive any either.
-func (val Value) IsNull() bool {
- if val.IsMarked() {
- return val.unmarkForce().IsNull()
- }
- return val.v == nil
-}
-
-// NilVal is an invalid Value that can be used as a placeholder when returning
-// with an error from a function that returns (Value, error).
-//
-// NilVal is *not* a valid error and so no operations may be performed on it.
-// Any attempt to use it will result in a panic.
-//
-// This should not be confused with the idea of a Null value, as returned by
-// NullVal. NilVal is a nil within the *Go* type system, and is invalid in
-// the cty type system. Null values *do* exist in the cty type system.
-var NilVal = Value{
- ty: Type{typeImpl: nil},
- v: nil,
-}
-
-// IsWhollyKnown is an extension of IsKnown that also recursively checks
-// inside collections and structures to see if there are any nested unknown
-// values.
-func (val Value) IsWhollyKnown() bool {
- if val.IsMarked() {
- return val.unmarkForce().IsWhollyKnown()
- }
-
- if !val.IsKnown() {
- return false
- }
-
- if val.IsNull() {
- // Can't recurse into a null, so we're done
- return true
- }
-
- switch {
- case val.CanIterateElements():
- for it := val.ElementIterator(); it.Next(); {
- _, ev := it.Element()
- if !ev.IsWhollyKnown() {
- return false
- }
- }
- return true
- default:
- return true
- }
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/value_init.go b/vendor/github.com/hashicorp/go-cty/cty/value_init.go
deleted file mode 100644
index 853a5a7d..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/value_init.go
+++ /dev/null
@@ -1,324 +0,0 @@
-package cty
-
-import (
- "fmt"
- "math/big"
- "reflect"
-
- "golang.org/x/text/unicode/norm"
-
- "github.com/hashicorp/go-cty/cty/set"
-)
-
-// BoolVal returns a Value of type Number whose internal value is the given
-// bool.
-func BoolVal(v bool) Value {
- return Value{
- ty: Bool,
- v: v,
- }
-}
-
-// NumberVal returns a Value of type Number whose internal value is the given
-// big.Float. The returned value becomes the owner of the big.Float object,
-// and so it's forbidden for the caller to mutate the object after it's
-// wrapped in this way.
-func NumberVal(v *big.Float) Value {
- return Value{
- ty: Number,
- v: v,
- }
-}
-
-// ParseNumberVal returns a Value of type number produced by parsing the given
-// string as a decimal real number. To ensure that two identical strings will
-// always produce an equal number, always use this function to derive a number
-// from a string; it will ensure that the precision and rounding mode for the
-// internal big decimal is configured in a consistent way.
-//
-// If the given string cannot be parsed as a number, the returned error has
-// the message "a number is required", making it suitable to return to an
-// end-user to signal a type conversion error.
-//
-// If the given string contains a number that becomes a recurring fraction
-// when expressed in binary then it will be truncated to have a 512-bit
-// mantissa. Note that this is a higher precision than that of a float64,
-// so coverting the same decimal number first to float64 and then calling
-// NumberFloatVal will not produce an equal result; the conversion first
-// to float64 will round the mantissa to fewer than 512 bits.
-func ParseNumberVal(s string) (Value, error) {
- // Base 10, precision 512, and rounding to nearest even is the standard
- // way to handle numbers arriving as strings.
- f, _, err := big.ParseFloat(s, 10, 512, big.ToNearestEven)
- if err != nil {
- return NilVal, fmt.Errorf("a number is required")
- }
- return NumberVal(f), nil
-}
-
-// MustParseNumberVal is like ParseNumberVal but it will panic in case of any
-// error. It can be used during initialization or any other situation where
-// the given string is a constant or otherwise known to be correct by the
-// caller.
-func MustParseNumberVal(s string) Value {
- ret, err := ParseNumberVal(s)
- if err != nil {
- panic(err)
- }
- return ret
-}
-
-// NumberIntVal returns a Value of type Number whose internal value is equal
-// to the given integer.
-func NumberIntVal(v int64) Value {
- return NumberVal(new(big.Float).SetInt64(v))
-}
-
-// NumberUIntVal returns a Value of type Number whose internal value is equal
-// to the given unsigned integer.
-func NumberUIntVal(v uint64) Value {
- return NumberVal(new(big.Float).SetUint64(v))
-}
-
-// NumberFloatVal returns a Value of type Number whose internal value is
-// equal to the given float.
-func NumberFloatVal(v float64) Value {
- return NumberVal(new(big.Float).SetFloat64(v))
-}
-
-// StringVal returns a Value of type String whose internal value is the
-// given string.
-//
-// Strings must be UTF-8 encoded sequences of valid unicode codepoints, and
-// they are NFC-normalized on entry into the world of cty values.
-//
-// If the given string is not valid UTF-8 then behavior of string operations
-// is undefined.
-func StringVal(v string) Value {
- return Value{
- ty: String,
- v: NormalizeString(v),
- }
-}
-
-// NormalizeString applies the same normalization that cty applies when
-// constructing string values.
-//
-// A return value from this function can be meaningfully compared byte-for-byte
-// with a Value.AsString result.
-func NormalizeString(s string) string {
- return norm.NFC.String(s)
-}
-
-// ObjectVal returns a Value of an object type whose structure is defined
-// by the key names and value types in the given map.
-func ObjectVal(attrs map[string]Value) Value {
- attrTypes := make(map[string]Type, len(attrs))
- attrVals := make(map[string]interface{}, len(attrs))
-
- for attr, val := range attrs {
- attr = NormalizeString(attr)
- attrTypes[attr] = val.ty
- attrVals[attr] = val.v
- }
-
- return Value{
- ty: Object(attrTypes),
- v: attrVals,
- }
-}
-
-// TupleVal returns a Value of a tuple type whose element types are
-// defined by the value types in the given slice.
-func TupleVal(elems []Value) Value {
- elemTypes := make([]Type, len(elems))
- elemVals := make([]interface{}, len(elems))
-
- for i, val := range elems {
- elemTypes[i] = val.ty
- elemVals[i] = val.v
- }
-
- return Value{
- ty: Tuple(elemTypes),
- v: elemVals,
- }
-}
-
-// ListVal returns a Value of list type whose element type is defined by
-// the types of the given values, which must be homogenous.
-//
-// If the types are not all consistent (aside from elements that are of the
-// dynamic pseudo-type) then this function will panic. It will panic also
-// if the given list is empty, since then the element type cannot be inferred.
-// (See also ListValEmpty.)
-func ListVal(vals []Value) Value {
- if len(vals) == 0 {
- panic("must not call ListVal with empty slice")
- }
- elementType := DynamicPseudoType
- rawList := make([]interface{}, len(vals))
-
- for i, val := range vals {
- if elementType == DynamicPseudoType {
- elementType = val.ty
- } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) {
- panic(fmt.Errorf(
- "inconsistent list element types (%#v then %#v)",
- elementType, val.ty,
- ))
- }
-
- rawList[i] = val.v
- }
-
- return Value{
- ty: List(elementType),
- v: rawList,
- }
-}
-
-// ListValEmpty returns an empty list of the given element type.
-func ListValEmpty(element Type) Value {
- return Value{
- ty: List(element),
- v: []interface{}{},
- }
-}
-
-// MapVal returns a Value of a map type whose element type is defined by
-// the types of the given values, which must be homogenous.
-//
-// If the types are not all consistent (aside from elements that are of the
-// dynamic pseudo-type) then this function will panic. It will panic also
-// if the given map is empty, since then the element type cannot be inferred.
-// (See also MapValEmpty.)
-func MapVal(vals map[string]Value) Value {
- if len(vals) == 0 {
- panic("must not call MapVal with empty map")
- }
- elementType := DynamicPseudoType
- rawMap := make(map[string]interface{}, len(vals))
-
- for key, val := range vals {
- if elementType == DynamicPseudoType {
- elementType = val.ty
- } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) {
- panic(fmt.Errorf(
- "inconsistent map element types (%#v then %#v)",
- elementType, val.ty,
- ))
- }
-
- rawMap[NormalizeString(key)] = val.v
- }
-
- return Value{
- ty: Map(elementType),
- v: rawMap,
- }
-}
-
-// MapValEmpty returns an empty map of the given element type.
-func MapValEmpty(element Type) Value {
- return Value{
- ty: Map(element),
- v: map[string]interface{}{},
- }
-}
-
-// SetVal returns a Value of set type whose element type is defined by
-// the types of the given values, which must be homogenous.
-//
-// If the types are not all consistent (aside from elements that are of the
-// dynamic pseudo-type) then this function will panic. It will panic also
-// if the given list is empty, since then the element type cannot be inferred.
-// (See also SetValEmpty.)
-func SetVal(vals []Value) Value {
- if len(vals) == 0 {
- panic("must not call SetVal with empty slice")
- }
- elementType := DynamicPseudoType
- rawList := make([]interface{}, len(vals))
- var markSets []ValueMarks
-
- for i, val := range vals {
- if unmarkedVal, marks := val.UnmarkDeep(); len(marks) > 0 {
- val = unmarkedVal
- markSets = append(markSets, marks)
- }
- if val.ContainsMarked() {
- // FIXME: Allow this, but unmark the values and apply the
- // marking to the set itself instead.
- panic("set cannot contain marked values")
- }
- if elementType == DynamicPseudoType {
- elementType = val.ty
- } else if val.ty != DynamicPseudoType && !elementType.Equals(val.ty) {
- panic(fmt.Errorf(
- "inconsistent set element types (%#v then %#v)",
- elementType, val.ty,
- ))
- }
-
- rawList[i] = val.v
- }
-
- rawVal := set.NewSetFromSlice(setRules{elementType}, rawList)
-
- return Value{
- ty: Set(elementType),
- v: rawVal,
- }.WithMarks(markSets...)
-}
-
-// SetValFromValueSet returns a Value of set type based on an already-constructed
-// ValueSet.
-//
-// The element type of the returned value is the element type of the given
-// set.
-func SetValFromValueSet(s ValueSet) Value {
- ety := s.ElementType()
- rawVal := s.s.Copy() // copy so caller can't mutate what we wrap
-
- return Value{
- ty: Set(ety),
- v: rawVal,
- }
-}
-
-// SetValEmpty returns an empty set of the given element type.
-func SetValEmpty(element Type) Value {
- return Value{
- ty: Set(element),
- v: set.NewSet(setRules{element}),
- }
-}
-
-// CapsuleVal creates a value of the given capsule type using the given
-// wrapVal, which must be a pointer to a value of the capsule type's native
-// type.
-//
-// This function will panic if the given type is not a capsule type, if
-// the given wrapVal is not compatible with the given capsule type, or if
-// wrapVal is not a pointer.
-func CapsuleVal(ty Type, wrapVal interface{}) Value {
- if !ty.IsCapsuleType() {
- panic("not a capsule type")
- }
-
- wv := reflect.ValueOf(wrapVal)
- if wv.Kind() != reflect.Ptr {
- panic("wrapVal is not a pointer")
- }
-
- it := ty.typeImpl.(*capsuleType).GoType
- if !wv.Type().Elem().AssignableTo(it) {
- panic("wrapVal target is not compatible with the given capsule type")
- }
-
- return Value{
- ty: ty,
- v: wrapVal,
- }
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/value_ops.go b/vendor/github.com/hashicorp/go-cty/cty/value_ops.go
deleted file mode 100644
index 69e5a8ab..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/value_ops.go
+++ /dev/null
@@ -1,1290 +0,0 @@
-package cty
-
-import (
- "fmt"
- "math/big"
- "reflect"
-
- "github.com/hashicorp/go-cty/cty/set"
-)
-
-// GoString is an implementation of fmt.GoStringer that produces concise
-// source-like representations of values suitable for use in debug messages.
-func (val Value) GoString() string {
- if val.IsMarked() {
- unVal, marks := val.Unmark()
- if len(marks) == 1 {
- var mark interface{}
- for m := range marks {
- mark = m
- }
- return fmt.Sprintf("%#v.Mark(%#v)", unVal, mark)
- }
- return fmt.Sprintf("%#v.WithMarks(%#v)", unVal, marks)
- }
-
- if val == NilVal {
- return "cty.NilVal"
- }
-
- if val.IsNull() {
- return fmt.Sprintf("cty.NullVal(%#v)", val.ty)
- }
- if val == DynamicVal { // is unknown, so must be before the IsKnown check below
- return "cty.DynamicVal"
- }
- if !val.IsKnown() {
- return fmt.Sprintf("cty.UnknownVal(%#v)", val.ty)
- }
-
- // By the time we reach here we've dealt with all of the exceptions around
- // unknowns and nulls, so we're guaranteed that the values are the
- // canonical internal representation of the given type.
-
- switch val.ty {
- case Bool:
- if val.v.(bool) {
- return "cty.True"
- }
- return "cty.False"
- case Number:
- fv := val.v.(*big.Float)
- // We'll try to use NumberIntVal or NumberFloatVal if we can, since
- // the fully-general initializer call is pretty ugly-looking.
- if fv.IsInt() {
- return fmt.Sprintf("cty.NumberIntVal(%#v)", fv)
- }
- if rfv, accuracy := fv.Float64(); accuracy == big.Exact {
- return fmt.Sprintf("cty.NumberFloatVal(%#v)", rfv)
- }
- return fmt.Sprintf("cty.MustParseNumberVal(%q)", fv.Text('f', -1))
- case String:
- return fmt.Sprintf("cty.StringVal(%#v)", val.v)
- }
-
- switch {
- case val.ty.IsSetType():
- vals := val.AsValueSlice()
- if len(vals) == 0 {
- return fmt.Sprintf("cty.SetValEmpty(%#v)", val.ty.ElementType())
- }
- return fmt.Sprintf("cty.SetVal(%#v)", vals)
- case val.ty.IsListType():
- vals := val.AsValueSlice()
- if len(vals) == 0 {
- return fmt.Sprintf("cty.ListValEmpty(%#v)", val.ty.ElementType())
- }
- return fmt.Sprintf("cty.ListVal(%#v)", vals)
- case val.ty.IsMapType():
- vals := val.AsValueMap()
- if len(vals) == 0 {
- return fmt.Sprintf("cty.MapValEmpty(%#v)", val.ty.ElementType())
- }
- return fmt.Sprintf("cty.MapVal(%#v)", vals)
- case val.ty.IsTupleType():
- if val.ty.Equals(EmptyTuple) {
- return "cty.EmptyTupleVal"
- }
- vals := val.AsValueSlice()
- return fmt.Sprintf("cty.TupleVal(%#v)", vals)
- case val.ty.IsObjectType():
- if val.ty.Equals(EmptyObject) {
- return "cty.EmptyObjectVal"
- }
- vals := val.AsValueMap()
- return fmt.Sprintf("cty.ObjectVal(%#v)", vals)
- case val.ty.IsCapsuleType():
- impl := val.ty.CapsuleOps().GoString
- if impl == nil {
- return fmt.Sprintf("cty.CapsuleVal(%#v, %#v)", val.ty, val.v)
- }
- return impl(val.EncapsulatedValue())
- }
-
- // Default exposes implementation details, so should actually cover
- // all of the cases above for good caller UX.
- return fmt.Sprintf("cty.Value{ty: %#v, v: %#v}", val.ty, val.v)
-}
-
-// Equals returns True if the receiver and the given other value have the
-// same type and are exactly equal in value.
-//
-// As a special case, two null values are always equal regardless of type.
-//
-// The usual short-circuit rules apply, so the result will be unknown if
-// either of the given values are.
-//
-// Use RawEquals to compare if two values are equal *ignoring* the
-// short-circuit rules and the exception for null values.
-func (val Value) Equals(other Value) Value {
- if val.IsMarked() || other.IsMarked() {
- val, valMarks := val.Unmark()
- other, otherMarks := other.Unmark()
- return val.Equals(other).WithMarks(valMarks, otherMarks)
- }
-
- // Start by handling Unknown values before considering types.
- // This needs to be done since Null values are always equal regardless of
- // type.
- switch {
- case !val.IsKnown() && !other.IsKnown():
- // both unknown
- return UnknownVal(Bool)
- case val.IsKnown() && !other.IsKnown():
- switch {
- case val.IsNull(), other.ty.HasDynamicTypes():
- // If known is Null, we need to wait for the unkown value since
- // nulls of any type are equal.
- // An unkown with a dynamic type compares as unknown, which we need
- // to check before the type comparison below.
- return UnknownVal(Bool)
- case !val.ty.Equals(other.ty):
- // There is no null comparison or dynamic types, so unequal types
- // will never be equal.
- return False
- default:
- return UnknownVal(Bool)
- }
- case other.IsKnown() && !val.IsKnown():
- switch {
- case other.IsNull(), val.ty.HasDynamicTypes():
- // If known is Null, we need to wait for the unkown value since
- // nulls of any type are equal.
- // An unkown with a dynamic type compares as unknown, which we need
- // to check before the type comparison below.
- return UnknownVal(Bool)
- case !other.ty.Equals(val.ty):
- // There's no null comparison or dynamic types, so unequal types
- // will never be equal.
- return False
- default:
- return UnknownVal(Bool)
- }
- }
-
- switch {
- case val.IsNull() && other.IsNull():
- // Nulls are always equal, regardless of type
- return BoolVal(true)
- case val.IsNull() || other.IsNull():
- // If only one is null then the result must be false
- return BoolVal(false)
- }
-
- if val.ty.HasDynamicTypes() || other.ty.HasDynamicTypes() {
- return UnknownVal(Bool)
- }
-
- if !val.ty.Equals(other.ty) {
- return BoolVal(false)
- }
-
- ty := val.ty
- result := false
-
- switch {
- case ty == Number:
- result = val.v.(*big.Float).Cmp(other.v.(*big.Float)) == 0
- case ty == Bool:
- result = val.v.(bool) == other.v.(bool)
- case ty == String:
- // Simple equality is safe because we NFC-normalize strings as they
- // enter our world from StringVal, and so we can assume strings are
- // always in normal form.
- result = val.v.(string) == other.v.(string)
- case ty.IsObjectType():
- oty := ty.typeImpl.(typeObject)
- result = true
- for attr, aty := range oty.AttrTypes {
- lhs := Value{
- ty: aty,
- v: val.v.(map[string]interface{})[attr],
- }
- rhs := Value{
- ty: aty,
- v: other.v.(map[string]interface{})[attr],
- }
- eq := lhs.Equals(rhs)
- if !eq.IsKnown() {
- return UnknownVal(Bool)
- }
- if eq.False() {
- result = false
- break
- }
- }
- case ty.IsTupleType():
- tty := ty.typeImpl.(typeTuple)
- result = true
- for i, ety := range tty.ElemTypes {
- lhs := Value{
- ty: ety,
- v: val.v.([]interface{})[i],
- }
- rhs := Value{
- ty: ety,
- v: other.v.([]interface{})[i],
- }
- eq := lhs.Equals(rhs)
- if !eq.IsKnown() {
- return UnknownVal(Bool)
- }
- if eq.False() {
- result = false
- break
- }
- }
- case ty.IsListType():
- ety := ty.typeImpl.(typeList).ElementTypeT
- if len(val.v.([]interface{})) == len(other.v.([]interface{})) {
- result = true
- for i := range val.v.([]interface{}) {
- lhs := Value{
- ty: ety,
- v: val.v.([]interface{})[i],
- }
- rhs := Value{
- ty: ety,
- v: other.v.([]interface{})[i],
- }
- eq := lhs.Equals(rhs)
- if !eq.IsKnown() {
- return UnknownVal(Bool)
- }
- if eq.False() {
- result = false
- break
- }
- }
- }
- case ty.IsSetType():
- s1 := val.v.(set.Set)
- s2 := other.v.(set.Set)
- equal := true
-
- // Note that by our definition of sets it's never possible for two
- // sets that contain unknown values (directly or indicrectly) to
- // ever be equal, even if they are otherwise identical.
-
- // FIXME: iterating both lists and checking each item is not the
- // ideal implementation here, but it works with the primitives we
- // have in the set implementation. Perhaps the set implementation
- // can provide its own equality test later.
- s1.EachValue(func(v interface{}) {
- if !s2.Has(v) {
- equal = false
- }
- })
- s2.EachValue(func(v interface{}) {
- if !s1.Has(v) {
- equal = false
- }
- })
-
- result = equal
- case ty.IsMapType():
- ety := ty.typeImpl.(typeMap).ElementTypeT
- if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) {
- result = true
- for k := range val.v.(map[string]interface{}) {
- if _, ok := other.v.(map[string]interface{})[k]; !ok {
- result = false
- break
- }
- lhs := Value{
- ty: ety,
- v: val.v.(map[string]interface{})[k],
- }
- rhs := Value{
- ty: ety,
- v: other.v.(map[string]interface{})[k],
- }
- eq := lhs.Equals(rhs)
- if !eq.IsKnown() {
- return UnknownVal(Bool)
- }
- if eq.False() {
- result = false
- break
- }
- }
- }
- case ty.IsCapsuleType():
- impl := val.ty.CapsuleOps().Equals
- if impl == nil {
- impl := val.ty.CapsuleOps().RawEquals
- if impl == nil {
- // A capsule type's encapsulated value is a pointer to a value of its
- // native type, so we can just compare these to get the identity test
- // we need.
- return BoolVal(val.v == other.v)
- }
- return BoolVal(impl(val.v, other.v))
- }
- ret := impl(val.v, other.v)
- if !ret.Type().Equals(Bool) {
- panic(fmt.Sprintf("Equals for %#v returned %#v, not cty.Bool", ty, ret.Type()))
- }
- return ret
-
- default:
- // should never happen
- panic(fmt.Errorf("unsupported value type %#v in Equals", ty))
- }
-
- return BoolVal(result)
-}
-
-// NotEqual is a shorthand for Equals followed by Not.
-func (val Value) NotEqual(other Value) Value {
- return val.Equals(other).Not()
-}
-
-// True returns true if the receiver is True, false if False, and panics if
-// the receiver is not of type Bool.
-//
-// This is a helper function to help write application logic that works with
-// values, rather than a first-class operation. It does not work with unknown
-// or null values. For more robust handling with unknown value
-// short-circuiting, use val.Equals(cty.True).
-func (val Value) True() bool {
- val.assertUnmarked()
- if val.ty != Bool {
- panic("not bool")
- }
- return val.Equals(True).v.(bool)
-}
-
-// False is the opposite of True.
-func (val Value) False() bool {
- return !val.True()
-}
-
-// RawEquals returns true if and only if the two given values have the same
-// type and equal value, ignoring the usual short-circuit rules about
-// unknowns and dynamic types.
-//
-// This method is more appropriate for testing than for real use, since it
-// skips over usual semantics around unknowns but as a consequence allows
-// testing the result of another operation that is expected to return unknown.
-// It returns a primitive Go bool rather than a Value to remind us that it
-// is not a first-class value operation.
-func (val Value) RawEquals(other Value) bool {
- if !val.ty.Equals(other.ty) {
- return false
- }
- if !val.HasSameMarks(other) {
- return false
- }
- // Since we've now checked the marks, we'll unmark for the rest of this...
- val = val.unmarkForce()
- other = other.unmarkForce()
-
- if (!val.IsKnown()) && (!other.IsKnown()) {
- return true
- }
- if (val.IsKnown() && !other.IsKnown()) || (other.IsKnown() && !val.IsKnown()) {
- return false
- }
- if val.IsNull() && other.IsNull() {
- return true
- }
- if (val.IsNull() && !other.IsNull()) || (other.IsNull() && !val.IsNull()) {
- return false
- }
- if val.ty == DynamicPseudoType && other.ty == DynamicPseudoType {
- return true
- }
-
- ty := val.ty
- switch {
- case ty == Number || ty == Bool || ty == String || ty == DynamicPseudoType:
- return val.Equals(other).True()
- case ty.IsObjectType():
- oty := ty.typeImpl.(typeObject)
- for attr, aty := range oty.AttrTypes {
- lhs := Value{
- ty: aty,
- v: val.v.(map[string]interface{})[attr],
- }
- rhs := Value{
- ty: aty,
- v: other.v.(map[string]interface{})[attr],
- }
- eq := lhs.RawEquals(rhs)
- if !eq {
- return false
- }
- }
- return true
- case ty.IsTupleType():
- tty := ty.typeImpl.(typeTuple)
- for i, ety := range tty.ElemTypes {
- lhs := Value{
- ty: ety,
- v: val.v.([]interface{})[i],
- }
- rhs := Value{
- ty: ety,
- v: other.v.([]interface{})[i],
- }
- eq := lhs.RawEquals(rhs)
- if !eq {
- return false
- }
- }
- return true
- case ty.IsListType():
- ety := ty.typeImpl.(typeList).ElementTypeT
- if len(val.v.([]interface{})) == len(other.v.([]interface{})) {
- for i := range val.v.([]interface{}) {
- lhs := Value{
- ty: ety,
- v: val.v.([]interface{})[i],
- }
- rhs := Value{
- ty: ety,
- v: other.v.([]interface{})[i],
- }
- eq := lhs.RawEquals(rhs)
- if !eq {
- return false
- }
- }
- return true
- }
- return false
- case ty.IsSetType():
- s1 := val.v.(set.Set)
- s2 := other.v.(set.Set)
-
- // Since we're intentionally ignoring our rule that two unknowns
- // are never equal, we can cheat here.
- // (This isn't 100% right since e.g. it will fail if the set contains
- // numbers that are infinite, which DeepEqual can't compare properly.
- // We're accepting that limitation for simplicity here, since this
- // function is here primarily for testing.)
- return reflect.DeepEqual(s1, s2)
-
- case ty.IsMapType():
- ety := ty.typeImpl.(typeMap).ElementTypeT
- if len(val.v.(map[string]interface{})) == len(other.v.(map[string]interface{})) {
- for k := range val.v.(map[string]interface{}) {
- if _, ok := other.v.(map[string]interface{})[k]; !ok {
- return false
- }
- lhs := Value{
- ty: ety,
- v: val.v.(map[string]interface{})[k],
- }
- rhs := Value{
- ty: ety,
- v: other.v.(map[string]interface{})[k],
- }
- eq := lhs.RawEquals(rhs)
- if !eq {
- return false
- }
- }
- return true
- }
- return false
- case ty.IsCapsuleType():
- impl := val.ty.CapsuleOps().RawEquals
- if impl == nil {
- // A capsule type's encapsulated value is a pointer to a value of its
- // native type, so we can just compare these to get the identity test
- // we need.
- return val.v == other.v
- }
- return impl(val.v, other.v)
-
- default:
- // should never happen
- panic(fmt.Errorf("unsupported value type %#v in RawEquals", ty))
- }
-}
-
-// Add returns the sum of the receiver and the given other value. Both values
-// must be numbers; this method will panic if not.
-func (val Value) Add(other Value) Value {
- if val.IsMarked() || other.IsMarked() {
- val, valMarks := val.Unmark()
- other, otherMarks := other.Unmark()
- return val.Add(other).WithMarks(valMarks, otherMarks)
- }
-
- if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
- shortCircuit = forceShortCircuitType(shortCircuit, Number)
- return *shortCircuit
- }
-
- ret := new(big.Float)
- ret.Add(val.v.(*big.Float), other.v.(*big.Float))
- return NumberVal(ret)
-}
-
-// Subtract returns receiver minus the given other value. Both values must be
-// numbers; this method will panic if not.
-func (val Value) Subtract(other Value) Value {
- if val.IsMarked() || other.IsMarked() {
- val, valMarks := val.Unmark()
- other, otherMarks := other.Unmark()
- return val.Subtract(other).WithMarks(valMarks, otherMarks)
- }
-
- if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
- shortCircuit = forceShortCircuitType(shortCircuit, Number)
- return *shortCircuit
- }
-
- return val.Add(other.Negate())
-}
-
-// Negate returns the numeric negative of the receiver, which must be a number.
-// This method will panic when given a value of any other type.
-func (val Value) Negate() Value {
- if val.IsMarked() {
- val, valMarks := val.Unmark()
- return val.Negate().WithMarks(valMarks)
- }
-
- if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil {
- shortCircuit = forceShortCircuitType(shortCircuit, Number)
- return *shortCircuit
- }
-
- ret := new(big.Float).Neg(val.v.(*big.Float))
- return NumberVal(ret)
-}
-
-// Multiply returns the product of the receiver and the given other value.
-// Both values must be numbers; this method will panic if not.
-func (val Value) Multiply(other Value) Value {
- if val.IsMarked() || other.IsMarked() {
- val, valMarks := val.Unmark()
- other, otherMarks := other.Unmark()
- return val.Multiply(other).WithMarks(valMarks, otherMarks)
- }
-
- if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
- shortCircuit = forceShortCircuitType(shortCircuit, Number)
- return *shortCircuit
- }
-
- ret := new(big.Float)
- ret.Mul(val.v.(*big.Float), other.v.(*big.Float))
- return NumberVal(ret)
-}
-
-// Divide returns the quotient of the receiver and the given other value.
-// Both values must be numbers; this method will panic if not.
-//
-// If the "other" value is exactly zero, this operation will return either
-// PositiveInfinity or NegativeInfinity, depending on the sign of the
-// receiver value. For some use-cases the presence of infinities may be
-// undesirable, in which case the caller should check whether the
-// other value equals zero before calling and raise an error instead.
-//
-// If both values are zero or infinity, this function will panic with
-// an instance of big.ErrNaN.
-func (val Value) Divide(other Value) Value {
- if val.IsMarked() || other.IsMarked() {
- val, valMarks := val.Unmark()
- other, otherMarks := other.Unmark()
- return val.Divide(other).WithMarks(valMarks, otherMarks)
- }
-
- if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
- shortCircuit = forceShortCircuitType(shortCircuit, Number)
- return *shortCircuit
- }
-
- ret := new(big.Float)
- ret.Quo(val.v.(*big.Float), other.v.(*big.Float))
- return NumberVal(ret)
-}
-
-// Modulo returns the remainder of an integer division of the receiver and
-// the given other value. Both values must be numbers; this method will panic
-// if not.
-//
-// If the "other" value is exactly zero, this operation will return either
-// PositiveInfinity or NegativeInfinity, depending on the sign of the
-// receiver value. For some use-cases the presence of infinities may be
-// undesirable, in which case the caller should check whether the
-// other value equals zero before calling and raise an error instead.
-//
-// This operation is primarily here for use with nonzero natural numbers.
-// Modulo with "other" as a non-natural number gets somewhat philosophical,
-// and this function takes a position on what that should mean, but callers
-// may wish to disallow such things outright or implement their own modulo
-// if they disagree with the interpretation used here.
-func (val Value) Modulo(other Value) Value {
- if val.IsMarked() || other.IsMarked() {
- val, valMarks := val.Unmark()
- other, otherMarks := other.Unmark()
- return val.Modulo(other).WithMarks(valMarks, otherMarks)
- }
-
- if shortCircuit := mustTypeCheck(Number, Number, val, other); shortCircuit != nil {
- shortCircuit = forceShortCircuitType(shortCircuit, Number)
- return *shortCircuit
- }
-
- // We cheat a bit here with infinities, just abusing the Multiply operation
- // to get an infinite result of the correct sign.
- if val == PositiveInfinity || val == NegativeInfinity || other == PositiveInfinity || other == NegativeInfinity {
- return val.Multiply(other)
- }
-
- if other.RawEquals(Zero) {
- return val
- }
-
- // FIXME: This is a bit clumsy. Should come back later and see if there's a
- // more straightforward way to do this.
- rat := val.Divide(other)
- ratFloorInt := &big.Int{}
- rat.v.(*big.Float).Int(ratFloorInt)
- work := (&big.Float{}).SetInt(ratFloorInt)
- work.Mul(other.v.(*big.Float), work)
- work.Sub(val.v.(*big.Float), work)
-
- return NumberVal(work)
-}
-
-// Absolute returns the absolute (signless) value of the receiver, which must
-// be a number or this method will panic.
-func (val Value) Absolute() Value {
- if val.IsMarked() {
- val, valMarks := val.Unmark()
- return val.Absolute().WithMarks(valMarks)
- }
-
- if shortCircuit := mustTypeCheck(Number, Number, val); shortCircuit != nil {
- shortCircuit = forceShortCircuitType(shortCircuit, Number)
- return *shortCircuit
- }
-
- ret := (&big.Float{}).Abs(val.v.(*big.Float))
- return NumberVal(ret)
-}
-
-// GetAttr returns the value of the given attribute of the receiver, which
-// must be of an object type that has an attribute of the given name.
-// This method will panic if the receiver type is not compatible.
-//
-// The method will also panic if the given attribute name is not defined
-// for the value's type. Use the attribute-related methods on Type to
-// check for the validity of an attribute before trying to use it.
-//
-// This method may be called on a value whose type is DynamicPseudoType,
-// in which case the result will also be DynamicVal.
-func (val Value) GetAttr(name string) Value {
- if val.IsMarked() {
- val, valMarks := val.Unmark()
- return val.GetAttr(name).WithMarks(valMarks)
- }
-
- if val.ty == DynamicPseudoType {
- return DynamicVal
- }
-
- if !val.ty.IsObjectType() {
- panic("value is not an object")
- }
-
- name = NormalizeString(name)
- if !val.ty.HasAttribute(name) {
- panic("value has no attribute of that name")
- }
-
- attrType := val.ty.AttributeType(name)
-
- if !val.IsKnown() {
- return UnknownVal(attrType)
- }
-
- return Value{
- ty: attrType,
- v: val.v.(map[string]interface{})[name],
- }
-}
-
-// Index returns the value of an element of the receiver, which must have
-// either a list, map or tuple type. This method will panic if the receiver
-// type is not compatible.
-//
-// The key value must be the correct type for the receving collection: a
-// number if the collection is a list or tuple, or a string if it is a map.
-// In the case of a list or tuple, the given number must be convertable to int
-// or this method will panic. The key may alternatively be of
-// DynamicPseudoType, in which case the result itself is an unknown of the
-// collection's element type.
-//
-// The result is of the receiver collection's element type, or in the case
-// of a tuple the type of the specific element index requested.
-//
-// This method may be called on a value whose type is DynamicPseudoType,
-// in which case the result will also be the DynamicValue.
-func (val Value) Index(key Value) Value {
- if val.IsMarked() || key.IsMarked() {
- val, valMarks := val.Unmark()
- key, keyMarks := key.Unmark()
- return val.Index(key).WithMarks(valMarks, keyMarks)
- }
-
- if val.ty == DynamicPseudoType {
- return DynamicVal
- }
-
- switch {
- case val.Type().IsListType():
- elty := val.Type().ElementType()
- if key.Type() == DynamicPseudoType {
- return UnknownVal(elty)
- }
-
- if key.Type() != Number {
- panic("element key for list must be number")
- }
- if !key.IsKnown() {
- return UnknownVal(elty)
- }
-
- if !val.IsKnown() {
- return UnknownVal(elty)
- }
-
- index, accuracy := key.v.(*big.Float).Int64()
- if accuracy != big.Exact || index < 0 {
- panic("element key for list must be non-negative integer")
- }
-
- return Value{
- ty: elty,
- v: val.v.([]interface{})[index],
- }
- case val.Type().IsMapType():
- elty := val.Type().ElementType()
- if key.Type() == DynamicPseudoType {
- return UnknownVal(elty)
- }
-
- if key.Type() != String {
- panic("element key for map must be string")
- }
- if !key.IsKnown() {
- return UnknownVal(elty)
- }
-
- if !val.IsKnown() {
- return UnknownVal(elty)
- }
-
- keyStr := key.v.(string)
-
- return Value{
- ty: elty,
- v: val.v.(map[string]interface{})[keyStr],
- }
- case val.Type().IsTupleType():
- if key.Type() == DynamicPseudoType {
- return DynamicVal
- }
-
- if key.Type() != Number {
- panic("element key for tuple must be number")
- }
- if !key.IsKnown() {
- return DynamicVal
- }
-
- index, accuracy := key.v.(*big.Float).Int64()
- if accuracy != big.Exact || index < 0 {
- panic("element key for list must be non-negative integer")
- }
-
- eltys := val.Type().TupleElementTypes()
-
- if !val.IsKnown() {
- return UnknownVal(eltys[index])
- }
-
- return Value{
- ty: eltys[index],
- v: val.v.([]interface{})[index],
- }
- default:
- panic("not a list, map, or tuple type")
- }
-}
-
-// HasIndex returns True if the receiver (which must be supported for Index)
-// has an element with the given index key, or False if it does not.
-//
-// The result will be UnknownVal(Bool) if either the collection or the
-// key value are unknown.
-//
-// This method will panic if the receiver is not indexable, but does not
-// impose any panic-causing type constraints on the key.
-func (val Value) HasIndex(key Value) Value {
- if val.IsMarked() || key.IsMarked() {
- val, valMarks := val.Unmark()
- key, keyMarks := key.Unmark()
- return val.HasIndex(key).WithMarks(valMarks, keyMarks)
- }
-
- if val.ty == DynamicPseudoType {
- return UnknownVal(Bool)
- }
-
- switch {
- case val.Type().IsListType():
- if key.Type() == DynamicPseudoType {
- return UnknownVal(Bool)
- }
-
- if key.Type() != Number {
- return False
- }
- if !key.IsKnown() {
- return UnknownVal(Bool)
- }
- if !val.IsKnown() {
- return UnknownVal(Bool)
- }
-
- index, accuracy := key.v.(*big.Float).Int64()
- if accuracy != big.Exact || index < 0 {
- return False
- }
-
- return BoolVal(int(index) < len(val.v.([]interface{})) && index >= 0)
- case val.Type().IsMapType():
- if key.Type() == DynamicPseudoType {
- return UnknownVal(Bool)
- }
-
- if key.Type() != String {
- return False
- }
- if !key.IsKnown() {
- return UnknownVal(Bool)
- }
- if !val.IsKnown() {
- return UnknownVal(Bool)
- }
-
- keyStr := key.v.(string)
- _, exists := val.v.(map[string]interface{})[keyStr]
-
- return BoolVal(exists)
- case val.Type().IsTupleType():
- if key.Type() == DynamicPseudoType {
- return UnknownVal(Bool)
- }
-
- if key.Type() != Number {
- return False
- }
- if !key.IsKnown() {
- return UnknownVal(Bool)
- }
-
- index, accuracy := key.v.(*big.Float).Int64()
- if accuracy != big.Exact || index < 0 {
- return False
- }
-
- length := val.Type().Length()
- return BoolVal(int(index) < length && index >= 0)
- default:
- panic("not a list, map, or tuple type")
- }
-}
-
-// HasElement returns True if the receiver (which must be of a set type)
-// has the given value as an element, or False if it does not.
-//
-// The result will be UnknownVal(Bool) if either the set or the
-// given value are unknown.
-//
-// This method will panic if the receiver is not a set, or if it is a null set.
-func (val Value) HasElement(elem Value) Value {
- if val.IsMarked() || elem.IsMarked() {
- val, valMarks := val.Unmark()
- elem, elemMarks := elem.Unmark()
- return val.HasElement(elem).WithMarks(valMarks, elemMarks)
- }
-
- ty := val.Type()
-
- if !ty.IsSetType() {
- panic("not a set type")
- }
- if !val.IsKnown() || !elem.IsKnown() {
- return UnknownVal(Bool)
- }
- if val.IsNull() {
- panic("can't call HasElement on a nil value")
- }
- if !ty.ElementType().Equals(elem.Type()) {
- return False
- }
-
- s := val.v.(set.Set)
- return BoolVal(s.Has(elem.v))
-}
-
-// Length returns the length of the receiver, which must be a collection type
-// or tuple type, as a number value. If the receiver is not a compatible type
-// then this method will panic.
-//
-// If the receiver is unknown then the result is also unknown.
-//
-// If the receiver is null then this function will panic.
-//
-// Note that Length is not supported for strings. To determine the length
-// of a string, call AsString and take the length of the native Go string
-// that is returned.
-func (val Value) Length() Value {
- if val.IsMarked() {
- val, valMarks := val.Unmark()
- return val.Length().WithMarks(valMarks)
- }
-
- if val.Type().IsTupleType() {
- // For tuples, we can return the length even if the value is not known.
- return NumberIntVal(int64(val.Type().Length()))
- }
-
- if !val.IsKnown() {
- return UnknownVal(Number)
- }
-
- return NumberIntVal(int64(val.LengthInt()))
-}
-
-// LengthInt is like Length except it returns an int. It has the same behavior
-// as Length except that it will panic if the receiver is unknown.
-//
-// This is an integration method provided for the convenience of code bridging
-// into Go's type system.
-func (val Value) LengthInt() int {
- val.assertUnmarked()
- if val.Type().IsTupleType() {
- // For tuples, we can return the length even if the value is not known.
- return val.Type().Length()
- }
- if val.Type().IsObjectType() {
- // For objects, the length is the number of attributes associated with the type.
- return len(val.Type().AttributeTypes())
- }
- if !val.IsKnown() {
- panic("value is not known")
- }
- if val.IsNull() {
- panic("value is null")
- }
-
- switch {
-
- case val.ty.IsListType():
- return len(val.v.([]interface{}))
-
- case val.ty.IsSetType():
- return val.v.(set.Set).Length()
-
- case val.ty.IsMapType():
- return len(val.v.(map[string]interface{}))
-
- default:
- panic("value is not a collection")
- }
-}
-
-// ElementIterator returns an ElementIterator for iterating the elements
-// of the receiver, which must be a collection type, a tuple type, or an object
-// type. If called on a method of any other type, this method will panic.
-//
-// The value must be Known and non-Null, or this method will panic.
-//
-// If the receiver is of a list type, the returned keys will be of type Number
-// and the values will be of the list's element type.
-//
-// If the receiver is of a map type, the returned keys will be of type String
-// and the value will be of the map's element type. Elements are passed in
-// ascending lexicographical order by key.
-//
-// If the receiver is of a set type, each element is returned as both the
-// key and the value, since set members are their own identity.
-//
-// If the receiver is of a tuple type, the returned keys will be of type Number
-// and the value will be of the corresponding element's type.
-//
-// If the receiver is of an object type, the returned keys will be of type
-// String and the value will be of the corresponding attributes's type.
-//
-// ElementIterator is an integration method, so it cannot handle Unknown
-// values. This method will panic if the receiver is Unknown.
-func (val Value) ElementIterator() ElementIterator {
- val.assertUnmarked()
- if !val.IsKnown() {
- panic("can't use ElementIterator on unknown value")
- }
- if val.IsNull() {
- panic("can't use ElementIterator on null value")
- }
- return elementIterator(val)
-}
-
-// CanIterateElements returns true if the receiver can support the
-// ElementIterator method (and by extension, ForEachElement) without panic.
-func (val Value) CanIterateElements() bool {
- return canElementIterator(val)
-}
-
-// ForEachElement executes a given callback function for each element of
-// the receiver, which must be a collection type or tuple type, or this method
-// will panic.
-//
-// ForEachElement uses ElementIterator internally, and so the values passed
-// to the callback are as described for ElementIterator.
-//
-// Returns true if the iteration exited early due to the callback function
-// returning true, or false if the loop ran to completion.
-//
-// ForEachElement is an integration method, so it cannot handle Unknown
-// values. This method will panic if the receiver is Unknown.
-func (val Value) ForEachElement(cb ElementCallback) bool {
- val.assertUnmarked()
- it := val.ElementIterator()
- for it.Next() {
- key, val := it.Element()
- stop := cb(key, val)
- if stop {
- return true
- }
- }
- return false
-}
-
-// Not returns the logical inverse of the receiver, which must be of type
-// Bool or this method will panic.
-func (val Value) Not() Value {
- if val.IsMarked() {
- val, valMarks := val.Unmark()
- return val.Not().WithMarks(valMarks)
- }
-
- if shortCircuit := mustTypeCheck(Bool, Bool, val); shortCircuit != nil {
- shortCircuit = forceShortCircuitType(shortCircuit, Bool)
- return *shortCircuit
- }
-
- return BoolVal(!val.v.(bool))
-}
-
-// And returns the result of logical AND with the receiver and the other given
-// value, which must both be of type Bool or this method will panic.
-func (val Value) And(other Value) Value {
- if val.IsMarked() || other.IsMarked() {
- val, valMarks := val.Unmark()
- other, otherMarks := other.Unmark()
- return val.And(other).WithMarks(valMarks, otherMarks)
- }
-
- if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil {
- shortCircuit = forceShortCircuitType(shortCircuit, Bool)
- return *shortCircuit
- }
-
- return BoolVal(val.v.(bool) && other.v.(bool))
-}
-
-// Or returns the result of logical OR with the receiver and the other given
-// value, which must both be of type Bool or this method will panic.
-func (val Value) Or(other Value) Value {
- if val.IsMarked() || other.IsMarked() {
- val, valMarks := val.Unmark()
- other, otherMarks := other.Unmark()
- return val.Or(other).WithMarks(valMarks, otherMarks)
- }
-
- if shortCircuit := mustTypeCheck(Bool, Bool, val, other); shortCircuit != nil {
- shortCircuit = forceShortCircuitType(shortCircuit, Bool)
- return *shortCircuit
- }
-
- return BoolVal(val.v.(bool) || other.v.(bool))
-}
-
-// LessThan returns True if the receiver is less than the other given value,
-// which must both be numbers or this method will panic.
-func (val Value) LessThan(other Value) Value {
- if val.IsMarked() || other.IsMarked() {
- val, valMarks := val.Unmark()
- other, otherMarks := other.Unmark()
- return val.LessThan(other).WithMarks(valMarks, otherMarks)
- }
-
- if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil {
- shortCircuit = forceShortCircuitType(shortCircuit, Bool)
- return *shortCircuit
- }
-
- return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) < 0)
-}
-
-// GreaterThan returns True if the receiver is greater than the other given
-// value, which must both be numbers or this method will panic.
-func (val Value) GreaterThan(other Value) Value {
- if val.IsMarked() || other.IsMarked() {
- val, valMarks := val.Unmark()
- other, otherMarks := other.Unmark()
- return val.GreaterThan(other).WithMarks(valMarks, otherMarks)
- }
-
- if shortCircuit := mustTypeCheck(Number, Bool, val, other); shortCircuit != nil {
- shortCircuit = forceShortCircuitType(shortCircuit, Bool)
- return *shortCircuit
- }
-
- return BoolVal(val.v.(*big.Float).Cmp(other.v.(*big.Float)) > 0)
-}
-
-// LessThanOrEqualTo is equivalent to LessThan and Equal combined with Or.
-func (val Value) LessThanOrEqualTo(other Value) Value {
- return val.LessThan(other).Or(val.Equals(other))
-}
-
-// GreaterThanOrEqualTo is equivalent to GreaterThan and Equal combined with Or.
-func (val Value) GreaterThanOrEqualTo(other Value) Value {
- return val.GreaterThan(other).Or(val.Equals(other))
-}
-
-// AsString returns the native string from a non-null, non-unknown cty.String
-// value, or panics if called on any other value.
-func (val Value) AsString() string {
- val.assertUnmarked()
- if val.ty != String {
- panic("not a string")
- }
- if val.IsNull() {
- panic("value is null")
- }
- if !val.IsKnown() {
- panic("value is unknown")
- }
-
- return val.v.(string)
-}
-
-// AsBigFloat returns a big.Float representation of a non-null, non-unknown
-// cty.Number value, or panics if called on any other value.
-//
-// For more convenient conversions to other native numeric types, use the
-// "gocty" package.
-func (val Value) AsBigFloat() *big.Float {
- val.assertUnmarked()
- if val.ty != Number {
- panic("not a number")
- }
- if val.IsNull() {
- panic("value is null")
- }
- if !val.IsKnown() {
- panic("value is unknown")
- }
-
- // Copy the float so that callers can't mutate our internal state
- ret := *(val.v.(*big.Float))
-
- return &ret
-}
-
-// AsValueSlice returns a []cty.Value representation of a non-null, non-unknown
-// value of any type that CanIterateElements, or panics if called on
-// any other value.
-//
-// For more convenient conversions to slices of more specific types, use
-// the "gocty" package.
-func (val Value) AsValueSlice() []Value {
- val.assertUnmarked()
- l := val.LengthInt()
- if l == 0 {
- return nil
- }
-
- ret := make([]Value, 0, l)
- for it := val.ElementIterator(); it.Next(); {
- _, v := it.Element()
- ret = append(ret, v)
- }
- return ret
-}
-
-// AsValueMap returns a map[string]cty.Value representation of a non-null,
-// non-unknown value of any type that CanIterateElements, or panics if called
-// on any other value.
-//
-// For more convenient conversions to maps of more specific types, use
-// the "gocty" package.
-func (val Value) AsValueMap() map[string]Value {
- val.assertUnmarked()
- l := val.LengthInt()
- if l == 0 {
- return nil
- }
-
- ret := make(map[string]Value, l)
- for it := val.ElementIterator(); it.Next(); {
- k, v := it.Element()
- ret[k.AsString()] = v
- }
- return ret
-}
-
-// AsValueSet returns a ValueSet representation of a non-null,
-// non-unknown value of any collection type, or panics if called
-// on any other value.
-//
-// Unlike AsValueSlice and AsValueMap, this method requires specifically a
-// collection type (list, set or map) and does not allow structural types
-// (tuple or object), because the ValueSet type requires homogenous
-// element types.
-//
-// The returned ValueSet can store only values of the receiver's element type.
-func (val Value) AsValueSet() ValueSet {
- val.assertUnmarked()
- if !val.Type().IsCollectionType() {
- panic("not a collection type")
- }
-
- // We don't give the caller our own set.Set (assuming we're a cty.Set value)
- // because then the caller could mutate our internals, which is forbidden.
- // Instead, we will construct a new set and append our elements into it.
- ret := NewValueSet(val.Type().ElementType())
- for it := val.ElementIterator(); it.Next(); {
- _, v := it.Element()
- ret.Add(v)
- }
- return ret
-}
-
-// EncapsulatedValue returns the native value encapsulated in a non-null,
-// non-unknown capsule-typed value, or panics if called on any other value.
-//
-// The result is the same pointer that was passed to CapsuleVal to create
-// the value. Since cty considers values to be immutable, it is strongly
-// recommended to treat the encapsulated value itself as immutable too.
-func (val Value) EncapsulatedValue() interface{} {
- val.assertUnmarked()
- if !val.Type().IsCapsuleType() {
- panic("not a capsule-typed value")
- }
-
- return val.v
-}
diff --git a/vendor/github.com/hashicorp/go-cty/cty/walk.go b/vendor/github.com/hashicorp/go-cty/cty/walk.go
deleted file mode 100644
index a6943bab..00000000
--- a/vendor/github.com/hashicorp/go-cty/cty/walk.go
+++ /dev/null
@@ -1,182 +0,0 @@
-package cty
-
-// Walk visits all of the values in a possibly-complex structure, calling
-// a given function for each value.
-//
-// For example, given a list of strings the callback would first be called
-// with the whole list and then called once for each element of the list.
-//
-// The callback function may prevent recursive visits to child values by
-// returning false. The callback function my halt the walk altogether by
-// returning a non-nil error. If the returned error is about the element
-// currently being visited, it is recommended to use the provided path
-// value to produce a PathError describing that context.
-//
-// The path passed to the given function may not be used after that function
-// returns, since its backing array is re-used for other calls.
-func Walk(val Value, cb func(Path, Value) (bool, error)) error {
- var path Path
- return walk(path, val, cb)
-}
-
-func walk(path Path, val Value, cb func(Path, Value) (bool, error)) error {
- deeper, err := cb(path, val)
- if err != nil {
- return err
- }
- if !deeper {
- return nil
- }
-
- if val.IsNull() || !val.IsKnown() {
- // Can't recurse into null or unknown values, regardless of type
- return nil
- }
-
- ty := val.Type()
- switch {
- case ty.IsObjectType():
- for it := val.ElementIterator(); it.Next(); {
- nameVal, av := it.Element()
- path := append(path, GetAttrStep{
- Name: nameVal.AsString(),
- })
- err := walk(path, av, cb)
- if err != nil {
- return err
- }
- }
- case val.CanIterateElements():
- for it := val.ElementIterator(); it.Next(); {
- kv, ev := it.Element()
- path := append(path, IndexStep{
- Key: kv,
- })
- err := walk(path, ev, cb)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// Transform visits all of the values in a possibly-complex structure,
-// calling a given function for each value which has an opportunity to
-// replace that value.
-//
-// Unlike Walk, Transform visits child nodes first, so for a list of strings
-// it would first visit the strings and then the _new_ list constructed
-// from the transformed values of the list items.
-//
-// This is useful for creating the effect of being able to make deep mutations
-// to a value even though values are immutable. However, it's the responsibility
-// of the given function to preserve expected invariants, such as homogenity of
-// element types in collections; this function can panic if such invariants
-// are violated, just as if new values were constructed directly using the
-// value constructor functions. An easy way to preserve invariants is to
-// ensure that the transform function never changes the value type.
-//
-// The callback function my halt the walk altogether by
-// returning a non-nil error. If the returned error is about the element
-// currently being visited, it is recommended to use the provided path
-// value to produce a PathError describing that context.
-//
-// The path passed to the given function may not be used after that function
-// returns, since its backing array is re-used for other calls.
-func Transform(val Value, cb func(Path, Value) (Value, error)) (Value, error) {
- var path Path
- return transform(path, val, cb)
-}
-
-func transform(path Path, val Value, cb func(Path, Value) (Value, error)) (Value, error) {
- ty := val.Type()
- var newVal Value
-
- switch {
-
- case val.IsNull() || !val.IsKnown():
- // Can't recurse into null or unknown values, regardless of type
- newVal = val
-
- case ty.IsListType() || ty.IsSetType() || ty.IsTupleType():
- l := val.LengthInt()
- switch l {
- case 0:
- // No deep transform for an empty sequence
- newVal = val
- default:
- elems := make([]Value, 0, l)
- for it := val.ElementIterator(); it.Next(); {
- kv, ev := it.Element()
- path := append(path, IndexStep{
- Key: kv,
- })
- newEv, err := transform(path, ev, cb)
- if err != nil {
- return DynamicVal, err
- }
- elems = append(elems, newEv)
- }
- switch {
- case ty.IsListType():
- newVal = ListVal(elems)
- case ty.IsSetType():
- newVal = SetVal(elems)
- case ty.IsTupleType():
- newVal = TupleVal(elems)
- default:
- panic("unknown sequence type") // should never happen because of the case we are in
- }
- }
-
- case ty.IsMapType():
- l := val.LengthInt()
- switch l {
- case 0:
- // No deep transform for an empty map
- newVal = val
- default:
- elems := make(map[string]Value)
- for it := val.ElementIterator(); it.Next(); {
- kv, ev := it.Element()
- path := append(path, IndexStep{
- Key: kv,
- })
- newEv, err := transform(path, ev, cb)
- if err != nil {
- return DynamicVal, err
- }
- elems[kv.AsString()] = newEv
- }
- newVal = MapVal(elems)
- }
-
- case ty.IsObjectType():
- switch {
- case ty.Equals(EmptyObject):
- // No deep transform for an empty object
- newVal = val
- default:
- atys := ty.AttributeTypes()
- newAVs := make(map[string]Value)
- for name := range atys {
- av := val.GetAttr(name)
- path := append(path, GetAttrStep{
- Name: name,
- })
- newAV, err := transform(path, av, cb)
- if err != nil {
- return DynamicVal, err
- }
- newAVs[name] = newAV
- }
- newVal = ObjectVal(newAVs)
- }
-
- default:
- newVal = val
- }
-
- return cb(path, newVal)
-}
diff --git a/vendor/github.com/hashicorp/go-hclog/.gitignore b/vendor/github.com/hashicorp/go-hclog/.gitignore
deleted file mode 100644
index 42cc4105..00000000
--- a/vendor/github.com/hashicorp/go-hclog/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-.idea*
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/go-hclog/LICENSE b/vendor/github.com/hashicorp/go-hclog/LICENSE
deleted file mode 100644
index 9938fb50..00000000
--- a/vendor/github.com/hashicorp/go-hclog/LICENSE
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2017 HashiCorp, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md
deleted file mode 100644
index 983d44c7..00000000
--- a/vendor/github.com/hashicorp/go-hclog/README.md
+++ /dev/null
@@ -1,149 +0,0 @@
-# go-hclog
-
-[][godocs]
-
-[godocs]: https://godoc.org/github.com/hashicorp/go-hclog
-
-`go-hclog` is a package for Go that provides a simple key/value logging
-interface for use in development and production environments.
-
-It provides logging levels that provide decreased output based upon the
-desired amount of output, unlike the standard library `log` package.
-
-It provides `Printf` style logging of values via `hclog.Fmt()`.
-
-It provides a human readable output mode for use in development as well as
-JSON output mode for production.
-
-## Stability Note
-
-This library has reached 1.0 stability. Its API can be considered solidified
-and promised through future versions.
-
-## Installation and Docs
-
-Install using `go get github.com/hashicorp/go-hclog`.
-
-Full documentation is available at
-http://godoc.org/github.com/hashicorp/go-hclog
-
-## Usage
-
-### Use the global logger
-
-```go
-hclog.Default().Info("hello world")
-```
-
-```text
-2017-07-05T16:15:55.167-0700 [INFO ] hello world
-```
-
-(Note timestamps are removed in future examples for brevity.)
-
-### Create a new logger
-
-```go
-appLogger := hclog.New(&hclog.LoggerOptions{
- Name: "my-app",
- Level: hclog.LevelFromString("DEBUG"),
-})
-```
-
-### Emit an Info level message with 2 key/value pairs
-
-```go
-input := "5.5"
-_, err := strconv.ParseInt(input, 10, 32)
-if err != nil {
- appLogger.Info("Invalid input for ParseInt", "input", input, "error", err)
-}
-```
-
-```text
-... [INFO ] my-app: Invalid input for ParseInt: input=5.5 error="strconv.ParseInt: parsing "5.5": invalid syntax"
-```
-
-### Create a new Logger for a major subsystem
-
-```go
-subsystemLogger := appLogger.Named("transport")
-subsystemLogger.Info("we are transporting something")
-```
-
-```text
-... [INFO ] my-app.transport: we are transporting something
-```
-
-Notice that logs emitted by `subsystemLogger` contain `my-app.transport`,
-reflecting both the application and subsystem names.
-
-### Create a new Logger with fixed key/value pairs
-
-Using `With()` will include a specific key-value pair in all messages emitted
-by that logger.
-
-```go
-requestID := "5fb446b6-6eba-821d-df1b-cd7501b6a363"
-requestLogger := subsystemLogger.With("request", requestID)
-requestLogger.Info("we are transporting a request")
-```
-
-```text
-... [INFO ] my-app.transport: we are transporting a request: request=5fb446b6-6eba-821d-df1b-cd7501b6a363
-```
-
-This allows sub Loggers to be context specific without having to thread that
-into all the callers.
-
-### Using `hclog.Fmt()`
-
-```go
-totalBandwidth := 200
-appLogger.Info("total bandwidth exceeded", "bandwidth", hclog.Fmt("%d GB/s", totalBandwidth))
-```
-
-```text
-... [INFO ] my-app: total bandwidth exceeded: bandwidth="200 GB/s"
-```
-
-### Use this with code that uses the standard library logger
-
-If you want to use the standard library's `log.Logger` interface you can wrap
-`hclog.Logger` by calling the `StandardLogger()` method. This allows you to use
-it with the familiar `Println()`, `Printf()`, etc. For example:
-
-```go
-stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{
- InferLevels: true,
-})
-// Printf() is provided by stdlib log.Logger interface, not hclog.Logger
-stdLogger.Printf("[DEBUG] %+v", stdLogger)
-```
-
-```text
-... [DEBUG] my-app: &{mu:{state:0 sema:0} prefix: flag:0 out:0xc42000a0a0 buf:[]}
-```
-
-Alternatively, you may configure the system-wide logger:
-
-```go
-// log the standard logger from 'import "log"'
-log.SetOutput(appLogger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true}))
-log.SetPrefix("")
-log.SetFlags(0)
-
-log.Printf("[DEBUG] %d", 42)
-```
-
-```text
-... [DEBUG] my-app: 42
-```
-
-Notice that if `appLogger` is initialized with the `INFO` log level, _and_ you
-specify `InferLevels: true`, you will not see any output here. You must change
-`appLogger` to `DEBUG` to see output. See the docs for more information.
-
-If the log lines start with a timestamp you can use the
-`InferLevelsWithTimestamp` option to try and ignore them. Please note that in order
-for `InferLevelsWithTimestamp` to be relevant, `InferLevels` must be set to `true`.
diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go b/vendor/github.com/hashicorp/go-hclog/colorize_unix.go
deleted file mode 100644
index d00816b3..00000000
--- a/vendor/github.com/hashicorp/go-hclog/colorize_unix.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MIT
-
-//go:build !windows
-// +build !windows
-
-package hclog
-
-import (
- "github.com/mattn/go-isatty"
-)
-
-// hasFD is used to check if the writer has an Fd value to check
-// if it's a terminal.
-type hasFD interface {
- Fd() uintptr
-}
-
-// setColorization will mutate the values of this logger
-// to appropriately configure colorization options. It provides
-// a wrapper to the output stream on Windows systems.
-func (l *intLogger) setColorization(opts *LoggerOptions) {
- if opts.Color != AutoColor {
- return
- }
-
- if sc, ok := l.writer.w.(SupportsColor); ok {
- if !sc.SupportsColor() {
- l.headerColor = ColorOff
- l.writer.color = ColorOff
- }
- return
- }
-
- fi, ok := l.writer.w.(hasFD)
- if !ok {
- return
- }
-
- if !isatty.IsTerminal(fi.Fd()) {
- l.headerColor = ColorOff
- l.writer.color = ColorOff
- }
-}
diff --git a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go b/vendor/github.com/hashicorp/go-hclog/colorize_windows.go
deleted file mode 100644
index 2c3fb9ea..00000000
--- a/vendor/github.com/hashicorp/go-hclog/colorize_windows.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MIT
-
-//go:build windows
-// +build windows
-
-package hclog
-
-import (
- "os"
-
- colorable "github.com/mattn/go-colorable"
-)
-
-// setColorization will mutate the values of this logger
-// to appropriately configure colorization options. It provides
-// a wrapper to the output stream on Windows systems.
-func (l *intLogger) setColorization(opts *LoggerOptions) {
- if opts.Color == ColorOff {
- return
- }
-
- fi, ok := l.writer.w.(*os.File)
- if !ok {
- l.writer.color = ColorOff
- l.headerColor = ColorOff
- return
- }
-
- cfi := colorable.NewColorable(fi)
-
- // NewColorable detects if color is possible and if it's not, then it
- // returns the original value. So we can test if we got the original
- // value back to know if color is possible.
- if cfi == fi {
- l.writer.color = ColorOff
- l.headerColor = ColorOff
- } else {
- l.writer.w = cfi
- }
-}
diff --git a/vendor/github.com/hashicorp/go-hclog/context.go b/vendor/github.com/hashicorp/go-hclog/context.go
deleted file mode 100644
index eb5aba55..00000000
--- a/vendor/github.com/hashicorp/go-hclog/context.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MIT
-
-package hclog
-
-import (
- "context"
-)
-
-// WithContext inserts a logger into the context and is retrievable
-// with FromContext. The optional args can be set with the same syntax as
-// Logger.With to set fields on the inserted logger. This will not modify
-// the logger argument in-place.
-func WithContext(ctx context.Context, logger Logger, args ...interface{}) context.Context {
- // While we could call logger.With even with zero args, we have this
- // check to avoid unnecessary allocations around creating a copy of a
- // logger.
- if len(args) > 0 {
- logger = logger.With(args...)
- }
-
- return context.WithValue(ctx, contextKey, logger)
-}
-
-// FromContext returns a logger from the context. This will return L()
-// (the default logger) if no logger is found in the context. Therefore,
-// this will never return a nil value.
-func FromContext(ctx context.Context) Logger {
- logger, _ := ctx.Value(contextKey).(Logger)
- if logger == nil {
- return L()
- }
-
- return logger
-}
-
-// Unexported new type so that our context key never collides with another.
-type contextKeyType struct{}
-
-// contextKey is the key used for the context to store the logger.
-var contextKey = contextKeyType{}
diff --git a/vendor/github.com/hashicorp/go-hclog/exclude.go b/vendor/github.com/hashicorp/go-hclog/exclude.go
deleted file mode 100644
index 4b73ba55..00000000
--- a/vendor/github.com/hashicorp/go-hclog/exclude.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MIT
-
-package hclog
-
-import (
- "regexp"
- "strings"
-)
-
-// ExcludeByMessage provides a simple way to build a list of log messages that
-// can be queried and matched. This is meant to be used with the Exclude
-// option on Options to suppress log messages. This does not hold any mutexs
-// within itself, so normal usage would be to Add entries at setup and none after
-// Exclude is going to be called. Exclude is called with a mutex held within
-// the Logger, so that doesn't need to use a mutex. Example usage:
-//
-// f := new(ExcludeByMessage)
-// f.Add("Noisy log message text")
-// appLogger.Exclude = f.Exclude
-type ExcludeByMessage struct {
- messages map[string]struct{}
-}
-
-// Add a message to be filtered. Do not call this after Exclude is to be called
-// due to concurrency issues.
-func (f *ExcludeByMessage) Add(msg string) {
- if f.messages == nil {
- f.messages = make(map[string]struct{})
- }
-
- f.messages[msg] = struct{}{}
-}
-
-// Return true if the given message should be included
-func (f *ExcludeByMessage) Exclude(level Level, msg string, args ...interface{}) bool {
- _, ok := f.messages[msg]
- return ok
-}
-
-// ExcludeByPrefix is a simple type to match a message string that has a common prefix.
-type ExcludeByPrefix string
-
-// Matches an message that starts with the prefix.
-func (p ExcludeByPrefix) Exclude(level Level, msg string, args ...interface{}) bool {
- return strings.HasPrefix(msg, string(p))
-}
-
-// ExcludeByRegexp takes a regexp and uses it to match a log message string. If it matches
-// the log entry is excluded.
-type ExcludeByRegexp struct {
- Regexp *regexp.Regexp
-}
-
-// Exclude the log message if the message string matches the regexp
-func (e ExcludeByRegexp) Exclude(level Level, msg string, args ...interface{}) bool {
- return e.Regexp.MatchString(msg)
-}
-
-// ExcludeFuncs is a slice of functions that will called to see if a log entry
-// should be filtered or not. It stops calling functions once at least one returns
-// true.
-type ExcludeFuncs []func(level Level, msg string, args ...interface{}) bool
-
-// Calls each function until one of them returns true
-func (ff ExcludeFuncs) Exclude(level Level, msg string, args ...interface{}) bool {
- for _, f := range ff {
- if f(level, msg, args...) {
- return true
- }
- }
-
- return false
-}
diff --git a/vendor/github.com/hashicorp/go-hclog/global.go b/vendor/github.com/hashicorp/go-hclog/global.go
deleted file mode 100644
index a7403f59..00000000
--- a/vendor/github.com/hashicorp/go-hclog/global.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MIT
-
-package hclog
-
-import (
- "sync"
- "time"
-)
-
-var (
- protect sync.Once
- def Logger
-
- // DefaultOptions is used to create the Default logger. These are read
- // only when the Default logger is created, so set them as soon as the
- // process starts.
- DefaultOptions = &LoggerOptions{
- Level: DefaultLevel,
- Output: DefaultOutput,
- TimeFn: time.Now,
- }
-)
-
-// Default returns a globally held logger. This can be a good starting
-// place, and then you can use .With() and .Named() to create sub-loggers
-// to be used in more specific contexts.
-// The value of the Default logger can be set via SetDefault() or by
-// changing the options in DefaultOptions.
-//
-// This method is goroutine safe, returning a global from memory, but
-// care should be used if SetDefault() is called it random times
-// in the program as that may result in race conditions and an unexpected
-// Logger being returned.
-func Default() Logger {
- protect.Do(func() {
- // If SetDefault was used before Default() was called, we need to
- // detect that here.
- if def == nil {
- def = New(DefaultOptions)
- }
- })
-
- return def
-}
-
-// L is a short alias for Default().
-func L() Logger {
- return Default()
-}
-
-// SetDefault changes the logger to be returned by Default()and L()
-// to the one given. This allows packages to use the default logger
-// and have higher level packages change it to match the execution
-// environment. It returns any old default if there is one.
-//
-// NOTE: This is expected to be called early in the program to setup
-// a default logger. As such, it does not attempt to make itself
-// not racy with regard to the value of the default logger. Ergo
-// if it is called in goroutines, you may experience race conditions
-// with other goroutines retrieving the default logger. Basically,
-// don't do that.
-func SetDefault(log Logger) Logger {
- old := def
- def = log
- return old
-}
diff --git a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go b/vendor/github.com/hashicorp/go-hclog/interceptlogger.go
deleted file mode 100644
index e9b1c188..00000000
--- a/vendor/github.com/hashicorp/go-hclog/interceptlogger.go
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MIT
-
-package hclog
-
-import (
- "io"
- "log"
- "sync"
- "sync/atomic"
-)
-
-var _ Logger = &interceptLogger{}
-
-type interceptLogger struct {
- Logger
-
- mu *sync.Mutex
- sinkCount *int32
- Sinks map[SinkAdapter]struct{}
-}
-
-func NewInterceptLogger(opts *LoggerOptions) InterceptLogger {
- l := newLogger(opts)
- if l.callerOffset > 0 {
- // extra frames for interceptLogger.{Warn,Info,Log,etc...}, and interceptLogger.log
- l.callerOffset += 2
- }
- intercept := &interceptLogger{
- Logger: l,
- mu: new(sync.Mutex),
- sinkCount: new(int32),
- Sinks: make(map[SinkAdapter]struct{}),
- }
-
- atomic.StoreInt32(intercept.sinkCount, 0)
-
- return intercept
-}
-
-func (i *interceptLogger) Log(level Level, msg string, args ...interface{}) {
- i.log(level, msg, args...)
-}
-
-// log is used to make the caller stack frame lookup consistent. If Warn,Info,etc
-// all called Log then direct calls to Log would have a different stack frame
-// depth. By having all the methods call the same helper we ensure the stack
-// frame depth is the same.
-func (i *interceptLogger) log(level Level, msg string, args ...interface{}) {
- i.Logger.Log(level, msg, args...)
- if atomic.LoadInt32(i.sinkCount) == 0 {
- return
- }
-
- i.mu.Lock()
- defer i.mu.Unlock()
- for s := range i.Sinks {
- s.Accept(i.Name(), level, msg, i.retrieveImplied(args...)...)
- }
-}
-
-// Emit the message and args at TRACE level to log and sinks
-func (i *interceptLogger) Trace(msg string, args ...interface{}) {
- i.log(Trace, msg, args...)
-}
-
-// Emit the message and args at DEBUG level to log and sinks
-func (i *interceptLogger) Debug(msg string, args ...interface{}) {
- i.log(Debug, msg, args...)
-}
-
-// Emit the message and args at INFO level to log and sinks
-func (i *interceptLogger) Info(msg string, args ...interface{}) {
- i.log(Info, msg, args...)
-}
-
-// Emit the message and args at WARN level to log and sinks
-func (i *interceptLogger) Warn(msg string, args ...interface{}) {
- i.log(Warn, msg, args...)
-}
-
-// Emit the message and args at ERROR level to log and sinks
-func (i *interceptLogger) Error(msg string, args ...interface{}) {
- i.log(Error, msg, args...)
-}
-
-func (i *interceptLogger) retrieveImplied(args ...interface{}) []interface{} {
- top := i.Logger.ImpliedArgs()
-
- cp := make([]interface{}, len(top)+len(args))
- copy(cp, top)
- copy(cp[len(top):], args)
-
- return cp
-}
-
-// Create a new sub-Logger that a name descending from the current name.
-// This is used to create a subsystem specific Logger.
-// Registered sinks will subscribe to these messages as well.
-func (i *interceptLogger) Named(name string) Logger {
- return i.NamedIntercept(name)
-}
-
-// Create a new sub-Logger with an explicit name. This ignores the current
-// name. This is used to create a standalone logger that doesn't fall
-// within the normal hierarchy. Registered sinks will subscribe
-// to these messages as well.
-func (i *interceptLogger) ResetNamed(name string) Logger {
- return i.ResetNamedIntercept(name)
-}
-
-// Create a new sub-Logger that a name decending from the current name.
-// This is used to create a subsystem specific Logger.
-// Registered sinks will subscribe to these messages as well.
-func (i *interceptLogger) NamedIntercept(name string) InterceptLogger {
- var sub interceptLogger
-
- sub = *i
- sub.Logger = i.Logger.Named(name)
- return &sub
-}
-
-// Create a new sub-Logger with an explicit name. This ignores the current
-// name. This is used to create a standalone logger that doesn't fall
-// within the normal hierarchy. Registered sinks will subscribe
-// to these messages as well.
-func (i *interceptLogger) ResetNamedIntercept(name string) InterceptLogger {
- var sub interceptLogger
-
- sub = *i
- sub.Logger = i.Logger.ResetNamed(name)
- return &sub
-}
-
-// Return a sub-Logger for which every emitted log message will contain
-// the given key/value pairs. This is used to create a context specific
-// Logger.
-func (i *interceptLogger) With(args ...interface{}) Logger {
- var sub interceptLogger
-
- sub = *i
-
- sub.Logger = i.Logger.With(args...)
-
- return &sub
-}
-
-// RegisterSink attaches a SinkAdapter to interceptLoggers sinks.
-func (i *interceptLogger) RegisterSink(sink SinkAdapter) {
- i.mu.Lock()
- defer i.mu.Unlock()
-
- i.Sinks[sink] = struct{}{}
-
- atomic.AddInt32(i.sinkCount, 1)
-}
-
-// DeregisterSink removes a SinkAdapter from interceptLoggers sinks.
-func (i *interceptLogger) DeregisterSink(sink SinkAdapter) {
- i.mu.Lock()
- defer i.mu.Unlock()
-
- delete(i.Sinks, sink)
-
- atomic.AddInt32(i.sinkCount, -1)
-}
-
-func (i *interceptLogger) StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger {
- return i.StandardLogger(opts)
-}
-
-func (i *interceptLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
- if opts == nil {
- opts = &StandardLoggerOptions{}
- }
-
- return log.New(i.StandardWriter(opts), "", 0)
-}
-
-func (i *interceptLogger) StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer {
- return i.StandardWriter(opts)
-}
-
-func (i *interceptLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
- return &stdlogAdapter{
- log: i,
- inferLevels: opts.InferLevels,
- inferLevelsWithTimestamp: opts.InferLevelsWithTimestamp,
- forceLevel: opts.ForceLevel,
- }
-}
-
-func (i *interceptLogger) ResetOutput(opts *LoggerOptions) error {
- if or, ok := i.Logger.(OutputResettable); ok {
- return or.ResetOutput(opts)
- } else {
- return nil
- }
-}
-
-func (i *interceptLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error {
- if or, ok := i.Logger.(OutputResettable); ok {
- return or.ResetOutputWithFlush(opts, flushable)
- } else {
- return nil
- }
-}
diff --git a/vendor/github.com/hashicorp/go-hclog/intlogger.go b/vendor/github.com/hashicorp/go-hclog/intlogger.go
deleted file mode 100644
index 272a710c..00000000
--- a/vendor/github.com/hashicorp/go-hclog/intlogger.go
+++ /dev/null
@@ -1,1007 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MIT
-
-package hclog
-
-import (
- "bytes"
- "encoding"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "log"
- "reflect"
- "runtime"
- "sort"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
- "unicode"
- "unicode/utf8"
-
- "github.com/fatih/color"
-)
-
-// TimeFormat is the time format to use for plain (non-JSON) output.
-// This is a version of RFC3339 that contains millisecond precision.
-const TimeFormat = "2006-01-02T15:04:05.000Z0700"
-
-// TimeFormatJSON is the time format to use for JSON output.
-// This is a version of RFC3339 that contains microsecond precision.
-const TimeFormatJSON = "2006-01-02T15:04:05.000000Z07:00"
-
-// errJsonUnsupportedTypeMsg is included in log json entries, if an arg cannot be serialized to json
-const errJsonUnsupportedTypeMsg = "logging contained values that don't serialize to json"
-
-var (
- _levelToBracket = map[Level]string{
- Debug: "[DEBUG]",
- Trace: "[TRACE]",
- Info: "[INFO] ",
- Warn: "[WARN] ",
- Error: "[ERROR]",
- }
-
- _levelToColor = map[Level]*color.Color{
- Debug: color.New(color.FgHiWhite),
- Trace: color.New(color.FgHiGreen),
- Info: color.New(color.FgHiBlue),
- Warn: color.New(color.FgHiYellow),
- Error: color.New(color.FgHiRed),
- }
-
- faintBoldColor = color.New(color.Faint, color.Bold)
- faintColor = color.New(color.Faint)
- faintMultiLinePrefix string
- faintFieldSeparator string
- faintFieldSeparatorWithNewLine string
-)
-
-func init() {
- // Force all the colors to enabled because we do our own detection of color usage.
- for _, c := range _levelToColor {
- c.EnableColor()
- }
-
- faintBoldColor.EnableColor()
- faintColor.EnableColor()
-
- faintMultiLinePrefix = faintColor.Sprint(" | ")
- faintFieldSeparator = faintColor.Sprint("=")
- faintFieldSeparatorWithNewLine = faintColor.Sprint("=\n")
-}
-
-// Make sure that intLogger is a Logger
-var _ Logger = &intLogger{}
-
-// intLogger is an internal logger implementation. Internal in that it is
-// defined entirely by this package.
-type intLogger struct {
- json bool
- jsonEscapeEnabled bool
- callerOffset int
- name string
- timeFormat string
- timeFn TimeFunction
- disableTime bool
-
- // This is an interface so that it's shared by any derived loggers, since
- // those derived loggers share the bufio.Writer as well.
- mutex Locker
- writer *writer
- level *int32
-
- // The value of curEpoch when our level was set
- setEpoch uint64
-
- // The value of curEpoch the last time we performed the level sync process
- ownEpoch uint64
-
- // Shared amongst all the loggers created in this hierachy, used to determine
- // if the level sync process should be run by comparing it with ownEpoch
- curEpoch *uint64
-
- // The logger this one was created from. Only set when syncParentLevel is set
- parent *intLogger
-
- headerColor ColorOption
- fieldColor ColorOption
-
- implied []interface{}
-
- exclude func(level Level, msg string, args ...interface{}) bool
-
- // create subloggers with their own level setting
- independentLevels bool
- syncParentLevel bool
-
- subloggerHook func(sub Logger) Logger
-}
-
-// New returns a configured logger.
-func New(opts *LoggerOptions) Logger {
- return newLogger(opts)
-}
-
-// NewSinkAdapter returns a SinkAdapter with configured settings
-// defined by LoggerOptions
-func NewSinkAdapter(opts *LoggerOptions) SinkAdapter {
- l := newLogger(opts)
- if l.callerOffset > 0 {
- // extra frames for interceptLogger.{Warn,Info,Log,etc...}, and SinkAdapter.Accept
- l.callerOffset += 2
- }
- return l
-}
-
-func newLogger(opts *LoggerOptions) *intLogger {
- if opts == nil {
- opts = &LoggerOptions{}
- }
-
- output := opts.Output
- if output == nil {
- output = DefaultOutput
- }
-
- level := opts.Level
- if level == NoLevel {
- level = DefaultLevel
- }
-
- mutex := opts.Mutex
- if mutex == nil {
- mutex = new(sync.Mutex)
- }
-
- var (
- primaryColor = ColorOff
- headerColor = ColorOff
- fieldColor = ColorOff
- )
- switch {
- case opts.ColorHeaderOnly:
- headerColor = opts.Color
- case opts.ColorHeaderAndFields:
- fieldColor = opts.Color
- headerColor = opts.Color
- default:
- primaryColor = opts.Color
- }
-
- l := &intLogger{
- json: opts.JSONFormat,
- jsonEscapeEnabled: !opts.JSONEscapeDisabled,
- name: opts.Name,
- timeFormat: TimeFormat,
- timeFn: time.Now,
- disableTime: opts.DisableTime,
- mutex: mutex,
- writer: newWriter(output, primaryColor),
- level: new(int32),
- curEpoch: new(uint64),
- exclude: opts.Exclude,
- independentLevels: opts.IndependentLevels,
- syncParentLevel: opts.SyncParentLevel,
- headerColor: headerColor,
- fieldColor: fieldColor,
- subloggerHook: opts.SubloggerHook,
- }
- if opts.IncludeLocation {
- l.callerOffset = offsetIntLogger + opts.AdditionalLocationOffset
- }
-
- if l.json {
- l.timeFormat = TimeFormatJSON
- }
- if opts.TimeFn != nil {
- l.timeFn = opts.TimeFn
- }
- if opts.TimeFormat != "" {
- l.timeFormat = opts.TimeFormat
- }
-
- if l.subloggerHook == nil {
- l.subloggerHook = identityHook
- }
-
- l.setColorization(opts)
-
- atomic.StoreInt32(l.level, int32(level))
-
- return l
-}
-
-func identityHook(logger Logger) Logger {
- return logger
-}
-
-// offsetIntLogger is the stack frame offset in the call stack for the caller to
-// one of the Warn, Info, Log, etc methods.
-const offsetIntLogger = 3
-
-// Log a message and a set of key/value pairs if the given level is at
-// or more severe that the threshold configured in the Logger.
-func (l *intLogger) log(name string, level Level, msg string, args ...interface{}) {
- if level < l.GetLevel() {
- return
- }
-
- t := l.timeFn()
-
- l.mutex.Lock()
- defer l.mutex.Unlock()
-
- if l.exclude != nil && l.exclude(level, msg, args...) {
- return
- }
-
- if l.json {
- l.logJSON(t, name, level, msg, args...)
- } else {
- l.logPlain(t, name, level, msg, args...)
- }
-
- l.writer.Flush(level)
-}
-
-// Cleanup a path by returning the last 2 segments of the path only.
-func trimCallerPath(path string) string {
- // lovely borrowed from zap
- // nb. To make sure we trim the path correctly on Windows too, we
- // counter-intuitively need to use '/' and *not* os.PathSeparator here,
- // because the path given originates from Go stdlib, specifically
- // runtime.Caller() which (as of Mar/17) returns forward slashes even on
- // Windows.
- //
- // See https://github.com/golang/go/issues/3335
- // and https://github.com/golang/go/issues/18151
- //
- // for discussion on the issue on Go side.
-
- // Find the last separator.
- idx := strings.LastIndexByte(path, '/')
- if idx == -1 {
- return path
- }
-
- // Find the penultimate separator.
- idx = strings.LastIndexByte(path[:idx], '/')
- if idx == -1 {
- return path
- }
-
- return path[idx+1:]
-}
-
-// isNormal indicates if the rune is one allowed to exist as an unquoted
-// string value. This is a subset of ASCII, `-` through `~`.
-func isNormal(r rune) bool {
- return 0x2D <= r && r <= 0x7E // - through ~
-}
-
-// needsQuoting returns false if all the runes in string are normal, according
-// to isNormal
-func needsQuoting(str string) bool {
- for _, r := range str {
- if !isNormal(r) {
- return true
- }
- }
-
- return false
-}
-
-// logPlain is the non-JSON logging format function which writes directly
-// to the underlying writer the logger was initialized with.
-//
-// If the logger was initialized with a color function, it also handles
-// applying the color to the log message.
-//
-// Color Options
-// 1. No color.
-// 2. Color the whole log line, based on the level.
-// 3. Color only the header (level) part of the log line.
-// 4. Color both the header and fields of the log line.
-func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) {
-
- if !l.disableTime {
- l.writer.WriteString(t.Format(l.timeFormat))
- l.writer.WriteByte(' ')
- }
-
- s, ok := _levelToBracket[level]
- if ok {
- if l.headerColor != ColorOff {
- color := _levelToColor[level]
- color.Fprint(l.writer, s)
- } else {
- l.writer.WriteString(s)
- }
- } else {
- l.writer.WriteString("[?????]")
- }
-
- if l.callerOffset > 0 {
- if _, file, line, ok := runtime.Caller(l.callerOffset); ok {
- l.writer.WriteByte(' ')
- l.writer.WriteString(trimCallerPath(file))
- l.writer.WriteByte(':')
- l.writer.WriteString(strconv.Itoa(line))
- l.writer.WriteByte(':')
- }
- }
-
- l.writer.WriteByte(' ')
-
- if name != "" {
- l.writer.WriteString(name)
- if msg != "" {
- l.writer.WriteString(": ")
- l.writer.WriteString(msg)
- }
- } else if msg != "" {
- l.writer.WriteString(msg)
- }
-
- args = append(l.implied, args...)
-
- var stacktrace CapturedStacktrace
-
- if len(args) > 0 {
- if len(args)%2 != 0 {
- cs, ok := args[len(args)-1].(CapturedStacktrace)
- if ok {
- args = args[:len(args)-1]
- stacktrace = cs
- } else {
- extra := args[len(args)-1]
- args = append(args[:len(args)-1], MissingKey, extra)
- }
- }
-
- l.writer.WriteByte(':')
-
- // Handle the field arguments, which come in pairs (key=val).
- FOR:
- for i := 0; i < len(args); i = i + 2 {
- var (
- key string
- val string
- raw bool
- )
-
- // Convert the field value to a string.
- switch st := args[i+1].(type) {
- case string:
- val = st
- if st == "" {
- val = `""`
- raw = true
- }
- case int:
- val = strconv.FormatInt(int64(st), 10)
- case int64:
- val = strconv.FormatInt(int64(st), 10)
- case int32:
- val = strconv.FormatInt(int64(st), 10)
- case int16:
- val = strconv.FormatInt(int64(st), 10)
- case int8:
- val = strconv.FormatInt(int64(st), 10)
- case uint:
- val = strconv.FormatUint(uint64(st), 10)
- case uint64:
- val = strconv.FormatUint(uint64(st), 10)
- case uint32:
- val = strconv.FormatUint(uint64(st), 10)
- case uint16:
- val = strconv.FormatUint(uint64(st), 10)
- case uint8:
- val = strconv.FormatUint(uint64(st), 10)
- case Hex:
- val = "0x" + strconv.FormatUint(uint64(st), 16)
- case Octal:
- val = "0" + strconv.FormatUint(uint64(st), 8)
- case Binary:
- val = "0b" + strconv.FormatUint(uint64(st), 2)
- case CapturedStacktrace:
- stacktrace = st
- continue FOR
- case Format:
- val = fmt.Sprintf(st[0].(string), st[1:]...)
- case Quote:
- raw = true
- val = strconv.Quote(string(st))
- default:
- v := reflect.ValueOf(st)
- if v.Kind() == reflect.Slice {
- val = l.renderSlice(v)
- raw = true
- } else {
- val = fmt.Sprintf("%v", st)
- }
- }
-
- // Convert the field key to a string.
- switch st := args[i].(type) {
- case string:
- key = st
- default:
- key = fmt.Sprintf("%s", st)
- }
-
- // Optionally apply the ANSI "faint" and "bold"
- // SGR values to the key.
- if l.fieldColor != ColorOff {
- key = faintBoldColor.Sprint(key)
- }
-
- // Values may contain multiple lines, and that format
- // is preserved, with each line prefixed with a " | "
- // to show it's part of a collection of lines.
- //
- // Values may also need quoting, if not all the runes
- // in the value string are "normal", like if they
- // contain ANSI escape sequences.
- if strings.Contains(val, "\n") {
- l.writer.WriteString("\n ")
- l.writer.WriteString(key)
- if l.fieldColor != ColorOff {
- l.writer.WriteString(faintFieldSeparatorWithNewLine)
- writeIndent(l.writer, val, faintMultiLinePrefix)
- } else {
- l.writer.WriteString("=\n")
- writeIndent(l.writer, val, " | ")
- }
- l.writer.WriteString(" ")
- } else if !raw && needsQuoting(val) {
- l.writer.WriteByte(' ')
- l.writer.WriteString(key)
- if l.fieldColor != ColorOff {
- l.writer.WriteString(faintFieldSeparator)
- } else {
- l.writer.WriteByte('=')
- }
- l.writer.WriteByte('"')
- writeEscapedForOutput(l.writer, val, true)
- l.writer.WriteByte('"')
- } else {
- l.writer.WriteByte(' ')
- l.writer.WriteString(key)
- if l.fieldColor != ColorOff {
- l.writer.WriteString(faintFieldSeparator)
- } else {
- l.writer.WriteByte('=')
- }
- l.writer.WriteString(val)
- }
- }
- }
-
- l.writer.WriteString("\n")
-
- if stacktrace != "" {
- l.writer.WriteString(string(stacktrace))
- l.writer.WriteString("\n")
- }
-}
-
-func writeIndent(w *writer, str string, indent string) {
- for {
- nl := strings.IndexByte(str, "\n"[0])
- if nl == -1 {
- if str != "" {
- w.WriteString(indent)
- writeEscapedForOutput(w, str, false)
- w.WriteString("\n")
- }
- return
- }
-
- w.WriteString(indent)
- writeEscapedForOutput(w, str[:nl], false)
- w.WriteString("\n")
- str = str[nl+1:]
- }
-}
-
-func needsEscaping(str string) bool {
- for _, b := range str {
- if !unicode.IsPrint(b) || b == '"' {
- return true
- }
- }
-
- return false
-}
-
-const (
- lowerhex = "0123456789abcdef"
-)
-
-var bufPool = sync.Pool{
- New: func() interface{} {
- return new(bytes.Buffer)
- },
-}
-
-func writeEscapedForOutput(w io.Writer, str string, escapeQuotes bool) {
- if !needsEscaping(str) {
- w.Write([]byte(str))
- return
- }
-
- bb := bufPool.Get().(*bytes.Buffer)
- bb.Reset()
-
- defer bufPool.Put(bb)
-
- for _, r := range str {
- if escapeQuotes && r == '"' {
- bb.WriteString(`\"`)
- } else if unicode.IsPrint(r) {
- bb.WriteRune(r)
- } else {
- switch r {
- case '\a':
- bb.WriteString(`\a`)
- case '\b':
- bb.WriteString(`\b`)
- case '\f':
- bb.WriteString(`\f`)
- case '\n':
- bb.WriteString(`\n`)
- case '\r':
- bb.WriteString(`\r`)
- case '\t':
- bb.WriteString(`\t`)
- case '\v':
- bb.WriteString(`\v`)
- default:
- switch {
- case r < ' ':
- bb.WriteString(`\x`)
- bb.WriteByte(lowerhex[byte(r)>>4])
- bb.WriteByte(lowerhex[byte(r)&0xF])
- case !utf8.ValidRune(r):
- r = 0xFFFD
- fallthrough
- case r < 0x10000:
- bb.WriteString(`\u`)
- for s := 12; s >= 0; s -= 4 {
- bb.WriteByte(lowerhex[r>>uint(s)&0xF])
- }
- default:
- bb.WriteString(`\U`)
- for s := 28; s >= 0; s -= 4 {
- bb.WriteByte(lowerhex[r>>uint(s)&0xF])
- }
- }
- }
- }
- }
-
- w.Write(bb.Bytes())
-}
-
-func (l *intLogger) renderSlice(v reflect.Value) string {
- var buf bytes.Buffer
-
- buf.WriteRune('[')
-
- for i := 0; i < v.Len(); i++ {
- if i > 0 {
- buf.WriteString(", ")
- }
-
- sv := v.Index(i)
-
- var val string
-
- switch sv.Kind() {
- case reflect.String:
- val = strconv.Quote(sv.String())
- case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64:
- val = strconv.FormatInt(sv.Int(), 10)
- case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- val = strconv.FormatUint(sv.Uint(), 10)
- default:
- val = fmt.Sprintf("%v", sv.Interface())
- if strings.ContainsAny(val, " \t\n\r") {
- val = strconv.Quote(val)
- }
- }
-
- buf.WriteString(val)
- }
-
- buf.WriteRune(']')
-
- return buf.String()
-}
-
-// JSON logging function
-func (l *intLogger) logJSON(t time.Time, name string, level Level, msg string, args ...interface{}) {
- vals := l.jsonMapEntry(t, name, level, msg)
- args = append(l.implied, args...)
-
- if len(args) > 0 {
- if len(args)%2 != 0 {
- cs, ok := args[len(args)-1].(CapturedStacktrace)
- if ok {
- args = args[:len(args)-1]
- vals["stacktrace"] = cs
- } else {
- extra := args[len(args)-1]
- args = append(args[:len(args)-1], MissingKey, extra)
- }
- }
-
- for i := 0; i < len(args); i = i + 2 {
- val := args[i+1]
- switch sv := val.(type) {
- case error:
- // Check if val is of type error. If error type doesn't
- // implement json.Marshaler or encoding.TextMarshaler
- // then set val to err.Error() so that it gets marshaled
- switch sv.(type) {
- case json.Marshaler, encoding.TextMarshaler:
- default:
- val = sv.Error()
- }
- case Format:
- val = fmt.Sprintf(sv[0].(string), sv[1:]...)
- }
-
- var key string
-
- switch st := args[i].(type) {
- case string:
- key = st
- default:
- key = fmt.Sprintf("%s", st)
- }
- vals[key] = val
- }
- }
-
- encoder := json.NewEncoder(l.writer)
- encoder.SetEscapeHTML(l.jsonEscapeEnabled)
- err := encoder.Encode(vals)
- if err != nil {
- if _, ok := err.(*json.UnsupportedTypeError); ok {
- plainVal := l.jsonMapEntry(t, name, level, msg)
- plainVal["@warn"] = errJsonUnsupportedTypeMsg
-
- errEncoder := json.NewEncoder(l.writer)
- errEncoder.SetEscapeHTML(l.jsonEscapeEnabled)
- errEncoder.Encode(plainVal)
- }
- }
-}
-
-func (l intLogger) jsonMapEntry(t time.Time, name string, level Level, msg string) map[string]interface{} {
- vals := map[string]interface{}{
- "@message": msg,
- }
- if !l.disableTime {
- vals["@timestamp"] = t.Format(l.timeFormat)
- }
-
- var levelStr string
- switch level {
- case Error:
- levelStr = "error"
- case Warn:
- levelStr = "warn"
- case Info:
- levelStr = "info"
- case Debug:
- levelStr = "debug"
- case Trace:
- levelStr = "trace"
- default:
- levelStr = "all"
- }
-
- vals["@level"] = levelStr
-
- if name != "" {
- vals["@module"] = name
- }
-
- if l.callerOffset > 0 {
- if _, file, line, ok := runtime.Caller(l.callerOffset + 1); ok {
- vals["@caller"] = fmt.Sprintf("%s:%d", file, line)
- }
- }
- return vals
-}
-
-// Emit the message and args at the provided level
-func (l *intLogger) Log(level Level, msg string, args ...interface{}) {
- l.log(l.Name(), level, msg, args...)
-}
-
-// Emit the message and args at DEBUG level
-func (l *intLogger) Debug(msg string, args ...interface{}) {
- l.log(l.Name(), Debug, msg, args...)
-}
-
-// Emit the message and args at TRACE level
-func (l *intLogger) Trace(msg string, args ...interface{}) {
- l.log(l.Name(), Trace, msg, args...)
-}
-
-// Emit the message and args at INFO level
-func (l *intLogger) Info(msg string, args ...interface{}) {
- l.log(l.Name(), Info, msg, args...)
-}
-
-// Emit the message and args at WARN level
-func (l *intLogger) Warn(msg string, args ...interface{}) {
- l.log(l.Name(), Warn, msg, args...)
-}
-
-// Emit the message and args at ERROR level
-func (l *intLogger) Error(msg string, args ...interface{}) {
- l.log(l.Name(), Error, msg, args...)
-}
-
-// Indicate that the logger would emit TRACE level logs
-func (l *intLogger) IsTrace() bool {
- return l.GetLevel() == Trace
-}
-
-// Indicate that the logger would emit DEBUG level logs
-func (l *intLogger) IsDebug() bool {
- return l.GetLevel() <= Debug
-}
-
-// Indicate that the logger would emit INFO level logs
-func (l *intLogger) IsInfo() bool {
- return l.GetLevel() <= Info
-}
-
-// Indicate that the logger would emit WARN level logs
-func (l *intLogger) IsWarn() bool {
- return l.GetLevel() <= Warn
-}
-
-// Indicate that the logger would emit ERROR level logs
-func (l *intLogger) IsError() bool {
- return l.GetLevel() <= Error
-}
-
-const MissingKey = "EXTRA_VALUE_AT_END"
-
-// Return a sub-Logger for which every emitted log message will contain
-// the given key/value pairs. This is used to create a context specific
-// Logger.
-func (l *intLogger) With(args ...interface{}) Logger {
- var extra interface{}
-
- if len(args)%2 != 0 {
- extra = args[len(args)-1]
- args = args[:len(args)-1]
- }
-
- sl := l.copy()
-
- result := make(map[string]interface{}, len(l.implied)+len(args))
- keys := make([]string, 0, len(l.implied)+len(args))
-
- // Read existing args, store map and key for consistent sorting
- for i := 0; i < len(l.implied); i += 2 {
- key := l.implied[i].(string)
- keys = append(keys, key)
- result[key] = l.implied[i+1]
- }
- // Read new args, store map and key for consistent sorting
- for i := 0; i < len(args); i += 2 {
- key := args[i].(string)
- _, exists := result[key]
- if !exists {
- keys = append(keys, key)
- }
- result[key] = args[i+1]
- }
-
- // Sort keys to be consistent
- sort.Strings(keys)
-
- sl.implied = make([]interface{}, 0, len(l.implied)+len(args))
- for _, k := range keys {
- sl.implied = append(sl.implied, k)
- sl.implied = append(sl.implied, result[k])
- }
-
- if extra != nil {
- sl.implied = append(sl.implied, MissingKey, extra)
- }
-
- return l.subloggerHook(sl)
-}
-
-// Create a new sub-Logger that a name decending from the current name.
-// This is used to create a subsystem specific Logger.
-func (l *intLogger) Named(name string) Logger {
- sl := l.copy()
-
- if sl.name != "" {
- sl.name = sl.name + "." + name
- } else {
- sl.name = name
- }
-
- return l.subloggerHook(sl)
-}
-
-// Create a new sub-Logger with an explicit name. This ignores the current
-// name. This is used to create a standalone logger that doesn't fall
-// within the normal hierarchy.
-func (l *intLogger) ResetNamed(name string) Logger {
- sl := l.copy()
-
- sl.name = name
-
- return l.subloggerHook(sl)
-}
-
-func (l *intLogger) ResetOutput(opts *LoggerOptions) error {
- if opts.Output == nil {
- return errors.New("given output is nil")
- }
-
- l.mutex.Lock()
- defer l.mutex.Unlock()
-
- return l.resetOutput(opts)
-}
-
-func (l *intLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error {
- if opts.Output == nil {
- return errors.New("given output is nil")
- }
- if flushable == nil {
- return errors.New("flushable is nil")
- }
-
- l.mutex.Lock()
- defer l.mutex.Unlock()
-
- if err := flushable.Flush(); err != nil {
- return err
- }
-
- return l.resetOutput(opts)
-}
-
-func (l *intLogger) resetOutput(opts *LoggerOptions) error {
- l.writer = newWriter(opts.Output, opts.Color)
- l.setColorization(opts)
- return nil
-}
-
-// Update the logging level on-the-fly. This will affect all subloggers as
-// well.
-func (l *intLogger) SetLevel(level Level) {
- if !l.syncParentLevel {
- atomic.StoreInt32(l.level, int32(level))
- return
- }
-
- nsl := new(int32)
- *nsl = int32(level)
-
- l.level = nsl
-
- l.ownEpoch = atomic.AddUint64(l.curEpoch, 1)
- l.setEpoch = l.ownEpoch
-}
-
-func (l *intLogger) searchLevelPtr() *int32 {
- p := l.parent
-
- ptr := l.level
-
- max := l.setEpoch
-
- for p != nil {
- if p.setEpoch > max {
- max = p.setEpoch
- ptr = p.level
- }
-
- p = p.parent
- }
-
- return ptr
-}
-
-// Returns the current level
-func (l *intLogger) GetLevel() Level {
- // We perform the loads immediately to keep the CPU pipeline busy, which
- // effectively makes the second load cost nothing. Once loaded into registers
- // the comparison returns the already loaded value. The comparison is almost
- // always true, so the branch predictor should hit consistently with it.
- var (
- curEpoch = atomic.LoadUint64(l.curEpoch)
- level = Level(atomic.LoadInt32(l.level))
- own = l.ownEpoch
- )
-
- if curEpoch == own {
- return level
- }
-
- // Perform the level sync process. We'll avoid doing this next time by seeing the
- // epoch as current.
-
- ptr := l.searchLevelPtr()
- l.level = ptr
- l.ownEpoch = curEpoch
-
- return Level(atomic.LoadInt32(ptr))
-}
-
-// Create a *log.Logger that will send it's data through this Logger. This
-// allows packages that expect to be using the standard library log to actually
-// use this logger.
-func (l *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
- if opts == nil {
- opts = &StandardLoggerOptions{}
- }
-
- return log.New(l.StandardWriter(opts), "", 0)
-}
-
-func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
- newLog := *l
- if l.callerOffset > 0 {
- // the stack is
- // logger.printf() -> l.Output() ->l.out.writer(hclog:stdlogAdaptor.write) -> hclog:stdlogAdaptor.dispatch()
- // So plus 4.
- newLog.callerOffset = l.callerOffset + 4
- }
- return &stdlogAdapter{
- log: &newLog,
- inferLevels: opts.InferLevels,
- inferLevelsWithTimestamp: opts.InferLevelsWithTimestamp,
- forceLevel: opts.ForceLevel,
- }
-}
-
-// Accept implements the SinkAdapter interface
-func (i *intLogger) Accept(name string, level Level, msg string, args ...interface{}) {
- i.log(name, level, msg, args...)
-}
-
-// ImpliedArgs returns the loggers implied args
-func (i *intLogger) ImpliedArgs() []interface{} {
- return i.implied
-}
-
-// Name returns the loggers name
-func (i *intLogger) Name() string {
- return i.name
-}
-
-// copy returns a shallow copy of the intLogger, replacing the level pointer
-// when necessary
-func (l *intLogger) copy() *intLogger {
- sl := *l
-
- if l.independentLevels {
- sl.level = new(int32)
- *sl.level = *l.level
- } else if l.syncParentLevel {
- sl.parent = l
- }
-
- return &sl
-}
diff --git a/vendor/github.com/hashicorp/go-hclog/logger.go b/vendor/github.com/hashicorp/go-hclog/logger.go
deleted file mode 100644
index ad17544f..00000000
--- a/vendor/github.com/hashicorp/go-hclog/logger.go
+++ /dev/null
@@ -1,415 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MIT
-
-package hclog
-
-import (
- "io"
- "log"
- "os"
- "strings"
- "time"
-)
-
-var (
- // DefaultOutput is used as the default log output.
- DefaultOutput io.Writer = os.Stderr
-
- // DefaultLevel is used as the default log level.
- DefaultLevel = Info
-)
-
-// Level represents a log level.
-type Level int32
-
-const (
- // NoLevel is a special level used to indicate that no level has been
- // set and allow for a default to be used.
- NoLevel Level = 0
-
- // Trace is the most verbose level. Intended to be used for the tracing
- // of actions in code, such as function enters/exits, etc.
- Trace Level = 1
-
- // Debug information for programmer low-level analysis.
- Debug Level = 2
-
- // Info information about steady state operations.
- Info Level = 3
-
- // Warn information about rare but handled events.
- Warn Level = 4
-
- // Error information about unrecoverable events.
- Error Level = 5
-
- // Off disables all logging output.
- Off Level = 6
-)
-
-// Format is a simple convenience type for when formatting is required. When
-// processing a value of this type, the logger automatically treats the first
-// argument as a Printf formatting string and passes the rest as the values
-// to be formatted. For example: L.Info(Fmt{"%d beans/day", beans}).
-type Format []interface{}
-
-// Fmt returns a Format type. This is a convenience function for creating a Format
-// type.
-func Fmt(str string, args ...interface{}) Format {
- return append(Format{str}, args...)
-}
-
-// A simple shortcut to format numbers in hex when displayed with the normal
-// text output. For example: L.Info("header value", Hex(17))
-type Hex int
-
-// A simple shortcut to format numbers in octal when displayed with the normal
-// text output. For example: L.Info("perms", Octal(17))
-type Octal int
-
-// A simple shortcut to format numbers in binary when displayed with the normal
-// text output. For example: L.Info("bits", Binary(17))
-type Binary int
-
-// A simple shortcut to format strings with Go quoting. Control and
-// non-printable characters will be escaped with their backslash equivalents in
-// output. Intended for untrusted or multiline strings which should be logged
-// as concisely as possible.
-type Quote string
-
-// ColorOption expresses how the output should be colored, if at all.
-type ColorOption uint8
-
-const (
- // ColorOff is the default coloration, and does not
- // inject color codes into the io.Writer.
- ColorOff ColorOption = iota
- // AutoColor checks if the io.Writer is a tty,
- // and if so enables coloring.
- AutoColor
- // ForceColor will enable coloring, regardless of whether
- // the io.Writer is a tty or not.
- ForceColor
-)
-
-// SupportsColor is an optional interface that can be implemented by the output
-// value. If implemented and SupportsColor() returns true, then AutoColor will
-// enable colorization.
-type SupportsColor interface {
- SupportsColor() bool
-}
-
-// LevelFromString returns a Level type for the named log level, or "NoLevel" if
-// the level string is invalid. This facilitates setting the log level via
-// config or environment variable by name in a predictable way.
-func LevelFromString(levelStr string) Level {
- // We don't care about case. Accept both "INFO" and "info".
- levelStr = strings.ToLower(strings.TrimSpace(levelStr))
- switch levelStr {
- case "trace":
- return Trace
- case "debug":
- return Debug
- case "info":
- return Info
- case "warn":
- return Warn
- case "error":
- return Error
- case "off":
- return Off
- default:
- return NoLevel
- }
-}
-
-func (l Level) String() string {
- switch l {
- case Trace:
- return "trace"
- case Debug:
- return "debug"
- case Info:
- return "info"
- case Warn:
- return "warn"
- case Error:
- return "error"
- case NoLevel:
- return "none"
- case Off:
- return "off"
- default:
- return "unknown"
- }
-}
-
-// Logger describes the interface that must be implemented by all loggers.
-type Logger interface {
- // Args are alternating key, val pairs
- // keys must be strings
- // vals can be any type, but display is implementation specific
- // Emit a message and key/value pairs at a provided log level
- Log(level Level, msg string, args ...interface{})
-
- // Emit a message and key/value pairs at the TRACE level
- Trace(msg string, args ...interface{})
-
- // Emit a message and key/value pairs at the DEBUG level
- Debug(msg string, args ...interface{})
-
- // Emit a message and key/value pairs at the INFO level
- Info(msg string, args ...interface{})
-
- // Emit a message and key/value pairs at the WARN level
- Warn(msg string, args ...interface{})
-
- // Emit a message and key/value pairs at the ERROR level
- Error(msg string, args ...interface{})
-
- // Indicate if TRACE logs would be emitted. This and the other Is* guards
- // are used to elide expensive logging code based on the current level.
- IsTrace() bool
-
- // Indicate if DEBUG logs would be emitted. This and the other Is* guards
- IsDebug() bool
-
- // Indicate if INFO logs would be emitted. This and the other Is* guards
- IsInfo() bool
-
- // Indicate if WARN logs would be emitted. This and the other Is* guards
- IsWarn() bool
-
- // Indicate if ERROR logs would be emitted. This and the other Is* guards
- IsError() bool
-
- // ImpliedArgs returns With key/value pairs
- ImpliedArgs() []interface{}
-
- // Creates a sublogger that will always have the given key/value pairs
- With(args ...interface{}) Logger
-
- // Returns the Name of the logger
- Name() string
-
- // Create a logger that will prepend the name string on the front of all messages.
- // If the logger already has a name, the new value will be appended to the current
- // name. That way, a major subsystem can use this to decorate all it's own logs
- // without losing context.
- Named(name string) Logger
-
- // Create a logger that will prepend the name string on the front of all messages.
- // This sets the name of the logger to the value directly, unlike Named which honor
- // the current name as well.
- ResetNamed(name string) Logger
-
- // Updates the level. This should affect all related loggers as well,
- // unless they were created with IndependentLevels. If an
- // implementation cannot update the level on the fly, it should no-op.
- SetLevel(level Level)
-
- // Returns the current level
- GetLevel() Level
-
- // Return a value that conforms to the stdlib log.Logger interface
- StandardLogger(opts *StandardLoggerOptions) *log.Logger
-
- // Return a value that conforms to io.Writer, which can be passed into log.SetOutput()
- StandardWriter(opts *StandardLoggerOptions) io.Writer
-}
-
-// StandardLoggerOptions can be used to configure a new standard logger.
-type StandardLoggerOptions struct {
- // Indicate that some minimal parsing should be done on strings to try
- // and detect their level and re-emit them.
- // This supports the strings like [ERROR], [ERR] [TRACE], [WARN], [INFO],
- // [DEBUG] and strip it off before reapplying it.
- InferLevels bool
-
- // Indicate that some minimal parsing should be done on strings to try
- // and detect their level and re-emit them while ignoring possible
- // timestamp values in the beginning of the string.
- // This supports the strings like [ERROR], [ERR] [TRACE], [WARN], [INFO],
- // [DEBUG] and strip it off before reapplying it.
- // The timestamp detection may result in false positives and incomplete
- // string outputs.
- // InferLevelsWithTimestamp is only relevant if InferLevels is true.
- InferLevelsWithTimestamp bool
-
- // ForceLevel is used to force all output from the standard logger to be at
- // the specified level. Similar to InferLevels, this will strip any level
- // prefix contained in the logged string before applying the forced level.
- // If set, this override InferLevels.
- ForceLevel Level
-}
-
-type TimeFunction = func() time.Time
-
-// LoggerOptions can be used to configure a new logger.
-type LoggerOptions struct {
- // Name of the subsystem to prefix logs with
- Name string
-
- // The threshold for the logger. Anything less severe is suppressed
- Level Level
-
- // Where to write the logs to. Defaults to os.Stderr if nil
- Output io.Writer
-
- // An optional Locker in case Output is shared. This can be a sync.Mutex or
- // a NoopLocker if the caller wants control over output, e.g. for batching
- // log lines.
- Mutex Locker
-
- // Control if the output should be in JSON.
- JSONFormat bool
-
- // Control the escape switch of json.Encoder
- JSONEscapeDisabled bool
-
- // Include file and line information in each log line
- IncludeLocation bool
-
- // AdditionalLocationOffset is the number of additional stack levels to skip
- // when finding the file and line information for the log line
- AdditionalLocationOffset int
-
- // The time format to use instead of the default
- TimeFormat string
-
- // A function which is called to get the time object that is formatted using `TimeFormat`
- TimeFn TimeFunction
-
- // Control whether or not to display the time at all. This is required
- // because setting TimeFormat to empty assumes the default format.
- DisableTime bool
-
- // Color the output. On Windows, colored logs are only available for io.Writers that
- // are concretely instances of *os.File.
- Color ColorOption
-
- // Only color the header, not the body. This can help with readability of long messages.
- ColorHeaderOnly bool
-
- // Color the header and message body fields. This can help with readability
- // of long messages with multiple fields.
- ColorHeaderAndFields bool
-
- // A function which is called with the log information and if it returns true the value
- // should not be logged.
- // This is useful when interacting with a system that you wish to suppress the log
- // message for (because it's too noisy, etc)
- Exclude func(level Level, msg string, args ...interface{}) bool
-
- // IndependentLevels causes subloggers to be created with an independent
- // copy of this logger's level. This means that using SetLevel on this
- // logger will not affect any subloggers, and SetLevel on any subloggers
- // will not affect the parent or sibling loggers.
- IndependentLevels bool
-
- // When set, changing the level of a logger effects only it's direct sub-loggers
- // rather than all sub-loggers. For example:
- // a := logger.Named("a")
- // a.SetLevel(Error)
- // b := a.Named("b")
- // c := a.Named("c")
- // b.GetLevel() => Error
- // c.GetLevel() => Error
- // b.SetLevel(Info)
- // a.GetLevel() => Error
- // b.GetLevel() => Info
- // c.GetLevel() => Error
- // a.SetLevel(Warn)
- // a.GetLevel() => Warn
- // b.GetLevel() => Warn
- // c.GetLevel() => Warn
- SyncParentLevel bool
-
- // SubloggerHook registers a function that is called when a sublogger via
- // Named, With, or ResetNamed is created. If defined, the function is passed
- // the newly created Logger and the returned Logger is returned from the
- // original function. This option allows customization via interception and
- // wrapping of Logger instances.
- SubloggerHook func(sub Logger) Logger
-}
-
-// InterceptLogger describes the interface for using a logger
-// that can register different output sinks.
-// This is useful for sending lower level log messages
-// to a different output while keeping the root logger
-// at a higher one.
-type InterceptLogger interface {
- // Logger is the root logger for an InterceptLogger
- Logger
-
- // RegisterSink adds a SinkAdapter to the InterceptLogger
- RegisterSink(sink SinkAdapter)
-
- // DeregisterSink removes a SinkAdapter from the InterceptLogger
- DeregisterSink(sink SinkAdapter)
-
- // Create a interceptlogger that will prepend the name string on the front of all messages.
- // If the logger already has a name, the new value will be appended to the current
- // name. That way, a major subsystem can use this to decorate all it's own logs
- // without losing context.
- NamedIntercept(name string) InterceptLogger
-
- // Create a interceptlogger that will prepend the name string on the front of all messages.
- // This sets the name of the logger to the value directly, unlike Named which honor
- // the current name as well.
- ResetNamedIntercept(name string) InterceptLogger
-
- // Deprecated: use StandardLogger
- StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger
-
- // Deprecated: use StandardWriter
- StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer
-}
-
-// SinkAdapter describes the interface that must be implemented
-// in order to Register a new sink to an InterceptLogger
-type SinkAdapter interface {
- Accept(name string, level Level, msg string, args ...interface{})
-}
-
-// Flushable represents a method for flushing an output buffer. It can be used
-// if Resetting the log to use a new output, in order to flush the writes to
-// the existing output beforehand.
-type Flushable interface {
- Flush() error
-}
-
-// OutputResettable provides ways to swap the output in use at runtime
-type OutputResettable interface {
- // ResetOutput swaps the current output writer with the one given in the
- // opts. Color options given in opts will be used for the new output.
- ResetOutput(opts *LoggerOptions) error
-
- // ResetOutputWithFlush swaps the current output writer with the one given
- // in the opts, first calling Flush on the given Flushable. Color options
- // given in opts will be used for the new output.
- ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error
-}
-
-// Locker is used for locking output. If not set when creating a logger, a
-// sync.Mutex will be used internally.
-type Locker interface {
- // Lock is called when the output is going to be changed or written to
- Lock()
-
- // Unlock is called when the operation that called Lock() completes
- Unlock()
-}
-
-// NoopLocker implements locker but does nothing. This is useful if the client
-// wants tight control over locking, in order to provide grouping of log
-// entries or other functionality.
-type NoopLocker struct{}
-
-// Lock does nothing
-func (n NoopLocker) Lock() {}
-
-// Unlock does nothing
-func (n NoopLocker) Unlock() {}
-
-var _ Locker = (*NoopLocker)(nil)
diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go
deleted file mode 100644
index d43da809..00000000
--- a/vendor/github.com/hashicorp/go-hclog/nulllogger.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MIT
-
-package hclog
-
-import (
- "io"
- "io/ioutil"
- "log"
-)
-
-// NewNullLogger instantiates a Logger for which all calls
-// will succeed without doing anything.
-// Useful for testing purposes.
-func NewNullLogger() Logger {
- return &nullLogger{}
-}
-
-type nullLogger struct{}
-
-func (l *nullLogger) Log(level Level, msg string, args ...interface{}) {}
-
-func (l *nullLogger) Trace(msg string, args ...interface{}) {}
-
-func (l *nullLogger) Debug(msg string, args ...interface{}) {}
-
-func (l *nullLogger) Info(msg string, args ...interface{}) {}
-
-func (l *nullLogger) Warn(msg string, args ...interface{}) {}
-
-func (l *nullLogger) Error(msg string, args ...interface{}) {}
-
-func (l *nullLogger) IsTrace() bool { return false }
-
-func (l *nullLogger) IsDebug() bool { return false }
-
-func (l *nullLogger) IsInfo() bool { return false }
-
-func (l *nullLogger) IsWarn() bool { return false }
-
-func (l *nullLogger) IsError() bool { return false }
-
-func (l *nullLogger) ImpliedArgs() []interface{} { return []interface{}{} }
-
-func (l *nullLogger) With(args ...interface{}) Logger { return l }
-
-func (l *nullLogger) Name() string { return "" }
-
-func (l *nullLogger) Named(name string) Logger { return l }
-
-func (l *nullLogger) ResetNamed(name string) Logger { return l }
-
-func (l *nullLogger) SetLevel(level Level) {}
-
-func (l *nullLogger) GetLevel() Level { return NoLevel }
-
-func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
- return log.New(l.StandardWriter(opts), "", log.LstdFlags)
-}
-
-func (l *nullLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
- return ioutil.Discard
-}
diff --git a/vendor/github.com/hashicorp/go-hclog/stacktrace.go b/vendor/github.com/hashicorp/go-hclog/stacktrace.go
deleted file mode 100644
index 9b27bd3d..00000000
--- a/vendor/github.com/hashicorp/go-hclog/stacktrace.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package hclog
-
-import (
- "bytes"
- "runtime"
- "strconv"
- "strings"
- "sync"
-)
-
-var (
- _stacktraceIgnorePrefixes = []string{
- "runtime.goexit",
- "runtime.main",
- }
- _stacktracePool = sync.Pool{
- New: func() interface{} {
- return newProgramCounters(64)
- },
- }
-)
-
-// CapturedStacktrace represents a stacktrace captured by a previous call
-// to log.Stacktrace. If passed to a logging function, the stacktrace
-// will be appended.
-type CapturedStacktrace string
-
-// Stacktrace captures a stacktrace of the current goroutine and returns
-// it to be passed to a logging function.
-func Stacktrace() CapturedStacktrace {
- return CapturedStacktrace(takeStacktrace())
-}
-
-func takeStacktrace() string {
- programCounters := _stacktracePool.Get().(*programCounters)
- defer _stacktracePool.Put(programCounters)
-
- var buffer bytes.Buffer
-
- for {
- // Skip the call to runtime.Counters and takeStacktrace so that the
- // program counters start at the caller of takeStacktrace.
- n := runtime.Callers(2, programCounters.pcs)
- if n < cap(programCounters.pcs) {
- programCounters.pcs = programCounters.pcs[:n]
- break
- }
- // Don't put the too-short counter slice back into the pool; this lets
- // the pool adjust if we consistently take deep stacktraces.
- programCounters = newProgramCounters(len(programCounters.pcs) * 2)
- }
-
- i := 0
- frames := runtime.CallersFrames(programCounters.pcs)
- for frame, more := frames.Next(); more; frame, more = frames.Next() {
- if shouldIgnoreStacktraceFunction(frame.Function) {
- continue
- }
- if i != 0 {
- buffer.WriteByte('\n')
- }
- i++
- buffer.WriteString(frame.Function)
- buffer.WriteByte('\n')
- buffer.WriteByte('\t')
- buffer.WriteString(frame.File)
- buffer.WriteByte(':')
- buffer.WriteString(strconv.Itoa(int(frame.Line)))
- }
-
- return buffer.String()
-}
-
-func shouldIgnoreStacktraceFunction(function string) bool {
- for _, prefix := range _stacktraceIgnorePrefixes {
- if strings.HasPrefix(function, prefix) {
- return true
- }
- }
- return false
-}
-
-type programCounters struct {
- pcs []uintptr
-}
-
-func newProgramCounters(size int) *programCounters {
- return &programCounters{make([]uintptr, size)}
-}
diff --git a/vendor/github.com/hashicorp/go-hclog/stdlog.go b/vendor/github.com/hashicorp/go-hclog/stdlog.go
deleted file mode 100644
index 03739b61..00000000
--- a/vendor/github.com/hashicorp/go-hclog/stdlog.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MIT
-
-package hclog
-
-import (
- "bytes"
- "log"
- "regexp"
- "strings"
-)
-
-// Regex to ignore characters commonly found in timestamp formats from the
-// beginning of inputs.
-var logTimestampRegexp = regexp.MustCompile(`^[\d\s\:\/\.\+-TZ]*`)
-
-// Provides a io.Writer to shim the data out of *log.Logger
-// and back into our Logger. This is basically the only way to
-// build upon *log.Logger.
-type stdlogAdapter struct {
- log Logger
- inferLevels bool
- inferLevelsWithTimestamp bool
- forceLevel Level
-}
-
-// Take the data, infer the levels if configured, and send it through
-// a regular Logger.
-func (s *stdlogAdapter) Write(data []byte) (int, error) {
- str := string(bytes.TrimRight(data, " \t\n"))
-
- if s.forceLevel != NoLevel {
- // Use pickLevel to strip log levels included in the line since we are
- // forcing the level
- _, str := s.pickLevel(str)
-
- // Log at the forced level
- s.dispatch(str, s.forceLevel)
- } else if s.inferLevels {
- if s.inferLevelsWithTimestamp {
- str = s.trimTimestamp(str)
- }
-
- level, str := s.pickLevel(str)
- s.dispatch(str, level)
- } else {
- s.log.Info(str)
- }
-
- return len(data), nil
-}
-
-func (s *stdlogAdapter) dispatch(str string, level Level) {
- switch level {
- case Trace:
- s.log.Trace(str)
- case Debug:
- s.log.Debug(str)
- case Info:
- s.log.Info(str)
- case Warn:
- s.log.Warn(str)
- case Error:
- s.log.Error(str)
- default:
- s.log.Info(str)
- }
-}
-
-// Detect, based on conventions, what log level this is.
-func (s *stdlogAdapter) pickLevel(str string) (Level, string) {
- switch {
- case strings.HasPrefix(str, "[DEBUG]"):
- return Debug, strings.TrimSpace(str[7:])
- case strings.HasPrefix(str, "[TRACE]"):
- return Trace, strings.TrimSpace(str[7:])
- case strings.HasPrefix(str, "[INFO]"):
- return Info, strings.TrimSpace(str[6:])
- case strings.HasPrefix(str, "[WARN]"):
- return Warn, strings.TrimSpace(str[6:])
- case strings.HasPrefix(str, "[ERROR]"):
- return Error, strings.TrimSpace(str[7:])
- case strings.HasPrefix(str, "[ERR]"):
- return Error, strings.TrimSpace(str[5:])
- default:
- return Info, str
- }
-}
-
-func (s *stdlogAdapter) trimTimestamp(str string) string {
- idx := logTimestampRegexp.FindStringIndex(str)
- return str[idx[1]:]
-}
-
-type logWriter struct {
- l *log.Logger
-}
-
-func (l *logWriter) Write(b []byte) (int, error) {
- l.l.Println(string(bytes.TrimRight(b, " \n\t")))
- return len(b), nil
-}
-
-// Takes a standard library logger and returns a Logger that will write to it
-func FromStandardLogger(l *log.Logger, opts *LoggerOptions) Logger {
- var dl LoggerOptions = *opts
-
- // Use the time format that log.Logger uses
- dl.DisableTime = true
- dl.Output = &logWriter{l}
-
- return New(&dl)
-}
diff --git a/vendor/github.com/hashicorp/go-hclog/writer.go b/vendor/github.com/hashicorp/go-hclog/writer.go
deleted file mode 100644
index 4ee219bf..00000000
--- a/vendor/github.com/hashicorp/go-hclog/writer.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MIT
-
-package hclog
-
-import (
- "bytes"
- "io"
-)
-
-type writer struct {
- b bytes.Buffer
- w io.Writer
- color ColorOption
-}
-
-func newWriter(w io.Writer, color ColorOption) *writer {
- return &writer{w: w, color: color}
-}
-
-func (w *writer) Flush(level Level) (err error) {
- var unwritten = w.b.Bytes()
-
- if w.color != ColorOff {
- color := _levelToColor[level]
- unwritten = []byte(color.Sprintf("%s", unwritten))
- }
-
- if lw, ok := w.w.(LevelWriter); ok {
- _, err = lw.LevelWrite(level, unwritten)
- } else {
- _, err = w.w.Write(unwritten)
- }
- w.b.Reset()
- return err
-}
-
-func (w *writer) Write(p []byte) (int, error) {
- return w.b.Write(p)
-}
-
-func (w *writer) WriteByte(c byte) error {
- return w.b.WriteByte(c)
-}
-
-func (w *writer) WriteString(s string) (int, error) {
- return w.b.WriteString(s)
-}
-
-// LevelWriter is the interface that wraps the LevelWrite method.
-type LevelWriter interface {
- LevelWrite(level Level, p []byte) (n int, err error)
-}
-
-// LeveledWriter writes all log messages to the standard writer,
-// except for log levels that are defined in the overrides map.
-type LeveledWriter struct {
- standard io.Writer
- overrides map[Level]io.Writer
-}
-
-// NewLeveledWriter returns an initialized LeveledWriter.
-//
-// standard will be used as the default writer for all log levels,
-// except for log levels that are defined in the overrides map.
-func NewLeveledWriter(standard io.Writer, overrides map[Level]io.Writer) *LeveledWriter {
- return &LeveledWriter{
- standard: standard,
- overrides: overrides,
- }
-}
-
-// Write implements io.Writer.
-func (lw *LeveledWriter) Write(p []byte) (int, error) {
- return lw.standard.Write(p)
-}
-
-// LevelWrite implements LevelWriter.
-func (lw *LeveledWriter) LevelWrite(level Level, p []byte) (int, error) {
- w, ok := lw.overrides[level]
- if !ok {
- w = lw.standard
- }
- return w.Write(p)
-}
diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE
deleted file mode 100644
index 82b4de97..00000000
--- a/vendor/github.com/hashicorp/go-multierror/LICENSE
+++ /dev/null
@@ -1,353 +0,0 @@
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. “Contributor”
-
- means each individual or legal entity that creates, contributes to the
- creation of, or owns Covered Software.
-
-1.2. “Contributor Version”
-
- means the combination of the Contributions of others (if any) used by a
- Contributor and that particular Contributor’s Contribution.
-
-1.3. “Contribution”
-
- means Covered Software of a particular Contributor.
-
-1.4. “Covered Software”
-
- means Source Code Form to which the initial Contributor has attached the
- notice in Exhibit A, the Executable Form of such Source Code Form, and
- Modifications of such Source Code Form, in each case including portions
- thereof.
-
-1.5. “Incompatible With Secondary Licenses”
- means
-
- a. that the initial Contributor has attached the notice described in
- Exhibit B to the Covered Software; or
-
- b. that the Covered Software was made available under the terms of version
- 1.1 or earlier of the License, but not also under the terms of a
- Secondary License.
-
-1.6. “Executable Form”
-
- means any form of the work other than Source Code Form.
-
-1.7. “Larger Work”
-
- means a work that combines Covered Software with other material, in a separate
- file or files, that is not Covered Software.
-
-1.8. “License”
-
- means this document.
-
-1.9. “Licensable”
-
- means having the right to grant, to the maximum extent possible, whether at the
- time of the initial grant or subsequently, any and all of the rights conveyed by
- this License.
-
-1.10. “Modifications”
-
- means any of the following:
-
- a. any file in Source Code Form that results from an addition to, deletion
- from, or modification of the contents of Covered Software; or
-
- b. any new file in Source Code Form that contains any Covered Software.
-
-1.11. “Patent Claims” of a Contributor
-
- means any patent claim(s), including without limitation, method, process,
- and apparatus claims, in any patent Licensable by such Contributor that
- would be infringed, but for the grant of the License, by the making,
- using, selling, offering for sale, having made, import, or transfer of
- either its Contributions or its Contributor Version.
-
-1.12. “Secondary License”
-
- means either the GNU General Public License, Version 2.0, the GNU Lesser
- General Public License, Version 2.1, the GNU Affero General Public
- License, Version 3.0, or any later versions of those licenses.
-
-1.13. “Source Code Form”
-
- means the form of the work preferred for making modifications.
-
-1.14. “You” (or “Your”)
-
- means an individual or a legal entity exercising rights under this
- License. For legal entities, “You” includes any entity that controls, is
- controlled by, or is under common control with You. For purposes of this
- definition, “control” means (a) the power, direct or indirect, to cause
- the direction or management of such entity, whether by contract or
- otherwise, or (b) ownership of more than fifty percent (50%) of the
- outstanding shares or beneficial ownership of such entity.
-
-
-2. License Grants and Conditions
-
-2.1. Grants
-
- Each Contributor hereby grants You a world-wide, royalty-free,
- non-exclusive license:
-
- a. under intellectual property rights (other than patent or trademark)
- Licensable by such Contributor to use, reproduce, make available,
- modify, display, perform, distribute, and otherwise exploit its
- Contributions, either on an unmodified basis, with Modifications, or as
- part of a Larger Work; and
-
- b. under Patent Claims of such Contributor to make, use, sell, offer for
- sale, have made, import, and otherwise transfer either its Contributions
- or its Contributor Version.
-
-2.2. Effective Date
-
- The licenses granted in Section 2.1 with respect to any Contribution become
- effective for each Contribution on the date the Contributor first distributes
- such Contribution.
-
-2.3. Limitations on Grant Scope
-
- The licenses granted in this Section 2 are the only rights granted under this
- License. No additional rights or licenses will be implied from the distribution
- or licensing of Covered Software under this License. Notwithstanding Section
- 2.1(b) above, no patent license is granted by a Contributor:
-
- a. for any code that a Contributor has removed from Covered Software; or
-
- b. for infringements caused by: (i) Your and any other third party’s
- modifications of Covered Software, or (ii) the combination of its
- Contributions with other software (except as part of its Contributor
- Version); or
-
- c. under Patent Claims infringed by Covered Software in the absence of its
- Contributions.
-
- This License does not grant any rights in the trademarks, service marks, or
- logos of any Contributor (except as may be necessary to comply with the
- notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
- No Contributor makes additional grants as a result of Your choice to
- distribute the Covered Software under a subsequent version of this License
- (see Section 10.2) or under the terms of a Secondary License (if permitted
- under the terms of Section 3.3).
-
-2.5. Representation
-
- Each Contributor represents that the Contributor believes its Contributions
- are its original creation(s) or it has sufficient rights to grant the
- rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
- This License is not intended to limit any rights You have under applicable
- copyright doctrines of fair use, fair dealing, or other equivalents.
-
-2.7. Conditions
-
- Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
- Section 2.1.
-
-
-3. Responsibilities
-
-3.1. Distribution of Source Form
-
- All distribution of Covered Software in Source Code Form, including any
- Modifications that You create or to which You contribute, must be under the
- terms of this License. You must inform recipients that the Source Code Form
- of the Covered Software is governed by the terms of this License, and how
- they can obtain a copy of this License. You may not attempt to alter or
- restrict the recipients’ rights in the Source Code Form.
-
-3.2. Distribution of Executable Form
-
- If You distribute Covered Software in Executable Form then:
-
- a. such Covered Software must also be made available in Source Code Form,
- as described in Section 3.1, and You must inform recipients of the
- Executable Form how they can obtain a copy of such Source Code Form by
- reasonable means in a timely manner, at a charge no more than the cost
- of distribution to the recipient; and
-
- b. You may distribute such Executable Form under the terms of this License,
- or sublicense it under different terms, provided that the license for
- the Executable Form does not attempt to limit or alter the recipients’
- rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
- You may create and distribute a Larger Work under terms of Your choice,
- provided that You also comply with the requirements of this License for the
- Covered Software. If the Larger Work is a combination of Covered Software
- with a work governed by one or more Secondary Licenses, and the Covered
- Software is not Incompatible With Secondary Licenses, this License permits
- You to additionally distribute such Covered Software under the terms of
- such Secondary License(s), so that the recipient of the Larger Work may, at
- their option, further distribute the Covered Software under the terms of
- either this License or such Secondary License(s).
-
-3.4. Notices
-
- You may not remove or alter the substance of any license notices (including
- copyright notices, patent notices, disclaimers of warranty, or limitations
- of liability) contained within the Source Code Form of the Covered
- Software, except that You may alter any license notices to the extent
- required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
- You may choose to offer, and to charge a fee for, warranty, support,
- indemnity or liability obligations to one or more recipients of Covered
- Software. However, You may do so only on Your own behalf, and not on behalf
- of any Contributor. You must make it absolutely clear that any such
- warranty, support, indemnity, or liability obligation is offered by You
- alone, and You hereby agree to indemnify every Contributor for any
- liability incurred by such Contributor as a result of warranty, support,
- indemnity or liability terms You offer. You may include additional
- disclaimers of warranty and limitations of liability specific to any
- jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
-
- If it is impossible for You to comply with any of the terms of this License
- with respect to some or all of the Covered Software due to statute, judicial
- order, or regulation then You must: (a) comply with the terms of this License
- to the maximum extent possible; and (b) describe the limitations and the code
- they affect. Such description must be placed in a text file included with all
- distributions of the Covered Software under this License. Except to the
- extent prohibited by statute or regulation, such description must be
- sufficiently detailed for a recipient of ordinary skill to be able to
- understand it.
-
-5. Termination
-
-5.1. The rights granted under this License will terminate automatically if You
- fail to comply with any of its terms. However, if You become compliant,
- then the rights granted under this License from a particular Contributor
- are reinstated (a) provisionally, unless and until such Contributor
- explicitly and finally terminates Your grants, and (b) on an ongoing basis,
- if such Contributor fails to notify You of the non-compliance by some
- reasonable means prior to 60 days after You have come back into compliance.
- Moreover, Your grants from a particular Contributor are reinstated on an
- ongoing basis if such Contributor notifies You of the non-compliance by
- some reasonable means, this is the first time You have received notice of
- non-compliance with this License from such Contributor, and You become
- compliant prior to 30 days after Your receipt of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
- infringement claim (excluding declaratory judgment actions, counter-claims,
- and cross-claims) alleging that a Contributor Version directly or
- indirectly infringes any patent, then the rights granted to You by any and
- all Contributors for the Covered Software under Section 2.1 of this License
- shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
- license agreements (excluding distributors and resellers) which have been
- validly granted by You or Your distributors under this License prior to
- termination shall survive termination.
-
-6. Disclaimer of Warranty
-
- Covered Software is provided under this License on an “as is” basis, without
- warranty of any kind, either expressed, implied, or statutory, including,
- without limitation, warranties that the Covered Software is free of defects,
- merchantable, fit for a particular purpose or non-infringing. The entire
- risk as to the quality and performance of the Covered Software is with You.
- Should any Covered Software prove defective in any respect, You (not any
- Contributor) assume the cost of any necessary servicing, repair, or
- correction. This disclaimer of warranty constitutes an essential part of this
- License. No use of any Covered Software is authorized under this License
- except under this disclaimer.
-
-7. Limitation of Liability
-
- Under no circumstances and under no legal theory, whether tort (including
- negligence), contract, or otherwise, shall any Contributor, or anyone who
- distributes Covered Software as permitted above, be liable to You for any
- direct, indirect, special, incidental, or consequential damages of any
- character including, without limitation, damages for lost profits, loss of
- goodwill, work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses, even if such party shall have been
- informed of the possibility of such damages. This limitation of liability
- shall not apply to liability for death or personal injury resulting from such
- party’s negligence to the extent applicable law prohibits such limitation.
- Some jurisdictions do not allow the exclusion or limitation of incidental or
- consequential damages, so this exclusion and limitation may not apply to You.
-
-8. Litigation
-
- Any litigation relating to this License may be brought only in the courts of
- a jurisdiction where the defendant maintains its principal place of business
- and such litigation shall be governed by laws of that jurisdiction, without
- reference to its conflict-of-law provisions. Nothing in this Section shall
- prevent a party’s ability to bring cross-claims or counter-claims.
-
-9. Miscellaneous
-
- This License represents the complete agreement concerning the subject matter
- hereof. If any provision of this License is held to be unenforceable, such
- provision shall be reformed only to the extent necessary to make it
- enforceable. Any law or regulation which provides that the language of a
- contract shall be construed against the drafter shall not be used to construe
- this License against a Contributor.
-
-
-10. Versions of the License
-
-10.1. New Versions
-
- Mozilla Foundation is the license steward. Except as provided in Section
- 10.3, no one other than the license steward has the right to modify or
- publish new versions of this License. Each version will be given a
- distinguishing version number.
-
-10.2. Effect of New Versions
-
- You may distribute the Covered Software under the terms of the version of
- the License under which You originally received the Covered Software, or
- under the terms of any subsequent version published by the license
- steward.
-
-10.3. Modified Versions
-
- If you create software not governed by this License, and you want to
- create a new license for such software, you may create and use a modified
- version of this License if you rename the license and remove any
- references to the name of the license steward (except to note that such
- modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
- If You choose to distribute Source Code Form that is Incompatible With
- Secondary Licenses under the terms of this version of the License, the
- notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
-
- This Source Code Form is subject to the
- terms of the Mozilla Public License, v.
- 2.0. If a copy of the MPL was not
- distributed with this file, You can
- obtain one at
- http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file, then
-You may include the notice in a location (such as a LICENSE file in a relevant
-directory) where a recipient would be likely to look for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - “Incompatible With Secondary Licenses” Notice
-
- This Source Code Form is “Incompatible
- With Secondary Licenses”, as defined by
- the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/go-multierror/Makefile b/vendor/github.com/hashicorp/go-multierror/Makefile
deleted file mode 100644
index b97cd6ed..00000000
--- a/vendor/github.com/hashicorp/go-multierror/Makefile
+++ /dev/null
@@ -1,31 +0,0 @@
-TEST?=./...
-
-default: test
-
-# test runs the test suite and vets the code.
-test: generate
- @echo "==> Running tests..."
- @go list $(TEST) \
- | grep -v "/vendor/" \
- | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS}
-
-# testrace runs the race checker
-testrace: generate
- @echo "==> Running tests (race)..."
- @go list $(TEST) \
- | grep -v "/vendor/" \
- | xargs -n1 go test -timeout=60s -race ${TESTARGS}
-
-# updatedeps installs all the dependencies needed to run and build.
-updatedeps:
- @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'"
-
-# generate runs `go generate` to build the dynamically generated source files.
-generate:
- @echo "==> Generating..."
- @find . -type f -name '.DS_Store' -delete
- @go list ./... \
- | grep -v "/vendor/" \
- | xargs -n1 go generate
-
-.PHONY: default test testrace updatedeps generate
diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md
deleted file mode 100644
index 71dd308e..00000000
--- a/vendor/github.com/hashicorp/go-multierror/README.md
+++ /dev/null
@@ -1,150 +0,0 @@
-# go-multierror
-
-[](https://circleci.com/gh/hashicorp/go-multierror)
-[](https://pkg.go.dev/github.com/hashicorp/go-multierror)
-
-
-[circleci]: https://app.circleci.com/pipelines/github/hashicorp/go-multierror
-[godocs]: https://pkg.go.dev/github.com/hashicorp/go-multierror
-
-`go-multierror` is a package for Go that provides a mechanism for
-representing a list of `error` values as a single `error`.
-
-This allows a function in Go to return an `error` that might actually
-be a list of errors. If the caller knows this, they can unwrap the
-list and access the errors. If the caller doesn't know, the error
-formats to a nice human-readable format.
-
-`go-multierror` is fully compatible with the Go standard library
-[errors](https://golang.org/pkg/errors/) package, including the
-functions `As`, `Is`, and `Unwrap`. This provides a standardized approach
-for introspecting on error values.
-
-## Installation and Docs
-
-Install using `go get github.com/hashicorp/go-multierror`.
-
-Full documentation is available at
-https://pkg.go.dev/github.com/hashicorp/go-multierror
-
-### Requires go version 1.13 or newer
-
-`go-multierror` requires go version 1.13 or newer. Go 1.13 introduced
-[error wrapping](https://golang.org/doc/go1.13#error_wrapping), which
-this library takes advantage of.
-
-If you need to use an earlier version of go, you can use the
-[v1.0.0](https://github.com/hashicorp/go-multierror/tree/v1.0.0)
-tag, which doesn't rely on features in go 1.13.
-
-If you see compile errors that look like the below, it's likely that
-you're on an older version of go:
-
-```
-/go/src/github.com/hashicorp/go-multierror/multierror.go:112:9: undefined: errors.As
-/go/src/github.com/hashicorp/go-multierror/multierror.go:117:9: undefined: errors.Is
-```
-
-## Usage
-
-go-multierror is easy to use and purposely built to be unobtrusive in
-existing Go applications/libraries that may not be aware of it.
-
-**Building a list of errors**
-
-The `Append` function is used to create a list of errors. This function
-behaves a lot like the Go built-in `append` function: it doesn't matter
-if the first argument is nil, a `multierror.Error`, or any other `error`,
-the function behaves as you would expect.
-
-```go
-var result error
-
-if err := step1(); err != nil {
- result = multierror.Append(result, err)
-}
-if err := step2(); err != nil {
- result = multierror.Append(result, err)
-}
-
-return result
-```
-
-**Customizing the formatting of the errors**
-
-By specifying a custom `ErrorFormat`, you can customize the format
-of the `Error() string` function:
-
-```go
-var result *multierror.Error
-
-// ... accumulate errors here, maybe using Append
-
-if result != nil {
- result.ErrorFormat = func([]error) string {
- return "errors!"
- }
-}
-```
-
-**Accessing the list of errors**
-
-`multierror.Error` implements `error` so if the caller doesn't know about
-multierror, it will work just fine. But if you're aware a multierror might
-be returned, you can use type switches to access the list of errors:
-
-```go
-if err := something(); err != nil {
- if merr, ok := err.(*multierror.Error); ok {
- // Use merr.Errors
- }
-}
-```
-
-You can also use the standard [`errors.Unwrap`](https://golang.org/pkg/errors/#Unwrap)
-function. This will continue to unwrap into subsequent errors until none exist.
-
-**Extracting an error**
-
-The standard library [`errors.As`](https://golang.org/pkg/errors/#As)
-function can be used directly with a multierror to extract a specific error:
-
-```go
-// Assume err is a multierror value
-err := somefunc()
-
-// We want to know if "err" has a "RichErrorType" in it and extract it.
-var errRich RichErrorType
-if errors.As(err, &errRich) {
- // It has it, and now errRich is populated.
-}
-```
-
-**Checking for an exact error value**
-
-Some errors are returned as exact errors such as the [`ErrNotExist`](https://golang.org/pkg/os/#pkg-variables)
-error in the `os` package. You can check if this error is present by using
-the standard [`errors.Is`](https://golang.org/pkg/errors/#Is) function.
-
-```go
-// Assume err is a multierror value
-err := somefunc()
-if errors.Is(err, os.ErrNotExist) {
- // err contains os.ErrNotExist
-}
-```
-
-**Returning a multierror only if there are errors**
-
-If you build a `multierror.Error`, you can use the `ErrorOrNil` function
-to return an `error` implementation only if there are errors to return:
-
-```go
-var result *multierror.Error
-
-// ... accumulate errors here
-
-// Return the `error` only if errors were added to the multierror, otherwise
-// return nil since there are no errors.
-return result.ErrorOrNil()
-```
diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go
deleted file mode 100644
index 3e2589bf..00000000
--- a/vendor/github.com/hashicorp/go-multierror/append.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package multierror
-
-// Append is a helper function that will append more errors
-// onto an Error in order to create a larger multi-error.
-//
-// If err is not a multierror.Error, then it will be turned into
-// one. If any of the errs are multierr.Error, they will be flattened
-// one level into err.
-// Any nil errors within errs will be ignored. If err is nil, a new
-// *Error will be returned.
-func Append(err error, errs ...error) *Error {
- switch err := err.(type) {
- case *Error:
- // Typed nils can reach here, so initialize if we are nil
- if err == nil {
- err = new(Error)
- }
-
- // Go through each error and flatten
- for _, e := range errs {
- switch e := e.(type) {
- case *Error:
- if e != nil {
- err.Errors = append(err.Errors, e.Errors...)
- }
- default:
- if e != nil {
- err.Errors = append(err.Errors, e)
- }
- }
- }
-
- return err
- default:
- newErrs := make([]error, 0, len(errs)+1)
- if err != nil {
- newErrs = append(newErrs, err)
- }
- newErrs = append(newErrs, errs...)
-
- return Append(&Error{}, newErrs...)
- }
-}
diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go
deleted file mode 100644
index aab8e9ab..00000000
--- a/vendor/github.com/hashicorp/go-multierror/flatten.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package multierror
-
-// Flatten flattens the given error, merging any *Errors together into
-// a single *Error.
-func Flatten(err error) error {
- // If it isn't an *Error, just return the error as-is
- if _, ok := err.(*Error); !ok {
- return err
- }
-
- // Otherwise, make the result and flatten away!
- flatErr := new(Error)
- flatten(err, flatErr)
- return flatErr
-}
-
-func flatten(err error, flatErr *Error) {
- switch err := err.(type) {
- case *Error:
- for _, e := range err.Errors {
- flatten(e, flatErr)
- }
- default:
- flatErr.Errors = append(flatErr.Errors, err)
- }
-}
diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go
deleted file mode 100644
index 47f13c49..00000000
--- a/vendor/github.com/hashicorp/go-multierror/format.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package multierror
-
-import (
- "fmt"
- "strings"
-)
-
-// ErrorFormatFunc is a function callback that is called by Error to
-// turn the list of errors into a string.
-type ErrorFormatFunc func([]error) string
-
-// ListFormatFunc is a basic formatter that outputs the number of errors
-// that occurred along with a bullet point list of the errors.
-func ListFormatFunc(es []error) string {
- if len(es) == 1 {
- return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0])
- }
-
- points := make([]string, len(es))
- for i, err := range es {
- points[i] = fmt.Sprintf("* %s", err)
- }
-
- return fmt.Sprintf(
- "%d errors occurred:\n\t%s\n\n",
- len(es), strings.Join(points, "\n\t"))
-}
diff --git a/vendor/github.com/hashicorp/go-multierror/group.go b/vendor/github.com/hashicorp/go-multierror/group.go
deleted file mode 100644
index 9c29efb7..00000000
--- a/vendor/github.com/hashicorp/go-multierror/group.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package multierror
-
-import "sync"
-
-// Group is a collection of goroutines which return errors that need to be
-// coalesced.
-type Group struct {
- mutex sync.Mutex
- err *Error
- wg sync.WaitGroup
-}
-
-// Go calls the given function in a new goroutine.
-//
-// If the function returns an error it is added to the group multierror which
-// is returned by Wait.
-func (g *Group) Go(f func() error) {
- g.wg.Add(1)
-
- go func() {
- defer g.wg.Done()
-
- if err := f(); err != nil {
- g.mutex.Lock()
- g.err = Append(g.err, err)
- g.mutex.Unlock()
- }
- }()
-}
-
-// Wait blocks until all function calls from the Go method have returned, then
-// returns the multierror.
-func (g *Group) Wait() *Error {
- g.wg.Wait()
- g.mutex.Lock()
- defer g.mutex.Unlock()
- return g.err
-}
diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go
deleted file mode 100644
index f5457432..00000000
--- a/vendor/github.com/hashicorp/go-multierror/multierror.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package multierror
-
-import (
- "errors"
- "fmt"
-)
-
-// Error is an error type to track multiple errors. This is used to
-// accumulate errors in cases and return them as a single "error".
-type Error struct {
- Errors []error
- ErrorFormat ErrorFormatFunc
-}
-
-func (e *Error) Error() string {
- fn := e.ErrorFormat
- if fn == nil {
- fn = ListFormatFunc
- }
-
- return fn(e.Errors)
-}
-
-// ErrorOrNil returns an error interface if this Error represents
-// a list of errors, or returns nil if the list of errors is empty. This
-// function is useful at the end of accumulation to make sure that the value
-// returned represents the existence of errors.
-func (e *Error) ErrorOrNil() error {
- if e == nil {
- return nil
- }
- if len(e.Errors) == 0 {
- return nil
- }
-
- return e
-}
-
-func (e *Error) GoString() string {
- return fmt.Sprintf("*%#v", *e)
-}
-
-// WrappedErrors returns the list of errors that this Error is wrapping. It is
-// an implementation of the errwrap.Wrapper interface so that multierror.Error
-// can be used with that library.
-//
-// This method is not safe to be called concurrently. Unlike accessing the
-// Errors field directly, this function also checks if the multierror is nil to
-// prevent a null-pointer panic. It satisfies the errwrap.Wrapper interface.
-func (e *Error) WrappedErrors() []error {
- if e == nil {
- return nil
- }
- return e.Errors
-}
-
-// Unwrap returns an error from Error (or nil if there are no errors).
-// This error returned will further support Unwrap to get the next error,
-// etc. The order will match the order of Errors in the multierror.Error
-// at the time of calling.
-//
-// The resulting error supports errors.As/Is/Unwrap so you can continue
-// to use the stdlib errors package to introspect further.
-//
-// This will perform a shallow copy of the errors slice. Any errors appended
-// to this error after calling Unwrap will not be available until a new
-// Unwrap is called on the multierror.Error.
-func (e *Error) Unwrap() error {
- // If we have no errors then we do nothing
- if e == nil || len(e.Errors) == 0 {
- return nil
- }
-
- // If we have exactly one error, we can just return that directly.
- if len(e.Errors) == 1 {
- return e.Errors[0]
- }
-
- // Shallow copy the slice
- errs := make([]error, len(e.Errors))
- copy(errs, e.Errors)
- return chain(errs)
-}
-
-// chain implements the interfaces necessary for errors.Is/As/Unwrap to
-// work in a deterministic way with multierror. A chain tracks a list of
-// errors while accounting for the current represented error. This lets
-// Is/As be meaningful.
-//
-// Unwrap returns the next error. In the cleanest form, Unwrap would return
-// the wrapped error here but we can't do that if we want to properly
-// get access to all the errors. Instead, users are recommended to use
-// Is/As to get the correct error type out.
-//
-// Precondition: []error is non-empty (len > 0)
-type chain []error
-
-// Error implements the error interface
-func (e chain) Error() string {
- return e[0].Error()
-}
-
-// Unwrap implements errors.Unwrap by returning the next error in the
-// chain or nil if there are no more errors.
-func (e chain) Unwrap() error {
- if len(e) == 1 {
- return nil
- }
-
- return e[1:]
-}
-
-// As implements errors.As by attempting to map to the current value.
-func (e chain) As(target interface{}) bool {
- return errors.As(e[0], target)
-}
-
-// Is implements errors.Is by comparing the current value directly.
-func (e chain) Is(target error) bool {
- return errors.Is(e[0], target)
-}
diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go
deleted file mode 100644
index 5c477abe..00000000
--- a/vendor/github.com/hashicorp/go-multierror/prefix.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package multierror
-
-import (
- "fmt"
-
- "github.com/hashicorp/errwrap"
-)
-
-// Prefix is a helper function that will prefix some text
-// to the given error. If the error is a multierror.Error, then
-// it will be prefixed to each wrapped error.
-//
-// This is useful to use when appending multiple multierrors
-// together in order to give better scoping.
-func Prefix(err error, prefix string) error {
- if err == nil {
- return nil
- }
-
- format := fmt.Sprintf("%s {{err}}", prefix)
- switch err := err.(type) {
- case *Error:
- // Typed nils can reach here, so initialize if we are nil
- if err == nil {
- err = new(Error)
- }
-
- // Wrap each of the errors
- for i, e := range err.Errors {
- err.Errors[i] = errwrap.Wrapf(format, e)
- }
-
- return err
- default:
- return errwrap.Wrapf(format, err)
- }
-}
diff --git a/vendor/github.com/hashicorp/go-multierror/sort.go b/vendor/github.com/hashicorp/go-multierror/sort.go
deleted file mode 100644
index fecb14e8..00000000
--- a/vendor/github.com/hashicorp/go-multierror/sort.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package multierror
-
-// Len implements sort.Interface function for length
-func (err Error) Len() int {
- return len(err.Errors)
-}
-
-// Swap implements sort.Interface function for swapping elements
-func (err Error) Swap(i, j int) {
- err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i]
-}
-
-// Less implements sort.Interface function for determining order
-func (err Error) Less(i, j int) bool {
- return err.Errors[i].Error() < err.Errors[j].Error()
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/.gitignore b/vendor/github.com/hashicorp/go-plugin/.gitignore
deleted file mode 100644
index 4befed30..00000000
--- a/vendor/github.com/hashicorp/go-plugin/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-.DS_Store
-.idea
diff --git a/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md b/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md
deleted file mode 100644
index 722ad4f2..00000000
--- a/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md
+++ /dev/null
@@ -1,112 +0,0 @@
-## v1.6.1
-
-BUGS:
-
-* Suppress spurious `os.ErrClosed` on plugin shutdown [[GH-299](https://github.com/hashicorp/go-plugin/pull/299)]
-
-ENHANCEMENTS:
-
-* deps: bump google.golang.org/grpc to v1.58.3 [[GH-296](https://github.com/hashicorp/go-plugin/pull/296)]
-
-## v1.6.0
-
-CHANGES:
-
-* plugin: Plugins written in other languages can optionally start to advertise whether they support gRPC broker multiplexing.
- If the environment variable `PLUGIN_MULTIPLEX_GRPC` is set, it is safe to include a seventh field containing a boolean
- value in the `|`-separated protocol negotiation line.
-
-ENHANCEMENTS:
-
-* Support muxing gRPC broker connections over a single listener [[GH-288](https://github.com/hashicorp/go-plugin/pull/288)]
-* client: Configurable buffer size for reading plugin log lines [[GH-265](https://github.com/hashicorp/go-plugin/pull/265)]
-* Use `buf` for proto generation [[GH-286](https://github.com/hashicorp/go-plugin/pull/286)]
-* deps: bump golang.org/x/net to v0.17.0 [[GH-285](https://github.com/hashicorp/go-plugin/pull/285)]
-* deps: bump golang.org/x/sys to v0.13.0 [[GH-285](https://github.com/hashicorp/go-plugin/pull/285)]
-* deps: bump golang.org/x/text to v0.13.0 [[GH-285](https://github.com/hashicorp/go-plugin/pull/285)]
-
-## v1.5.2
-
-ENHANCEMENTS:
-
-client: New `UnixSocketConfig.TempDir` option allows setting the directory to use when creating plugin-specific Unix socket directories [[GH-282](https://github.com/hashicorp/go-plugin/pull/282)]
-
-## v1.5.1
-
-BUGS:
-
-* server: `PLUGIN_UNIX_SOCKET_DIR` is consistently used for gRPC broker sockets as well as the initial socket [[GH-277](https://github.com/hashicorp/go-plugin/pull/277)]
-
-ENHANCEMENTS:
-
-* client: New `UnixSocketConfig` option in `ClientConfig` to support making the client's Unix sockets group-writable [[GH-277](https://github.com/hashicorp/go-plugin/pull/277)]
-
-## v1.5.0
-
-ENHANCEMENTS:
-
-* client: New `runner.Runner` interface to support clients providing custom plugin command runner implementations [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)]
- * Accessible via new `ClientConfig` field `RunnerFunc`, which is mutually exclusive with `Cmd` and `Reattach`
- * Reattaching support via `ReattachConfig` field `ReattachFunc`
-* client: New `ClientConfig` field `SkipHostEnv` allows omitting the client process' own environment variables from the plugin command's environment [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)]
-* client: Add `ID()` method to `Client` for retrieving the pid or other unique ID of a running plugin [[GH-272](https://github.com/hashicorp/go-plugin/pull/272)]
-* server: Support setting the directory to create Unix sockets in with the env var `PLUGIN_UNIX_SOCKET_DIR` [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)]
-* server: Support setting group write permission and a custom group name or gid owner with the env var `PLUGIN_UNIX_SOCKET_GROUP` [[GH-270](https://github.com/hashicorp/go-plugin/pull/270)]
-
-## v1.4.11-rc1
-
-ENHANCEMENTS:
-
-* deps: bump protoreflect to v1.15.1 [[GH-264](https://github.com/hashicorp/go-plugin/pull/264)]
-
-## v1.4.10
-
-BUG FIXES:
-
-* additional notes: ensure to close files [[GH-241](https://github.com/hashicorp/go-plugin/pull/241)]
-
-ENHANCEMENTS:
-
-* deps: Remove direct dependency on golang.org/x/net [[GH-240](https://github.com/hashicorp/go-plugin/pull/240)]
-
-## v1.4.9
-
-ENHANCEMENTS:
-
-* client: Remove log warning introduced in 1.4.5 when SecureConfig is nil. [[GH-238](https://github.com/hashicorp/go-plugin/pull/238)]
-
-## v1.4.8
-
-BUG FIXES:
-
-* Fix windows build: [[GH-227](https://github.com/hashicorp/go-plugin/pull/227)]
-
-## v1.4.7
-
-ENHANCEMENTS:
-
-* More detailed error message on plugin start failure: [[GH-223](https://github.com/hashicorp/go-plugin/pull/223)]
-
-## v1.4.6
-
-BUG FIXES:
-
-* server: Prevent gRPC broker goroutine leak when using `GRPCServer` type `GracefulStop()` or `Stop()` methods [[GH-220](https://github.com/hashicorp/go-plugin/pull/220)]
-
-## v1.4.5
-
-ENHANCEMENTS:
-
-* client: log warning when SecureConfig is nil [[GH-207](https://github.com/hashicorp/go-plugin/pull/207)]
-
-
-## v1.4.4
-
-ENHANCEMENTS:
-
-* client: increase level of plugin exit logs [[GH-195](https://github.com/hashicorp/go-plugin/pull/195)]
-
-BUG FIXES:
-
-* Bidirectional communication: fix bidirectional communication when AutoMTLS is enabled [[GH-193](https://github.com/hashicorp/go-plugin/pull/193)]
-* RPC: Trim a spurious log message for plugins using RPC [[GH-186](https://github.com/hashicorp/go-plugin/pull/186)]
diff --git a/vendor/github.com/hashicorp/go-plugin/LICENSE b/vendor/github.com/hashicorp/go-plugin/LICENSE
deleted file mode 100644
index 042324fb..00000000
--- a/vendor/github.com/hashicorp/go-plugin/LICENSE
+++ /dev/null
@@ -1,355 +0,0 @@
-Copyright (c) 2016 HashiCorp, Inc.
-
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. “Contributor”
-
- means each individual or legal entity that creates, contributes to the
- creation of, or owns Covered Software.
-
-1.2. “Contributor Version”
-
- means the combination of the Contributions of others (if any) used by a
- Contributor and that particular Contributor’s Contribution.
-
-1.3. “Contribution”
-
- means Covered Software of a particular Contributor.
-
-1.4. “Covered Software”
-
- means Source Code Form to which the initial Contributor has attached the
- notice in Exhibit A, the Executable Form of such Source Code Form, and
- Modifications of such Source Code Form, in each case including portions
- thereof.
-
-1.5. “Incompatible With Secondary Licenses”
- means
-
- a. that the initial Contributor has attached the notice described in
- Exhibit B to the Covered Software; or
-
- b. that the Covered Software was made available under the terms of version
- 1.1 or earlier of the License, but not also under the terms of a
- Secondary License.
-
-1.6. “Executable Form”
-
- means any form of the work other than Source Code Form.
-
-1.7. “Larger Work”
-
- means a work that combines Covered Software with other material, in a separate
- file or files, that is not Covered Software.
-
-1.8. “License”
-
- means this document.
-
-1.9. “Licensable”
-
- means having the right to grant, to the maximum extent possible, whether at the
- time of the initial grant or subsequently, any and all of the rights conveyed by
- this License.
-
-1.10. “Modifications”
-
- means any of the following:
-
- a. any file in Source Code Form that results from an addition to, deletion
- from, or modification of the contents of Covered Software; or
-
- b. any new file in Source Code Form that contains any Covered Software.
-
-1.11. “Patent Claims” of a Contributor
-
- means any patent claim(s), including without limitation, method, process,
- and apparatus claims, in any patent Licensable by such Contributor that
- would be infringed, but for the grant of the License, by the making,
- using, selling, offering for sale, having made, import, or transfer of
- either its Contributions or its Contributor Version.
-
-1.12. “Secondary License”
-
- means either the GNU General Public License, Version 2.0, the GNU Lesser
- General Public License, Version 2.1, the GNU Affero General Public
- License, Version 3.0, or any later versions of those licenses.
-
-1.13. “Source Code Form”
-
- means the form of the work preferred for making modifications.
-
-1.14. “You” (or “Your”)
-
- means an individual or a legal entity exercising rights under this
- License. For legal entities, “You” includes any entity that controls, is
- controlled by, or is under common control with You. For purposes of this
- definition, “control” means (a) the power, direct or indirect, to cause
- the direction or management of such entity, whether by contract or
- otherwise, or (b) ownership of more than fifty percent (50%) of the
- outstanding shares or beneficial ownership of such entity.
-
-
-2. License Grants and Conditions
-
-2.1. Grants
-
- Each Contributor hereby grants You a world-wide, royalty-free,
- non-exclusive license:
-
- a. under intellectual property rights (other than patent or trademark)
- Licensable by such Contributor to use, reproduce, make available,
- modify, display, perform, distribute, and otherwise exploit its
- Contributions, either on an unmodified basis, with Modifications, or as
- part of a Larger Work; and
-
- b. under Patent Claims of such Contributor to make, use, sell, offer for
- sale, have made, import, and otherwise transfer either its Contributions
- or its Contributor Version.
-
-2.2. Effective Date
-
- The licenses granted in Section 2.1 with respect to any Contribution become
- effective for each Contribution on the date the Contributor first distributes
- such Contribution.
-
-2.3. Limitations on Grant Scope
-
- The licenses granted in this Section 2 are the only rights granted under this
- License. No additional rights or licenses will be implied from the distribution
- or licensing of Covered Software under this License. Notwithstanding Section
- 2.1(b) above, no patent license is granted by a Contributor:
-
- a. for any code that a Contributor has removed from Covered Software; or
-
- b. for infringements caused by: (i) Your and any other third party’s
- modifications of Covered Software, or (ii) the combination of its
- Contributions with other software (except as part of its Contributor
- Version); or
-
- c. under Patent Claims infringed by Covered Software in the absence of its
- Contributions.
-
- This License does not grant any rights in the trademarks, service marks, or
- logos of any Contributor (except as may be necessary to comply with the
- notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
- No Contributor makes additional grants as a result of Your choice to
- distribute the Covered Software under a subsequent version of this License
- (see Section 10.2) or under the terms of a Secondary License (if permitted
- under the terms of Section 3.3).
-
-2.5. Representation
-
- Each Contributor represents that the Contributor believes its Contributions
- are its original creation(s) or it has sufficient rights to grant the
- rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
- This License is not intended to limit any rights You have under applicable
- copyright doctrines of fair use, fair dealing, or other equivalents.
-
-2.7. Conditions
-
- Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
- Section 2.1.
-
-
-3. Responsibilities
-
-3.1. Distribution of Source Form
-
- All distribution of Covered Software in Source Code Form, including any
- Modifications that You create or to which You contribute, must be under the
- terms of this License. You must inform recipients that the Source Code Form
- of the Covered Software is governed by the terms of this License, and how
- they can obtain a copy of this License. You may not attempt to alter or
- restrict the recipients’ rights in the Source Code Form.
-
-3.2. Distribution of Executable Form
-
- If You distribute Covered Software in Executable Form then:
-
- a. such Covered Software must also be made available in Source Code Form,
- as described in Section 3.1, and You must inform recipients of the
- Executable Form how they can obtain a copy of such Source Code Form by
- reasonable means in a timely manner, at a charge no more than the cost
- of distribution to the recipient; and
-
- b. You may distribute such Executable Form under the terms of this License,
- or sublicense it under different terms, provided that the license for
- the Executable Form does not attempt to limit or alter the recipients’
- rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
- You may create and distribute a Larger Work under terms of Your choice,
- provided that You also comply with the requirements of this License for the
- Covered Software. If the Larger Work is a combination of Covered Software
- with a work governed by one or more Secondary Licenses, and the Covered
- Software is not Incompatible With Secondary Licenses, this License permits
- You to additionally distribute such Covered Software under the terms of
- such Secondary License(s), so that the recipient of the Larger Work may, at
- their option, further distribute the Covered Software under the terms of
- either this License or such Secondary License(s).
-
-3.4. Notices
-
- You may not remove or alter the substance of any license notices (including
- copyright notices, patent notices, disclaimers of warranty, or limitations
- of liability) contained within the Source Code Form of the Covered
- Software, except that You may alter any license notices to the extent
- required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
- You may choose to offer, and to charge a fee for, warranty, support,
- indemnity or liability obligations to one or more recipients of Covered
- Software. However, You may do so only on Your own behalf, and not on behalf
- of any Contributor. You must make it absolutely clear that any such
- warranty, support, indemnity, or liability obligation is offered by You
- alone, and You hereby agree to indemnify every Contributor for any
- liability incurred by such Contributor as a result of warranty, support,
- indemnity or liability terms You offer. You may include additional
- disclaimers of warranty and limitations of liability specific to any
- jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
-
- If it is impossible for You to comply with any of the terms of this License
- with respect to some or all of the Covered Software due to statute, judicial
- order, or regulation then You must: (a) comply with the terms of this License
- to the maximum extent possible; and (b) describe the limitations and the code
- they affect. Such description must be placed in a text file included with all
- distributions of the Covered Software under this License. Except to the
- extent prohibited by statute or regulation, such description must be
- sufficiently detailed for a recipient of ordinary skill to be able to
- understand it.
-
-5. Termination
-
-5.1. The rights granted under this License will terminate automatically if You
- fail to comply with any of its terms. However, if You become compliant,
- then the rights granted under this License from a particular Contributor
- are reinstated (a) provisionally, unless and until such Contributor
- explicitly and finally terminates Your grants, and (b) on an ongoing basis,
- if such Contributor fails to notify You of the non-compliance by some
- reasonable means prior to 60 days after You have come back into compliance.
- Moreover, Your grants from a particular Contributor are reinstated on an
- ongoing basis if such Contributor notifies You of the non-compliance by
- some reasonable means, this is the first time You have received notice of
- non-compliance with this License from such Contributor, and You become
- compliant prior to 30 days after Your receipt of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
- infringement claim (excluding declaratory judgment actions, counter-claims,
- and cross-claims) alleging that a Contributor Version directly or
- indirectly infringes any patent, then the rights granted to You by any and
- all Contributors for the Covered Software under Section 2.1 of this License
- shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
- license agreements (excluding distributors and resellers) which have been
- validly granted by You or Your distributors under this License prior to
- termination shall survive termination.
-
-6. Disclaimer of Warranty
-
- Covered Software is provided under this License on an “as is” basis, without
- warranty of any kind, either expressed, implied, or statutory, including,
- without limitation, warranties that the Covered Software is free of defects,
- merchantable, fit for a particular purpose or non-infringing. The entire
- risk as to the quality and performance of the Covered Software is with You.
- Should any Covered Software prove defective in any respect, You (not any
- Contributor) assume the cost of any necessary servicing, repair, or
- correction. This disclaimer of warranty constitutes an essential part of this
- License. No use of any Covered Software is authorized under this License
- except under this disclaimer.
-
-7. Limitation of Liability
-
- Under no circumstances and under no legal theory, whether tort (including
- negligence), contract, or otherwise, shall any Contributor, or anyone who
- distributes Covered Software as permitted above, be liable to You for any
- direct, indirect, special, incidental, or consequential damages of any
- character including, without limitation, damages for lost profits, loss of
- goodwill, work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses, even if such party shall have been
- informed of the possibility of such damages. This limitation of liability
- shall not apply to liability for death or personal injury resulting from such
- party’s negligence to the extent applicable law prohibits such limitation.
- Some jurisdictions do not allow the exclusion or limitation of incidental or
- consequential damages, so this exclusion and limitation may not apply to You.
-
-8. Litigation
-
- Any litigation relating to this License may be brought only in the courts of
- a jurisdiction where the defendant maintains its principal place of business
- and such litigation shall be governed by laws of that jurisdiction, without
- reference to its conflict-of-law provisions. Nothing in this Section shall
- prevent a party’s ability to bring cross-claims or counter-claims.
-
-9. Miscellaneous
-
- This License represents the complete agreement concerning the subject matter
- hereof. If any provision of this License is held to be unenforceable, such
- provision shall be reformed only to the extent necessary to make it
- enforceable. Any law or regulation which provides that the language of a
- contract shall be construed against the drafter shall not be used to construe
- this License against a Contributor.
-
-
-10. Versions of the License
-
-10.1. New Versions
-
- Mozilla Foundation is the license steward. Except as provided in Section
- 10.3, no one other than the license steward has the right to modify or
- publish new versions of this License. Each version will be given a
- distinguishing version number.
-
-10.2. Effect of New Versions
-
- You may distribute the Covered Software under the terms of the version of
- the License under which You originally received the Covered Software, or
- under the terms of any subsequent version published by the license
- steward.
-
-10.3. Modified Versions
-
- If you create software not governed by this License, and you want to
- create a new license for such software, you may create and use a modified
- version of this License if you rename the license and remove any
- references to the name of the license steward (except to note that such
- modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
- If You choose to distribute Source Code Form that is Incompatible With
- Secondary Licenses under the terms of this version of the License, the
- notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
-
- This Source Code Form is subject to the
- terms of the Mozilla Public License, v.
- 2.0. If a copy of the MPL was not
- distributed with this file, You can
- obtain one at
- http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file, then
-You may include the notice in a location (such as a LICENSE file in a relevant
-directory) where a recipient would be likely to look for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - “Incompatible With Secondary Licenses” Notice
-
- This Source Code Form is “Incompatible
- With Secondary Licenses”, as defined by
- the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md
deleted file mode 100644
index 50baee06..00000000
--- a/vendor/github.com/hashicorp/go-plugin/README.md
+++ /dev/null
@@ -1,165 +0,0 @@
-# Go Plugin System over RPC
-
-`go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system
-that has been in use by HashiCorp tooling for over 4 years. While initially
-created for [Packer](https://www.packer.io), it is additionally in use by
-[Terraform](https://www.terraform.io), [Nomad](https://www.nomadproject.io),
-[Vault](https://www.vaultproject.io),
-[Boundary](https://www.boundaryproject.io),
-and [Waypoint](https://www.waypointproject.io).
-
-While the plugin system is over RPC, it is currently only designed to work
-over a local [reliable] network. Plugins over a real network are not supported
-and will lead to unexpected behavior.
-
-This plugin system has been used on millions of machines across many different
-projects and has proven to be battle hardened and ready for production use.
-
-## Features
-
-The HashiCorp plugin system supports a number of features:
-
-**Plugins are Go interface implementations.** This makes writing and consuming
-plugins feel very natural. To a plugin author: you just implement an
-interface as if it were going to run in the same process. For a plugin user:
-you just use and call functions on an interface as if it were in the same
-process. This plugin system handles the communication in between.
-
-**Cross-language support.** Plugins can be written (and consumed) by
-almost every major language. This library supports serving plugins via
-[gRPC](http://www.grpc.io). gRPC-based plugins enable plugins to be written
-in any language.
-
-**Complex arguments and return values are supported.** This library
-provides APIs for handling complex arguments and return values such
-as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library
-(`MuxBroker`) for creating new connections between the client/server to
-serve additional interfaces or transfer raw data.
-
-**Bidirectional communication.** Because the plugin system supports
-complex arguments, the host process can send it interface implementations
-and the plugin can call back into the host process.
-
-**Built-in Logging.** Any plugins that use the `log` standard library
-will have log data automatically sent to the host process. The host
-process will mirror this output prefixed with the path to the plugin
-binary. This makes debugging with plugins simple. If the host system
-uses [hclog](https://github.com/hashicorp/go-hclog) then the log data
-will be structured. If the plugin also uses hclog, logs from the plugin
-will be sent to the host hclog and be structured.
-
-**Protocol Versioning.** A very basic "protocol version" is supported that
-can be incremented to invalidate any previous plugins. This is useful when
-interface signatures are changing, protocol level changes are necessary,
-etc. When a protocol version is incompatible, a human friendly error
-message is shown to the end user.
-
-**Stdout/Stderr Syncing.** While plugins are subprocesses, they can continue
-to use stdout/stderr as usual and the output will get mirrored back to
-the host process. The host process can control what `io.Writer` these
-streams go to to prevent this from happening.
-
-**TTY Preservation.** Plugin subprocesses are connected to the identical
-stdin file descriptor as the host process, allowing software that requires
-a TTY to work. For example, a plugin can execute `ssh` and even though there
-are multiple subprocesses and RPC happening, it will look and act perfectly
-to the end user.
-
-**Host upgrade while a plugin is running.** Plugins can be "reattached"
-so that the host process can be upgraded while the plugin is still running.
-This requires the host/plugin to know this is possible and daemonize
-properly. `NewClient` takes a `ReattachConfig` to determine if and how to
-reattach.
-
-**Cryptographically Secure Plugins.** Plugins can be verified with an expected
-checksum and RPC communications can be configured to use TLS. The host process
-must be properly secured to protect this configuration.
-
-## Architecture
-
-The HashiCorp plugin system works by launching subprocesses and communicating
-over RPC (using standard `net/rpc` or [gRPC](http://www.grpc.io)). A single
-connection is made between any plugin and the host process. For net/rpc-based
-plugins, we use a [connection multiplexing](https://github.com/hashicorp/yamux)
-library to multiplex any other connections on top. For gRPC-based plugins,
-the HTTP2 protocol handles multiplexing.
-
-This architecture has a number of benefits:
-
- * Plugins can't crash your host process: A panic in a plugin doesn't
- panic the plugin user.
-
- * Plugins are very easy to write: just write a Go application and `go build`.
- Or use any other language to write a gRPC server with a tiny amount of
- boilerplate to support go-plugin.
-
- * Plugins are very easy to install: just put the binary in a location where
- the host will find it (depends on the host but this library also provides
- helpers), and the plugin host handles the rest.
-
- * Plugins can be relatively secure: The plugin only has access to the
- interfaces and args given to it, not to the entire memory space of the
- process. Additionally, go-plugin can communicate with the plugin over
- TLS.
-
-## Usage
-
-To use the plugin system, you must take the following steps. These are
-high-level steps that must be done. Examples are available in the
-`examples/` directory.
-
- 1. Choose the interface(s) you want to expose for plugins.
-
- 2. For each interface, implement an implementation of that interface
- that communicates over a `net/rpc` connection or over a
- [gRPC](http://www.grpc.io) connection or both. You'll have to implement
- both a client and server implementation.
-
- 3. Create a `Plugin` implementation that knows how to create the RPC
- client/server for a given plugin type.
-
- 4. Plugin authors call `plugin.Serve` to serve a plugin from the
- `main` function.
-
- 5. Plugin users use `plugin.Client` to launch a subprocess and request
- an interface implementation over RPC.
-
-That's it! In practice, step 2 is the most tedious and time consuming step.
-Even so, it isn't very difficult and you can see examples in the `examples/`
-directory as well as throughout our various open source projects.
-
-For complete API documentation, see [GoDoc](https://godoc.org/github.com/hashicorp/go-plugin).
-
-## Roadmap
-
-Our plugin system is constantly evolving. As we use the plugin system for
-new projects or for new features in existing projects, we constantly find
-improvements we can make.
-
-At this point in time, the roadmap for the plugin system is:
-
-**Semantic Versioning.** Plugins will be able to implement a semantic version.
-This plugin system will give host processes a system for constraining
-versions. This is in addition to the protocol versioning already present
-which is more for larger underlying changes.
-
-## What About Shared Libraries?
-
-When we started using plugins (late 2012, early 2013), plugins over RPC
-were the only option since Go didn't support dynamic library loading. Today,
-Go supports the [plugin](https://golang.org/pkg/plugin/) standard library with
-a number of limitations. Since 2012, our plugin system has stabilized
-from tens of millions of users using it, and has many benefits we've come to
-value greatly.
-
-For example, we use this plugin system in
-[Vault](https://www.vaultproject.io) where dynamic library loading is
-not acceptable for security reasons. That is an extreme
-example, but we believe our library system has more upsides than downsides
-over dynamic library loading and since we've had it built and tested for years,
-we'll continue to use it.
-
-Shared libraries have one major advantage over our system which is much
-higher performance. In real world scenarios across our various tools,
-we've never required any more performance out of our plugin system and it
-has seen very high throughput, so this isn't a concern for us at the moment.
diff --git a/vendor/github.com/hashicorp/go-plugin/buf.gen.yaml b/vendor/github.com/hashicorp/go-plugin/buf.gen.yaml
deleted file mode 100644
index 033d0153..00000000
--- a/vendor/github.com/hashicorp/go-plugin/buf.gen.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-# Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
-
-version: v1
-plugins:
- - plugin: buf.build/protocolbuffers/go
- out: .
- opt:
- - paths=source_relative
- - plugin: buf.build/grpc/go:v1.3.0
- out: .
- opt:
- - paths=source_relative
- - require_unimplemented_servers=false
diff --git a/vendor/github.com/hashicorp/go-plugin/buf.yaml b/vendor/github.com/hashicorp/go-plugin/buf.yaml
deleted file mode 100644
index 3d0da4c7..00000000
--- a/vendor/github.com/hashicorp/go-plugin/buf.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-# Copyright (c) HashiCorp, Inc.
-# SPDX-License-Identifier: MPL-2.0
-
-version: v1
-build:
- excludes:
- - examples/
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go
deleted file mode 100644
index b813ba8c..00000000
--- a/vendor/github.com/hashicorp/go-plugin/client.go
+++ /dev/null
@@ -1,1241 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package plugin
-
-import (
- "bufio"
- "context"
- "crypto/subtle"
- "crypto/tls"
- "crypto/x509"
- "encoding/base64"
- "errors"
- "fmt"
- "hash"
- "io"
- "io/ioutil"
- "net"
- "os"
- "os/exec"
- "path/filepath"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/go-plugin/internal/cmdrunner"
- "github.com/hashicorp/go-plugin/internal/grpcmux"
- "github.com/hashicorp/go-plugin/runner"
- "google.golang.org/grpc"
-)
-
-// If this is 1, then we've called CleanupClients. This can be used
-// by plugin RPC implementations to change error behavior since you
-// can expected network connection errors at this point. This should be
-// read by using sync/atomic.
-var Killed uint32 = 0
-
-// This is a slice of the "managed" clients which are cleaned up when
-// calling Cleanup
-var managedClients = make([]*Client, 0, 5)
-var managedClientsLock sync.Mutex
-
-// Error types
-var (
- // ErrProcessNotFound is returned when a client is instantiated to
- // reattach to an existing process and it isn't found.
- ErrProcessNotFound = cmdrunner.ErrProcessNotFound
-
- // ErrChecksumsDoNotMatch is returned when binary's checksum doesn't match
- // the one provided in the SecureConfig.
- ErrChecksumsDoNotMatch = errors.New("checksums did not match")
-
- // ErrSecureNoChecksum is returned when an empty checksum is provided to the
- // SecureConfig.
- ErrSecureConfigNoChecksum = errors.New("no checksum provided")
-
- // ErrSecureNoHash is returned when a nil Hash object is provided to the
- // SecureConfig.
- ErrSecureConfigNoHash = errors.New("no hash implementation provided")
-
- // ErrSecureConfigAndReattach is returned when both Reattach and
- // SecureConfig are set.
- ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set")
-
- // ErrGRPCBrokerMuxNotSupported is returned when the client requests
- // multiplexing over the gRPC broker, but the plugin does not support the
- // feature. In most cases, this should be resolvable by updating and
- // rebuilding the plugin, or restarting the plugin with
- // ClientConfig.GRPCBrokerMultiplex set to false.
- ErrGRPCBrokerMuxNotSupported = errors.New("client requested gRPC broker multiplexing but plugin does not support the feature")
-)
-
-// defaultPluginLogBufferSize is the default size of the buffer used to read from stderr for plugin log lines.
-const defaultPluginLogBufferSize = 64 * 1024
-
-// Client handles the lifecycle of a plugin application. It launches
-// plugins, connects to them, dispenses interface implementations, and handles
-// killing the process.
-//
-// Plugin hosts should use one Client for each plugin executable. To
-// dispense a plugin type, use the `Client.Client` function, and then
-// cal `Dispense`. This awkward API is mostly historical but is used to split
-// the client that deals with subprocess management and the client that
-// does RPC management.
-//
-// See NewClient and ClientConfig for using a Client.
-type Client struct {
- config *ClientConfig
- exited bool
- l sync.Mutex
- address net.Addr
- runner runner.AttachedRunner
- client ClientProtocol
- protocol Protocol
- logger hclog.Logger
- doneCtx context.Context
- ctxCancel context.CancelFunc
- negotiatedVersion int
-
- // clientWaitGroup is used to manage the lifecycle of the plugin management
- // goroutines.
- clientWaitGroup sync.WaitGroup
-
- // pipesWaitGroup is used to prevent the command's Wait() function from
- // being called before we've finished reading from the stdout and stderr pipe.
- pipesWaitGroup sync.WaitGroup
-
- // processKilled is used for testing only, to flag when the process was
- // forcefully killed.
- processKilled bool
-
- unixSocketCfg UnixSocketConfig
-
- grpcMuxerOnce sync.Once
- grpcMuxer *grpcmux.GRPCClientMuxer
-}
-
-// NegotiatedVersion returns the protocol version negotiated with the server.
-// This is only valid after Start() is called.
-func (c *Client) NegotiatedVersion() int {
- return c.negotiatedVersion
-}
-
-// ID returns a unique ID for the running plugin. By default this is the process
-// ID (pid), but it could take other forms if RunnerFunc was provided.
-func (c *Client) ID() string {
- c.l.Lock()
- defer c.l.Unlock()
-
- if c.runner != nil {
- return c.runner.ID()
- }
-
- return ""
-}
-
-// ClientConfig is the configuration used to initialize a new
-// plugin client. After being used to initialize a plugin client,
-// that configuration must not be modified again.
-type ClientConfig struct {
- // HandshakeConfig is the configuration that must match servers.
- HandshakeConfig
-
- // Plugins are the plugins that can be consumed.
- // The implied version of this PluginSet is the Handshake.ProtocolVersion.
- Plugins PluginSet
-
- // VersionedPlugins is a map of PluginSets for specific protocol versions.
- // These can be used to negotiate a compatible version between client and
- // server. If this is set, Handshake.ProtocolVersion is not required.
- VersionedPlugins map[int]PluginSet
-
- // One of the following must be set, but not both.
- //
- // Cmd is the unstarted subprocess for starting the plugin. If this is
- // set, then the Client starts the plugin process on its own and connects
- // to it.
- //
- // Reattach is configuration for reattaching to an existing plugin process
- // that is already running. This isn't common.
- Cmd *exec.Cmd
- Reattach *ReattachConfig
-
- // RunnerFunc allows consumers to provide their own implementation of
- // runner.Runner and control the context within which a plugin is executed.
- // The cmd argument will have been copied from the config and populated with
- // environment variables that a go-plugin server expects to read such as
- // AutoMTLS certs and the magic cookie key.
- RunnerFunc func(l hclog.Logger, cmd *exec.Cmd, tmpDir string) (runner.Runner, error)
-
- // SecureConfig is configuration for verifying the integrity of the
- // executable. It can not be used with Reattach.
- SecureConfig *SecureConfig
-
- // TLSConfig is used to enable TLS on the RPC client.
- TLSConfig *tls.Config
-
- // Managed represents if the client should be managed by the
- // plugin package or not. If true, then by calling CleanupClients,
- // it will automatically be cleaned up. Otherwise, the client
- // user is fully responsible for making sure to Kill all plugin
- // clients. By default the client is _not_ managed.
- Managed bool
-
- // The minimum and maximum port to use for communicating with
- // the subprocess. If not set, this defaults to 10,000 and 25,000
- // respectively.
- MinPort, MaxPort uint
-
- // StartTimeout is the timeout to wait for the plugin to say it
- // has started successfully.
- StartTimeout time.Duration
-
- // If non-nil, then the stderr of the client will be written to here
- // (as well as the log). This is the original os.Stderr of the subprocess.
- // This isn't the output of synced stderr.
- Stderr io.Writer
-
- // SyncStdout, SyncStderr can be set to override the
- // respective os.Std* values in the plugin. Care should be taken to
- // avoid races here. If these are nil, then this will be set to
- // ioutil.Discard.
- SyncStdout io.Writer
- SyncStderr io.Writer
-
- // AllowedProtocols is a list of allowed protocols. If this isn't set,
- // then only netrpc is allowed. This is so that older go-plugin systems
- // can show friendly errors if they see a plugin with an unknown
- // protocol.
- //
- // By setting this, you can cause an error immediately on plugin start
- // if an unsupported protocol is used with a good error message.
- //
- // If this isn't set at all (nil value), then only net/rpc is accepted.
- // This is done for legacy reasons. You must explicitly opt-in to
- // new protocols.
- AllowedProtocols []Protocol
-
- // Logger is the logger that the client will used. If none is provided,
- // it will default to hclog's default logger.
- Logger hclog.Logger
-
- // PluginLogBufferSize is the buffer size(bytes) to read from stderr for plugin log lines.
- // If this is 0, then the default of 64KB is used.
- PluginLogBufferSize int
-
- // AutoMTLS has the client and server automatically negotiate mTLS for
- // transport authentication. This ensures that only the original client will
- // be allowed to connect to the server, and all other connections will be
- // rejected. The client will also refuse to connect to any server that isn't
- // the original instance started by the client.
- //
- // In this mode of operation, the client generates a one-time use tls
- // certificate, sends the public x.509 certificate to the new server, and
- // the server generates a one-time use tls certificate, and sends the public
- // x.509 certificate back to the client. These are used to authenticate all
- // rpc connections between the client and server.
- //
- // Setting AutoMTLS to true implies that the server must support the
- // protocol, and correctly negotiate the tls certificates, or a connection
- // failure will result.
- //
- // The client should not set TLSConfig, nor should the server set a
- // TLSProvider, because AutoMTLS implies that a new certificate and tls
- // configuration will be generated at startup.
- //
- // You cannot Reattach to a server with this option enabled.
- AutoMTLS bool
-
- // GRPCDialOptions allows plugin users to pass custom grpc.DialOption
- // to create gRPC connections. This only affects plugins using the gRPC
- // protocol.
- GRPCDialOptions []grpc.DialOption
-
- // GRPCBrokerMultiplex turns on multiplexing for the gRPC broker. The gRPC
- // broker will multiplex all brokered gRPC servers over the plugin's original
- // listener socket instead of making a new listener for each server. The
- // go-plugin library currently only includes a Go implementation for the
- // server (i.e. plugin) side of gRPC broker multiplexing.
- //
- // Does not support reattaching.
- //
- // Multiplexed gRPC streams MUST be established sequentially, i.e. after
- // calling AcceptAndServe from one side, wait for the other side to Dial
- // before calling AcceptAndServe again.
- GRPCBrokerMultiplex bool
-
- // SkipHostEnv allows plugins to run without inheriting the parent process'
- // environment variables.
- SkipHostEnv bool
-
- // UnixSocketConfig configures additional options for any Unix sockets
- // that are created. Not normally required. Not supported on Windows.
- UnixSocketConfig *UnixSocketConfig
-}
-
-type UnixSocketConfig struct {
- // If set, go-plugin will change the owner of any Unix sockets created to
- // this group, and set them as group-writable. Can be a name or gid. The
- // client process must be a member of this group or chown will fail.
- Group string
-
- // TempDir specifies the base directory to use when creating a plugin-specific
- // temporary directory. It is expected to already exist and be writable. If
- // not set, defaults to the directory chosen by os.MkdirTemp.
- TempDir string
-
- // The directory to create Unix sockets in. Internally created and managed
- // by go-plugin and deleted when the plugin is killed. Will be created
- // inside TempDir if specified.
- socketDir string
-}
-
-// ReattachConfig is used to configure a client to reattach to an
-// already-running plugin process. You can retrieve this information by
-// calling ReattachConfig on Client.
-type ReattachConfig struct {
- Protocol Protocol
- ProtocolVersion int
- Addr net.Addr
- Pid int
-
- // ReattachFunc allows consumers to provide their own implementation of
- // runner.AttachedRunner and attach to something other than a plain process.
- // At least one of Pid or ReattachFunc must be set.
- ReattachFunc runner.ReattachFunc
-
- // Test is set to true if this is reattaching to to a plugin in "test mode"
- // (see ServeConfig.Test). In this mode, client.Kill will NOT kill the
- // process and instead will rely on the plugin to terminate itself. This
- // should not be used in non-test environments.
- Test bool
-}
-
-// SecureConfig is used to configure a client to verify the integrity of an
-// executable before running. It does this by verifying the checksum is
-// expected. Hash is used to specify the hashing method to use when checksumming
-// the file. The configuration is verified by the client by calling the
-// SecureConfig.Check() function.
-//
-// The host process should ensure the checksum was provided by a trusted and
-// authoritative source. The binary should be installed in such a way that it
-// can not be modified by an unauthorized user between the time of this check
-// and the time of execution.
-type SecureConfig struct {
- Checksum []byte
- Hash hash.Hash
-}
-
-// Check takes the filepath to an executable and returns true if the checksum of
-// the file matches the checksum provided in the SecureConfig.
-func (s *SecureConfig) Check(filePath string) (bool, error) {
- if len(s.Checksum) == 0 {
- return false, ErrSecureConfigNoChecksum
- }
-
- if s.Hash == nil {
- return false, ErrSecureConfigNoHash
- }
-
- file, err := os.Open(filePath)
- if err != nil {
- return false, err
- }
- defer file.Close()
-
- _, err = io.Copy(s.Hash, file)
- if err != nil {
- return false, err
- }
-
- sum := s.Hash.Sum(nil)
-
- return subtle.ConstantTimeCompare(sum, s.Checksum) == 1, nil
-}
-
-// This makes sure all the managed subprocesses are killed and properly
-// logged. This should be called before the parent process running the
-// plugins exits.
-//
-// This must only be called _once_.
-func CleanupClients() {
- // Set the killed to true so that we don't get unexpected panics
- atomic.StoreUint32(&Killed, 1)
-
- // Kill all the managed clients in parallel and use a WaitGroup
- // to wait for them all to finish up.
- var wg sync.WaitGroup
- managedClientsLock.Lock()
- for _, client := range managedClients {
- wg.Add(1)
-
- go func(client *Client) {
- client.Kill()
- wg.Done()
- }(client)
- }
- managedClientsLock.Unlock()
-
- wg.Wait()
-}
-
-// NewClient creates a new plugin client which manages the lifecycle of an external
-// plugin and gets the address for the RPC connection.
-//
-// The client must be cleaned up at some point by calling Kill(). If
-// the client is a managed client (created with ClientConfig.Managed) you
-// can just call CleanupClients at the end of your program and they will
-// be properly cleaned.
-func NewClient(config *ClientConfig) (c *Client) {
- if config.MinPort == 0 && config.MaxPort == 0 {
- config.MinPort = 10000
- config.MaxPort = 25000
- }
-
- if config.StartTimeout == 0 {
- config.StartTimeout = 1 * time.Minute
- }
-
- if config.Stderr == nil {
- config.Stderr = ioutil.Discard
- }
-
- if config.SyncStdout == nil {
- config.SyncStdout = io.Discard
- }
- if config.SyncStderr == nil {
- config.SyncStderr = io.Discard
- }
-
- if config.AllowedProtocols == nil {
- config.AllowedProtocols = []Protocol{ProtocolNetRPC}
- }
-
- if config.Logger == nil {
- config.Logger = hclog.New(&hclog.LoggerOptions{
- Output: hclog.DefaultOutput,
- Level: hclog.Trace,
- Name: "plugin",
- })
- }
-
- if config.PluginLogBufferSize == 0 {
- config.PluginLogBufferSize = defaultPluginLogBufferSize
- }
-
- c = &Client{
- config: config,
- logger: config.Logger,
- }
- if config.Managed {
- managedClientsLock.Lock()
- managedClients = append(managedClients, c)
- managedClientsLock.Unlock()
- }
-
- return
-}
-
-// Client returns the protocol client for this connection.
-//
-// Subsequent calls to this will return the same client.
-func (c *Client) Client() (ClientProtocol, error) {
- _, err := c.Start()
- if err != nil {
- return nil, err
- }
-
- c.l.Lock()
- defer c.l.Unlock()
-
- if c.client != nil {
- return c.client, nil
- }
-
- switch c.protocol {
- case ProtocolNetRPC:
- c.client, err = newRPCClient(c)
-
- case ProtocolGRPC:
- c.client, err = newGRPCClient(c.doneCtx, c)
-
- default:
- return nil, fmt.Errorf("unknown server protocol: %s", c.protocol)
- }
-
- if err != nil {
- c.client = nil
- return nil, err
- }
-
- return c.client, nil
-}
-
-// Tells whether or not the underlying process has exited.
-func (c *Client) Exited() bool {
- c.l.Lock()
- defer c.l.Unlock()
- return c.exited
-}
-
-// killed is used in tests to check if a process failed to exit gracefully, and
-// needed to be killed.
-func (c *Client) killed() bool {
- c.l.Lock()
- defer c.l.Unlock()
- return c.processKilled
-}
-
-// End the executing subprocess (if it is running) and perform any cleanup
-// tasks necessary such as capturing any remaining logs and so on.
-//
-// This method blocks until the process successfully exits.
-//
-// This method can safely be called multiple times.
-func (c *Client) Kill() {
- // Grab a lock to read some private fields.
- c.l.Lock()
- runner := c.runner
- addr := c.address
- hostSocketDir := c.unixSocketCfg.socketDir
- c.l.Unlock()
-
- // If there is no runner or ID, there is nothing to kill.
- if runner == nil || runner.ID() == "" {
- return
- }
-
- defer func() {
- // Wait for the all client goroutines to finish.
- c.clientWaitGroup.Wait()
-
- if hostSocketDir != "" {
- os.RemoveAll(hostSocketDir)
- }
-
- // Make sure there is no reference to the old process after it has been
- // killed.
- c.l.Lock()
- c.runner = nil
- c.l.Unlock()
- }()
-
- // We need to check for address here. It is possible that the plugin
- // started (process != nil) but has no address (addr == nil) if the
- // plugin failed at startup. If we do have an address, we need to close
- // the plugin net connections.
- graceful := false
- if addr != nil {
- // Close the client to cleanly exit the process.
- client, err := c.Client()
- if err == nil {
- err = client.Close()
-
- // If there is no error, then we attempt to wait for a graceful
- // exit. If there was an error, we assume that graceful cleanup
- // won't happen and just force kill.
- graceful = err == nil
- if err != nil {
- // If there was an error just log it. We're going to force
- // kill in a moment anyways.
- c.logger.Warn("error closing client during Kill", "err", err)
- }
- } else {
- c.logger.Error("client", "error", err)
- }
- }
-
- // If we're attempting a graceful exit, then we wait for a short period
- // of time to allow that to happen. To wait for this we just wait on the
- // doneCh which would be closed if the process exits.
- if graceful {
- select {
- case <-c.doneCtx.Done():
- c.logger.Debug("plugin exited")
- return
- case <-time.After(2 * time.Second):
- }
- }
-
- // If graceful exiting failed, just kill it
- c.logger.Warn("plugin failed to exit gracefully")
- if err := runner.Kill(context.Background()); err != nil {
- c.logger.Debug("error killing plugin", "error", err)
- }
-
- c.l.Lock()
- c.processKilled = true
- c.l.Unlock()
-}
-
-// Start the underlying subprocess, communicating with it to negotiate
-// a port for RPC connections, and returning the address to connect via RPC.
-//
-// This method is safe to call multiple times. Subsequent calls have no effect.
-// Once a client has been started once, it cannot be started again, even if
-// it was killed.
-func (c *Client) Start() (addr net.Addr, err error) {
- c.l.Lock()
- defer c.l.Unlock()
-
- if c.address != nil {
- return c.address, nil
- }
-
- // If one of cmd or reattach isn't set, then it is an error. We wrap
- // this in a {} for scoping reasons, and hopeful that the escape
- // analysis will pop the stack here.
- {
- var mutuallyExclusiveOptions int
- if c.config.Cmd != nil {
- mutuallyExclusiveOptions += 1
- }
- if c.config.Reattach != nil {
- mutuallyExclusiveOptions += 1
- }
- if c.config.RunnerFunc != nil {
- mutuallyExclusiveOptions += 1
- }
- if mutuallyExclusiveOptions != 1 {
- return nil, fmt.Errorf("exactly one of Cmd, or Reattach, or RunnerFunc must be set")
- }
-
- if c.config.SecureConfig != nil && c.config.Reattach != nil {
- return nil, ErrSecureConfigAndReattach
- }
-
- if c.config.GRPCBrokerMultiplex && c.config.Reattach != nil {
- return nil, fmt.Errorf("gRPC broker multiplexing is not supported with Reattach config")
- }
- }
-
- if c.config.Reattach != nil {
- return c.reattach()
- }
-
- if c.config.VersionedPlugins == nil {
- c.config.VersionedPlugins = make(map[int]PluginSet)
- }
-
- // handle all plugins as versioned, using the handshake config as the default.
- version := int(c.config.ProtocolVersion)
-
- // Make sure we're not overwriting a real version 0. If ProtocolVersion was
- // non-zero, then we have to just assume the user made sure that
- // VersionedPlugins doesn't conflict.
- if _, ok := c.config.VersionedPlugins[version]; !ok && c.config.Plugins != nil {
- c.config.VersionedPlugins[version] = c.config.Plugins
- }
-
- var versionStrings []string
- for v := range c.config.VersionedPlugins {
- versionStrings = append(versionStrings, strconv.Itoa(v))
- }
-
- env := []string{
- fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue),
- fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort),
- fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort),
- fmt.Sprintf("PLUGIN_PROTOCOL_VERSIONS=%s", strings.Join(versionStrings, ",")),
- }
- if c.config.GRPCBrokerMultiplex {
- env = append(env, fmt.Sprintf("%s=true", envMultiplexGRPC))
- }
-
- cmd := c.config.Cmd
- if cmd == nil {
- // It's only possible to get here if RunnerFunc is non-nil, but we'll
- // still use cmd as a spec to populate metadata for the external
- // implementation to consume.
- cmd = exec.Command("")
- }
- if !c.config.SkipHostEnv {
- cmd.Env = append(cmd.Env, os.Environ()...)
- }
- cmd.Env = append(cmd.Env, env...)
- cmd.Stdin = os.Stdin
-
- if c.config.SecureConfig != nil {
- if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil {
- return nil, fmt.Errorf("error verifying checksum: %s", err)
- } else if !ok {
- return nil, ErrChecksumsDoNotMatch
- }
- }
-
- // Setup a temporary certificate for client/server mtls, and send the public
- // certificate to the plugin.
- if c.config.AutoMTLS {
- c.logger.Info("configuring client automatic mTLS")
- certPEM, keyPEM, err := generateCert()
- if err != nil {
- c.logger.Error("failed to generate client certificate", "error", err)
- return nil, err
- }
- cert, err := tls.X509KeyPair(certPEM, keyPEM)
- if err != nil {
- c.logger.Error("failed to parse client certificate", "error", err)
- return nil, err
- }
-
- cmd.Env = append(cmd.Env, fmt.Sprintf("PLUGIN_CLIENT_CERT=%s", certPEM))
-
- c.config.TLSConfig = &tls.Config{
- Certificates: []tls.Certificate{cert},
- ClientAuth: tls.RequireAndVerifyClientCert,
- MinVersion: tls.VersionTLS12,
- ServerName: "localhost",
- }
- }
-
- if c.config.UnixSocketConfig != nil {
- c.unixSocketCfg = *c.config.UnixSocketConfig
- }
-
- if c.unixSocketCfg.Group != "" {
- cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", EnvUnixSocketGroup, c.unixSocketCfg.Group))
- }
-
- var runner runner.Runner
- switch {
- case c.config.RunnerFunc != nil:
- c.unixSocketCfg.socketDir, err = os.MkdirTemp(c.unixSocketCfg.TempDir, "plugin-dir")
- if err != nil {
- return nil, err
- }
- // os.MkdirTemp creates folders with 0o700, so if we have a group
- // configured we need to make it group-writable.
- if c.unixSocketCfg.Group != "" {
- err = setGroupWritable(c.unixSocketCfg.socketDir, c.unixSocketCfg.Group, 0o770)
- if err != nil {
- return nil, err
- }
- }
- cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", EnvUnixSocketDir, c.unixSocketCfg.socketDir))
- c.logger.Trace("created temporary directory for unix sockets", "dir", c.unixSocketCfg.socketDir)
-
- runner, err = c.config.RunnerFunc(c.logger, cmd, c.unixSocketCfg.socketDir)
- if err != nil {
- return nil, err
- }
- default:
- runner, err = cmdrunner.NewCmdRunner(c.logger, cmd)
- if err != nil {
- return nil, err
- }
-
- }
-
- c.runner = runner
- startCtx, startCtxCancel := context.WithTimeout(context.Background(), c.config.StartTimeout)
- defer startCtxCancel()
- err = runner.Start(startCtx)
- if err != nil {
- return nil, err
- }
-
- // Make sure the command is properly cleaned up if there is an error
- defer func() {
- rErr := recover()
-
- if err != nil || rErr != nil {
- runner.Kill(context.Background())
- }
-
- if rErr != nil {
- panic(rErr)
- }
- }()
-
- // Create a context for when we kill
- c.doneCtx, c.ctxCancel = context.WithCancel(context.Background())
-
- // Start goroutine that logs the stderr
- c.clientWaitGroup.Add(1)
- c.pipesWaitGroup.Add(1)
- // logStderr calls c.pipesWaitGroup.Done()
- go c.logStderr(runner.Name(), runner.Stderr())
-
- c.clientWaitGroup.Add(1)
- go func() {
- // ensure the context is cancelled when we're done
- defer c.ctxCancel()
-
- defer c.clientWaitGroup.Done()
-
- // wait to finish reading from stdout/stderr since the stdout/stderr pipe readers
- // will be closed by the subsequent call to cmd.Wait().
- c.pipesWaitGroup.Wait()
-
- // Wait for the command to end.
- err := runner.Wait(context.Background())
- if err != nil {
- c.logger.Error("plugin process exited", "plugin", runner.Name(), "id", runner.ID(), "error", err.Error())
- } else {
- // Log and make sure to flush the logs right away
- c.logger.Info("plugin process exited", "plugin", runner.Name(), "id", runner.ID())
- }
-
- os.Stderr.Sync()
-
- // Set that we exited, which takes a lock
- c.l.Lock()
- defer c.l.Unlock()
- c.exited = true
- }()
-
- // Start a goroutine that is going to be reading the lines
- // out of stdout
- linesCh := make(chan string)
- c.clientWaitGroup.Add(1)
- c.pipesWaitGroup.Add(1)
- go func() {
- defer c.clientWaitGroup.Done()
- defer c.pipesWaitGroup.Done()
- defer close(linesCh)
-
- scanner := bufio.NewScanner(runner.Stdout())
- for scanner.Scan() {
- linesCh <- scanner.Text()
- }
- if scanner.Err() != nil {
- c.logger.Error("error encountered while scanning stdout", "error", scanner.Err())
- }
- }()
-
- // Make sure after we exit we read the lines from stdout forever
- // so they don't block since it is a pipe.
- // The scanner goroutine above will close this, but track it with a wait
- // group for completeness.
- c.clientWaitGroup.Add(1)
- defer func() {
- go func() {
- defer c.clientWaitGroup.Done()
- for range linesCh {
- }
- }()
- }()
-
- // Some channels for the next step
- timeout := time.After(c.config.StartTimeout)
-
- // Start looking for the address
- c.logger.Debug("waiting for RPC address", "plugin", runner.Name())
- select {
- case <-timeout:
- err = errors.New("timeout while waiting for plugin to start")
- case <-c.doneCtx.Done():
- err = errors.New("plugin exited before we could connect")
- case line, ok := <-linesCh:
- // Trim the line and split by "|" in order to get the parts of
- // the output.
- line = strings.TrimSpace(line)
- parts := strings.Split(line, "|")
- if len(parts) < 4 {
- errText := fmt.Sprintf("Unrecognized remote plugin message: %s", line)
- if !ok {
- errText += "\n" + "Failed to read any lines from plugin's stdout"
- }
- additionalNotes := runner.Diagnose(context.Background())
- if additionalNotes != "" {
- errText += "\n" + additionalNotes
- }
- err = errors.New(errText)
- return
- }
-
- // Check the core protocol. Wrapped in a {} for scoping.
- {
- var coreProtocol int
- coreProtocol, err = strconv.Atoi(parts[0])
- if err != nil {
- err = fmt.Errorf("Error parsing core protocol version: %s", err)
- return
- }
-
- if coreProtocol != CoreProtocolVersion {
- err = fmt.Errorf("Incompatible core API version with plugin. "+
- "Plugin version: %s, Core version: %d\n\n"+
- "To fix this, the plugin usually only needs to be recompiled.\n"+
- "Please report this to the plugin author.", parts[0], CoreProtocolVersion)
- return
- }
- }
-
- // Test the API version
- version, pluginSet, err := c.checkProtoVersion(parts[1])
- if err != nil {
- return addr, err
- }
-
- // set the Plugins value to the compatible set, so the version
- // doesn't need to be passed through to the ClientProtocol
- // implementation.
- c.config.Plugins = pluginSet
- c.negotiatedVersion = version
- c.logger.Debug("using plugin", "version", version)
-
- network, address, err := runner.PluginToHost(parts[2], parts[3])
- if err != nil {
- return addr, err
- }
-
- switch network {
- case "tcp":
- addr, err = net.ResolveTCPAddr("tcp", address)
- case "unix":
- addr, err = net.ResolveUnixAddr("unix", address)
- default:
- err = fmt.Errorf("Unknown address type: %s", address)
- }
-
- // If we have a server type, then record that. We default to net/rpc
- // for backwards compatibility.
- c.protocol = ProtocolNetRPC
- if len(parts) >= 5 {
- c.protocol = Protocol(parts[4])
- }
-
- found := false
- for _, p := range c.config.AllowedProtocols {
- if p == c.protocol {
- found = true
- break
- }
- }
- if !found {
- err = fmt.Errorf("Unsupported plugin protocol %q. Supported: %v",
- c.protocol, c.config.AllowedProtocols)
- return addr, err
- }
-
- // See if we have a TLS certificate from the server.
- // Checking if the length is > 50 rules out catching the unused "extra"
- // data returned from some older implementations.
- if len(parts) >= 6 && len(parts[5]) > 50 {
- err := c.loadServerCert(parts[5])
- if err != nil {
- return nil, fmt.Errorf("error parsing server cert: %s", err)
- }
- }
-
- if c.config.GRPCBrokerMultiplex && c.protocol == ProtocolGRPC {
- if len(parts) <= 6 {
- return nil, fmt.Errorf("%w; for Go plugins, you will need to update the "+
- "github.com/hashicorp/go-plugin dependency and recompile", ErrGRPCBrokerMuxNotSupported)
- }
- if muxSupported, err := strconv.ParseBool(parts[6]); err != nil {
- return nil, fmt.Errorf("error parsing %q as a boolean for gRPC broker multiplexing support", parts[6])
- } else if !muxSupported {
- return nil, ErrGRPCBrokerMuxNotSupported
- }
- }
- }
-
- c.address = addr
- return
-}
-
-// loadServerCert is used by AutoMTLS to read an x.509 cert returned by the
-// server, and load it as the RootCA and ClientCA for the client TLSConfig.
-func (c *Client) loadServerCert(cert string) error {
- certPool := x509.NewCertPool()
-
- asn1, err := base64.RawStdEncoding.DecodeString(cert)
- if err != nil {
- return err
- }
-
- x509Cert, err := x509.ParseCertificate([]byte(asn1))
- if err != nil {
- return err
- }
-
- certPool.AddCert(x509Cert)
-
- c.config.TLSConfig.RootCAs = certPool
- c.config.TLSConfig.ClientCAs = certPool
- return nil
-}
-
-func (c *Client) reattach() (net.Addr, error) {
- reattachFunc := c.config.Reattach.ReattachFunc
- // For backwards compatibility default to cmdrunner.ReattachFunc
- if reattachFunc == nil {
- reattachFunc = cmdrunner.ReattachFunc(c.config.Reattach.Pid, c.config.Reattach.Addr)
- }
-
- r, err := reattachFunc()
- if err != nil {
- return nil, err
- }
-
- // Create a context for when we kill
- c.doneCtx, c.ctxCancel = context.WithCancel(context.Background())
-
- c.clientWaitGroup.Add(1)
- // Goroutine to mark exit status
- go func(r runner.AttachedRunner) {
- defer c.clientWaitGroup.Done()
-
- // ensure the context is cancelled when we're done
- defer c.ctxCancel()
-
- // Wait for the process to die
- r.Wait(context.Background())
-
- // Log so we can see it
- c.logger.Debug("reattached plugin process exited")
-
- // Mark it
- c.l.Lock()
- defer c.l.Unlock()
- c.exited = true
- }(r)
-
- // Set the address and protocol
- c.address = c.config.Reattach.Addr
- c.protocol = c.config.Reattach.Protocol
- if c.protocol == "" {
- // Default the protocol to net/rpc for backwards compatibility
- c.protocol = ProtocolNetRPC
- }
-
- if c.config.Reattach.Test {
- c.negotiatedVersion = c.config.Reattach.ProtocolVersion
- } else {
- // If we're in test mode, we do NOT set the runner. This avoids the
- // runner being killed (the only purpose we have for setting c.runner
- // when reattaching), since in test mode the process is responsible for
- // exiting on its own.
- c.runner = r
- }
-
- return c.address, nil
-}
-
-// checkProtoVersion returns the negotiated version and PluginSet.
-// This returns an error if the server returned an incompatible protocol
-// version, or an invalid handshake response.
-func (c *Client) checkProtoVersion(protoVersion string) (int, PluginSet, error) {
- serverVersion, err := strconv.Atoi(protoVersion)
- if err != nil {
- return 0, nil, fmt.Errorf("Error parsing protocol version %q: %s", protoVersion, err)
- }
-
- // record these for the error message
- var clientVersions []int
-
- // all versions, including the legacy ProtocolVersion have been added to
- // the versions set
- for version, plugins := range c.config.VersionedPlugins {
- clientVersions = append(clientVersions, version)
-
- if serverVersion != version {
- continue
- }
- return version, plugins, nil
- }
-
- return 0, nil, fmt.Errorf("Incompatible API version with plugin. "+
- "Plugin version: %d, Client versions: %d", serverVersion, clientVersions)
-}
-
-// ReattachConfig returns the information that must be provided to NewClient
-// to reattach to the plugin process that this client started. This is
-// useful for plugins that detach from their parent process.
-//
-// If this returns nil then the process hasn't been started yet. Please
-// call Start or Client before calling this.
-//
-// Clients who specified a RunnerFunc will need to populate their own
-// ReattachFunc in the returned ReattachConfig before it can be used.
-func (c *Client) ReattachConfig() *ReattachConfig {
- c.l.Lock()
- defer c.l.Unlock()
-
- if c.address == nil {
- return nil
- }
-
- if c.config.Cmd != nil && c.config.Cmd.Process == nil {
- return nil
- }
-
- // If we connected via reattach, just return the information as-is
- if c.config.Reattach != nil {
- return c.config.Reattach
- }
-
- reattach := &ReattachConfig{
- Protocol: c.protocol,
- Addr: c.address,
- }
-
- if c.config.Cmd != nil && c.config.Cmd.Process != nil {
- reattach.Pid = c.config.Cmd.Process.Pid
- }
-
- return reattach
-}
-
-// Protocol returns the protocol of server on the remote end. This will
-// start the plugin process if it isn't already started. Errors from
-// starting the plugin are surpressed and ProtocolInvalid is returned. It
-// is recommended you call Start explicitly before calling Protocol to ensure
-// no errors occur.
-func (c *Client) Protocol() Protocol {
- _, err := c.Start()
- if err != nil {
- return ProtocolInvalid
- }
-
- return c.protocol
-}
-
-func netAddrDialer(addr net.Addr) func(string, time.Duration) (net.Conn, error) {
- return func(_ string, _ time.Duration) (net.Conn, error) {
- // Connect to the client
- conn, err := net.Dial(addr.Network(), addr.String())
- if err != nil {
- return nil, err
- }
- if tcpConn, ok := conn.(*net.TCPConn); ok {
- // Make sure to set keep alive so that the connection doesn't die
- tcpConn.SetKeepAlive(true)
- }
-
- return conn, nil
- }
-}
-
-// dialer is compatible with grpc.WithDialer and creates the connection
-// to the plugin.
-func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) {
- muxer, err := c.getGRPCMuxer(c.address)
- if err != nil {
- return nil, err
- }
-
- var conn net.Conn
- if muxer.Enabled() {
- conn, err = muxer.Dial()
- if err != nil {
- return nil, err
- }
- } else {
- conn, err = netAddrDialer(c.address)("", timeout)
- if err != nil {
- return nil, err
- }
- }
-
- // If we have a TLS config we wrap our connection. We only do this
- // for net/rpc since gRPC uses its own mechanism for TLS.
- if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil {
- conn = tls.Client(conn, c.config.TLSConfig)
- }
-
- return conn, nil
-}
-
-func (c *Client) getGRPCMuxer(addr net.Addr) (*grpcmux.GRPCClientMuxer, error) {
- if c.protocol != ProtocolGRPC || !c.config.GRPCBrokerMultiplex {
- return nil, nil
- }
-
- var err error
- c.grpcMuxerOnce.Do(func() {
- c.grpcMuxer, err = grpcmux.NewGRPCClientMuxer(c.logger, addr)
- })
- if err != nil {
- return nil, err
- }
-
- return c.grpcMuxer, nil
-}
-
-func (c *Client) logStderr(name string, r io.Reader) {
- defer c.clientWaitGroup.Done()
- defer c.pipesWaitGroup.Done()
- l := c.logger.Named(filepath.Base(name))
-
- reader := bufio.NewReaderSize(r, c.config.PluginLogBufferSize)
- // continuation indicates the previous line was a prefix
- continuation := false
-
- for {
- line, isPrefix, err := reader.ReadLine()
- switch {
- case err == io.EOF:
- return
- case err != nil:
- l.Error("reading plugin stderr", "error", err)
- return
- }
-
- c.config.Stderr.Write(line)
-
- // The line was longer than our max token size, so it's likely
- // incomplete and won't unmarshal.
- if isPrefix || continuation {
- l.Debug(string(line))
-
- // if we're finishing a continued line, add the newline back in
- if !isPrefix {
- c.config.Stderr.Write([]byte{'\n'})
- }
-
- continuation = isPrefix
- continue
- }
-
- c.config.Stderr.Write([]byte{'\n'})
-
- entry, err := parseJSON(line)
- // If output is not JSON format, print directly to Debug
- if err != nil {
- // Attempt to infer the desired log level from the commonly used
- // string prefixes
- switch line := string(line); {
- case strings.HasPrefix(line, "[TRACE]"):
- l.Trace(line)
- case strings.HasPrefix(line, "[DEBUG]"):
- l.Debug(line)
- case strings.HasPrefix(line, "[INFO]"):
- l.Info(line)
- case strings.HasPrefix(line, "[WARN]"):
- l.Warn(line)
- case strings.HasPrefix(line, "[ERROR]"):
- l.Error(line)
- default:
- l.Debug(line)
- }
- } else {
- out := flattenKVPairs(entry.KVPairs)
-
- out = append(out, "timestamp", entry.Timestamp.Format(hclog.TimeFormat))
- switch hclog.LevelFromString(entry.Level) {
- case hclog.Trace:
- l.Trace(entry.Message, out...)
- case hclog.Debug:
- l.Debug(entry.Message, out...)
- case hclog.Info:
- l.Info(entry.Message, out...)
- case hclog.Warn:
- l.Warn(entry.Message, out...)
- case hclog.Error:
- l.Error(entry.Message, out...)
- default:
- // if there was no log level, it's likely this is unexpected
- // json from something other than hclog, and we should output
- // it verbatim.
- l.Debug(string(line))
- }
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/constants.go b/vendor/github.com/hashicorp/go-plugin/constants.go
deleted file mode 100644
index e7f5bbe5..00000000
--- a/vendor/github.com/hashicorp/go-plugin/constants.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package plugin
-
-const (
- // EnvUnixSocketDir specifies the directory that _plugins_ should create unix
- // sockets in. Does not affect client behavior.
- EnvUnixSocketDir = "PLUGIN_UNIX_SOCKET_DIR"
-
- // EnvUnixSocketGroup specifies the owning, writable group to set for Unix
- // sockets created by _plugins_. Does not affect client behavior.
- EnvUnixSocketGroup = "PLUGIN_UNIX_SOCKET_GROUP"
-
- envMultiplexGRPC = "PLUGIN_MULTIPLEX_GRPC"
-)
diff --git a/vendor/github.com/hashicorp/go-plugin/discover.go b/vendor/github.com/hashicorp/go-plugin/discover.go
deleted file mode 100644
index c5b96242..00000000
--- a/vendor/github.com/hashicorp/go-plugin/discover.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package plugin
-
-import (
- "path/filepath"
-)
-
-// Discover discovers plugins that are in a given directory.
-//
-// The directory doesn't need to be absolute. For example, "." will work fine.
-//
-// This currently assumes any file matching the glob is a plugin.
-// In the future this may be smarter about checking that a file is
-// executable and so on.
-//
-// TODO: test
-func Discover(glob, dir string) ([]string, error) {
- var err error
-
- // Make the directory absolute if it isn't already
- if !filepath.IsAbs(dir) {
- dir, err = filepath.Abs(dir)
- if err != nil {
- return nil, err
- }
- }
-
- return filepath.Glob(filepath.Join(dir, glob))
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/error.go b/vendor/github.com/hashicorp/go-plugin/error.go
deleted file mode 100644
index e62a2191..00000000
--- a/vendor/github.com/hashicorp/go-plugin/error.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package plugin
-
-// This is a type that wraps error types so that they can be messaged
-// across RPC channels. Since "error" is an interface, we can't always
-// gob-encode the underlying structure. This is a valid error interface
-// implementer that we will push across.
-type BasicError struct {
- Message string
-}
-
-// NewBasicError is used to create a BasicError.
-//
-// err is allowed to be nil.
-func NewBasicError(err error) *BasicError {
- if err == nil {
- return nil
- }
-
- return &BasicError{err.Error()}
-}
-
-func (e *BasicError) Error() string {
- return e.Message
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go
deleted file mode 100644
index 5b17e37f..00000000
--- a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go
+++ /dev/null
@@ -1,654 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package plugin
-
-import (
- "context"
- "crypto/tls"
- "errors"
- "fmt"
- "log"
- "net"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/hashicorp/go-plugin/internal/grpcmux"
- "github.com/hashicorp/go-plugin/internal/plugin"
- "github.com/hashicorp/go-plugin/runner"
-
- "github.com/oklog/run"
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials"
-)
-
-// streamer interface is used in the broker to send/receive connection
-// information.
-type streamer interface {
- Send(*plugin.ConnInfo) error
- Recv() (*plugin.ConnInfo, error)
- Close()
-}
-
-// sendErr is used to pass errors back during a send.
-type sendErr struct {
- i *plugin.ConnInfo
- ch chan error
-}
-
-// gRPCBrokerServer is used by the plugin to start a stream and to send
-// connection information to/from the plugin. Implements GRPCBrokerServer and
-// streamer interfaces.
-type gRPCBrokerServer struct {
- plugin.UnimplementedGRPCBrokerServer
-
- // send is used to send connection info to the gRPC stream.
- send chan *sendErr
-
- // recv is used to receive connection info from the gRPC stream.
- recv chan *plugin.ConnInfo
-
- // quit closes down the stream.
- quit chan struct{}
-
- // o is used to ensure we close the quit channel only once.
- o sync.Once
-}
-
-func newGRPCBrokerServer() *gRPCBrokerServer {
- return &gRPCBrokerServer{
- send: make(chan *sendErr),
- recv: make(chan *plugin.ConnInfo),
- quit: make(chan struct{}),
- }
-}
-
-// StartStream implements the GRPCBrokerServer interface and will block until
-// the quit channel is closed or the context reports Done. The stream will pass
-// connection information to/from the client.
-func (s *gRPCBrokerServer) StartStream(stream plugin.GRPCBroker_StartStreamServer) error {
- doneCh := stream.Context().Done()
- defer s.Close()
-
- // Proccess send stream
- go func() {
- for {
- select {
- case <-doneCh:
- return
- case <-s.quit:
- return
- case se := <-s.send:
- err := stream.Send(se.i)
- se.ch <- err
- }
- }
- }()
-
- // Process receive stream
- for {
- i, err := stream.Recv()
- if err != nil {
- return err
- }
- select {
- case <-doneCh:
- return nil
- case <-s.quit:
- return nil
- case s.recv <- i:
- }
- }
-
- return nil
-}
-
-// Send is used by the GRPCBroker to pass connection information into the stream
-// to the client.
-func (s *gRPCBrokerServer) Send(i *plugin.ConnInfo) error {
- ch := make(chan error)
- defer close(ch)
-
- select {
- case <-s.quit:
- return errors.New("broker closed")
- case s.send <- &sendErr{
- i: i,
- ch: ch,
- }:
- }
-
- return <-ch
-}
-
-// Recv is used by the GRPCBroker to pass connection information that has been
-// sent from the client from the stream to the broker.
-func (s *gRPCBrokerServer) Recv() (*plugin.ConnInfo, error) {
- select {
- case <-s.quit:
- return nil, errors.New("broker closed")
- case i := <-s.recv:
- return i, nil
- }
-}
-
-// Close closes the quit channel, shutting down the stream.
-func (s *gRPCBrokerServer) Close() {
- s.o.Do(func() {
- close(s.quit)
- })
-}
-
-// gRPCBrokerClientImpl is used by the client to start a stream and to send
-// connection information to/from the client. Implements GRPCBrokerClient and
-// streamer interfaces.
-type gRPCBrokerClientImpl struct {
- // client is the underlying GRPC client used to make calls to the server.
- client plugin.GRPCBrokerClient
-
- // send is used to send connection info to the gRPC stream.
- send chan *sendErr
-
- // recv is used to receive connection info from the gRPC stream.
- recv chan *plugin.ConnInfo
-
- // quit closes down the stream.
- quit chan struct{}
-
- // o is used to ensure we close the quit channel only once.
- o sync.Once
-}
-
-func newGRPCBrokerClient(conn *grpc.ClientConn) *gRPCBrokerClientImpl {
- return &gRPCBrokerClientImpl{
- client: plugin.NewGRPCBrokerClient(conn),
- send: make(chan *sendErr),
- recv: make(chan *plugin.ConnInfo),
- quit: make(chan struct{}),
- }
-}
-
-// StartStream implements the GRPCBrokerClient interface and will block until
-// the quit channel is closed or the context reports Done. The stream will pass
-// connection information to/from the plugin.
-func (s *gRPCBrokerClientImpl) StartStream() error {
- ctx, cancelFunc := context.WithCancel(context.Background())
- defer cancelFunc()
- defer s.Close()
-
- stream, err := s.client.StartStream(ctx)
- if err != nil {
- return err
- }
- doneCh := stream.Context().Done()
-
- go func() {
- for {
- select {
- case <-doneCh:
- return
- case <-s.quit:
- return
- case se := <-s.send:
- err := stream.Send(se.i)
- se.ch <- err
- }
- }
- }()
-
- for {
- i, err := stream.Recv()
- if err != nil {
- return err
- }
- select {
- case <-doneCh:
- return nil
- case <-s.quit:
- return nil
- case s.recv <- i:
- }
- }
-
- return nil
-}
-
-// Send is used by the GRPCBroker to pass connection information into the stream
-// to the plugin.
-func (s *gRPCBrokerClientImpl) Send(i *plugin.ConnInfo) error {
- ch := make(chan error)
- defer close(ch)
-
- select {
- case <-s.quit:
- return errors.New("broker closed")
- case s.send <- &sendErr{
- i: i,
- ch: ch,
- }:
- }
-
- return <-ch
-}
-
-// Recv is used by the GRPCBroker to pass connection information that has been
-// sent from the plugin to the broker.
-func (s *gRPCBrokerClientImpl) Recv() (*plugin.ConnInfo, error) {
- select {
- case <-s.quit:
- return nil, errors.New("broker closed")
- case i := <-s.recv:
- return i, nil
- }
-}
-
-// Close closes the quit channel, shutting down the stream.
-func (s *gRPCBrokerClientImpl) Close() {
- s.o.Do(func() {
- close(s.quit)
- })
-}
-
-// GRPCBroker is responsible for brokering connections by unique ID.
-//
-// It is used by plugins to create multiple gRPC connections and data
-// streams between the plugin process and the host process.
-//
-// This allows a plugin to request a channel with a specific ID to connect to
-// or accept a connection from, and the broker handles the details of
-// holding these channels open while they're being negotiated.
-//
-// The Plugin interface has access to these for both Server and Client.
-// The broker can be used by either (optionally) to reserve and connect to
-// new streams. This is useful for complex args and return values,
-// or anything else you might need a data stream for.
-type GRPCBroker struct {
- nextId uint32
- streamer streamer
- tls *tls.Config
- doneCh chan struct{}
- o sync.Once
-
- clientStreams map[uint32]*gRPCBrokerPending
- serverStreams map[uint32]*gRPCBrokerPending
-
- unixSocketCfg UnixSocketConfig
- addrTranslator runner.AddrTranslator
-
- dialMutex sync.Mutex
-
- muxer grpcmux.GRPCMuxer
-
- sync.Mutex
-}
-
-type gRPCBrokerPending struct {
- ch chan *plugin.ConnInfo
- doneCh chan struct{}
- once sync.Once
-}
-
-func newGRPCBroker(s streamer, tls *tls.Config, unixSocketCfg UnixSocketConfig, addrTranslator runner.AddrTranslator, muxer grpcmux.GRPCMuxer) *GRPCBroker {
- return &GRPCBroker{
- streamer: s,
- tls: tls,
- doneCh: make(chan struct{}),
-
- clientStreams: make(map[uint32]*gRPCBrokerPending),
- serverStreams: make(map[uint32]*gRPCBrokerPending),
- muxer: muxer,
-
- unixSocketCfg: unixSocketCfg,
- addrTranslator: addrTranslator,
- }
-}
-
-// Accept accepts a connection by ID.
-//
-// This should not be called multiple times with the same ID at one time.
-func (b *GRPCBroker) Accept(id uint32) (net.Listener, error) {
- if b.muxer.Enabled() {
- p := b.getServerStream(id)
- go func() {
- err := b.listenForKnocks(id)
- if err != nil {
- log.Printf("[ERR]: error listening for knocks, id: %d, error: %s", id, err)
- }
- }()
-
- ln, err := b.muxer.Listener(id, p.doneCh)
- if err != nil {
- return nil, err
- }
-
- ln = &rmListener{
- Listener: ln,
- close: func() error {
- // We could have multiple listeners on the same ID, so use sync.Once
- // for closing doneCh to ensure we don't get a panic.
- p.once.Do(func() {
- close(p.doneCh)
- })
-
- b.Lock()
- defer b.Unlock()
-
- // No longer need to listen for knocks once the listener is closed.
- delete(b.serverStreams, id)
-
- return nil
- },
- }
-
- return ln, nil
- }
-
- listener, err := serverListener(b.unixSocketCfg)
- if err != nil {
- return nil, err
- }
-
- advertiseNet := listener.Addr().Network()
- advertiseAddr := listener.Addr().String()
- if b.addrTranslator != nil {
- advertiseNet, advertiseAddr, err = b.addrTranslator.HostToPlugin(advertiseNet, advertiseAddr)
- if err != nil {
- return nil, err
- }
- }
- err = b.streamer.Send(&plugin.ConnInfo{
- ServiceId: id,
- Network: advertiseNet,
- Address: advertiseAddr,
- })
- if err != nil {
- return nil, err
- }
-
- return listener, nil
-}
-
-// AcceptAndServe is used to accept a specific stream ID and immediately
-// serve a gRPC server on that stream ID. This is used to easily serve
-// complex arguments. Each AcceptAndServe call opens a new listener socket and
-// sends the connection info down the stream to the dialer. Since a new
-// connection is opened every call, these calls should be used sparingly.
-// Multiple gRPC server implementations can be registered to a single
-// AcceptAndServe call.
-func (b *GRPCBroker) AcceptAndServe(id uint32, newGRPCServer func([]grpc.ServerOption) *grpc.Server) {
- ln, err := b.Accept(id)
- if err != nil {
- log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err)
- return
- }
- defer ln.Close()
-
- var opts []grpc.ServerOption
- if b.tls != nil {
- opts = []grpc.ServerOption{grpc.Creds(credentials.NewTLS(b.tls))}
- }
-
- server := newGRPCServer(opts)
-
- // Here we use a run group to close this goroutine if the server is shutdown
- // or the broker is shutdown.
- var g run.Group
- {
- // Serve on the listener, if shutting down call GracefulStop.
- g.Add(func() error {
- return server.Serve(ln)
- }, func(err error) {
- server.GracefulStop()
- })
- }
- {
- // block on the closeCh or the doneCh. If we are shutting down close the
- // closeCh.
- closeCh := make(chan struct{})
- g.Add(func() error {
- select {
- case <-b.doneCh:
- case <-closeCh:
- }
- return nil
- }, func(err error) {
- close(closeCh)
- })
- }
-
- // Block until we are done
- g.Run()
-}
-
-// Close closes the stream and all servers.
-func (b *GRPCBroker) Close() error {
- b.streamer.Close()
- b.o.Do(func() {
- close(b.doneCh)
- })
- return nil
-}
-
-func (b *GRPCBroker) listenForKnocks(id uint32) error {
- p := b.getServerStream(id)
- for {
- select {
- case msg := <-p.ch:
- // Shouldn't be possible.
- if msg.ServiceId != id {
- return fmt.Errorf("knock received with wrong service ID; expected %d but got %d", id, msg.ServiceId)
- }
-
- // Also shouldn't be possible.
- if msg.Knock == nil || !msg.Knock.Knock || msg.Knock.Ack {
- return fmt.Errorf("knock received for service ID %d with incorrect values; knock=%+v", id, msg.Knock)
- }
-
- // Successful knock, open the door for the given ID.
- var ackError string
- err := b.muxer.AcceptKnock(id)
- if err != nil {
- ackError = err.Error()
- }
-
- // Send back an acknowledgement to allow the client to start dialling.
- err = b.streamer.Send(&plugin.ConnInfo{
- ServiceId: id,
- Knock: &plugin.ConnInfo_Knock{
- Knock: true,
- Ack: true,
- Error: ackError,
- },
- })
- if err != nil {
- return fmt.Errorf("error sending back knock acknowledgement: %w", err)
- }
- case <-p.doneCh:
- return nil
- }
- }
-}
-
-func (b *GRPCBroker) knock(id uint32) error {
- // Send a knock.
- err := b.streamer.Send(&plugin.ConnInfo{
- ServiceId: id,
- Knock: &plugin.ConnInfo_Knock{
- Knock: true,
- },
- })
- if err != nil {
- return err
- }
-
- // Wait for the ack.
- p := b.getClientStream(id)
- select {
- case msg := <-p.ch:
- if msg.ServiceId != id {
- return fmt.Errorf("handshake failed for multiplexing on id %d; got response for %d", id, msg.ServiceId)
- }
- if msg.Knock == nil || !msg.Knock.Knock || !msg.Knock.Ack {
- return fmt.Errorf("handshake failed for multiplexing on id %d; expected knock and ack, but got %+v", id, msg.Knock)
- }
- if msg.Knock.Error != "" {
- return fmt.Errorf("failed to knock for id %d: %s", id, msg.Knock.Error)
- }
- case <-time.After(5 * time.Second):
- return fmt.Errorf("timeout waiting for multiplexing knock handshake on id %d", id)
- }
-
- return nil
-}
-
-func (b *GRPCBroker) muxDial(id uint32) func(string, time.Duration) (net.Conn, error) {
- return func(string, time.Duration) (net.Conn, error) {
- b.dialMutex.Lock()
- defer b.dialMutex.Unlock()
-
- // Tell the other side the listener ID it should give the next stream to.
- err := b.knock(id)
- if err != nil {
- return nil, fmt.Errorf("failed to knock before dialling client: %w", err)
- }
-
- conn, err := b.muxer.Dial()
- if err != nil {
- return nil, err
- }
-
- return conn, nil
- }
-}
-
-// Dial opens a connection by ID.
-func (b *GRPCBroker) Dial(id uint32) (conn *grpc.ClientConn, err error) {
- if b.muxer.Enabled() {
- return dialGRPCConn(b.tls, b.muxDial(id))
- }
-
- var c *plugin.ConnInfo
-
- // Open the stream
- p := b.getClientStream(id)
- select {
- case c = <-p.ch:
- close(p.doneCh)
- case <-time.After(5 * time.Second):
- return nil, fmt.Errorf("timeout waiting for connection info")
- }
-
- network, address := c.Network, c.Address
- if b.addrTranslator != nil {
- network, address, err = b.addrTranslator.PluginToHost(network, address)
- if err != nil {
- return nil, err
- }
- }
-
- var addr net.Addr
- switch network {
- case "tcp":
- addr, err = net.ResolveTCPAddr("tcp", address)
- case "unix":
- addr, err = net.ResolveUnixAddr("unix", address)
- default:
- err = fmt.Errorf("Unknown address type: %s", c.Address)
- }
- if err != nil {
- return nil, err
- }
-
- return dialGRPCConn(b.tls, netAddrDialer(addr))
-}
-
-// NextId returns a unique ID to use next.
-//
-// It is possible for very long-running plugin hosts to wrap this value,
-// though it would require a very large amount of calls. In practice
-// we've never seen it happen.
-func (m *GRPCBroker) NextId() uint32 {
- return atomic.AddUint32(&m.nextId, 1)
-}
-
-// Run starts the brokering and should be executed in a goroutine, since it
-// blocks forever, or until the session closes.
-//
-// Uses of GRPCBroker never need to call this. It is called internally by
-// the plugin host/client.
-func (m *GRPCBroker) Run() {
- for {
- msg, err := m.streamer.Recv()
- if err != nil {
- // Once we receive an error, just exit
- break
- }
-
- // Initialize the waiter
- var p *gRPCBrokerPending
- if msg.Knock != nil && msg.Knock.Knock && !msg.Knock.Ack {
- p = m.getServerStream(msg.ServiceId)
- // The server side doesn't close the channel immediately as it needs
- // to continuously listen for knocks.
- } else {
- p = m.getClientStream(msg.ServiceId)
- go m.timeoutWait(msg.ServiceId, p)
- }
- select {
- case p.ch <- msg:
- default:
- }
- }
-}
-
-// getClientStream is a buffer to receive new connection info and knock acks
-// by stream ID.
-func (m *GRPCBroker) getClientStream(id uint32) *gRPCBrokerPending {
- m.Lock()
- defer m.Unlock()
-
- p, ok := m.clientStreams[id]
- if ok {
- return p
- }
-
- m.clientStreams[id] = &gRPCBrokerPending{
- ch: make(chan *plugin.ConnInfo, 1),
- doneCh: make(chan struct{}),
- }
- return m.clientStreams[id]
-}
-
-// getServerStream is a buffer to receive knocks to a multiplexed stream ID
-// that its side is listening on. Not used unless multiplexing is enabled.
-func (m *GRPCBroker) getServerStream(id uint32) *gRPCBrokerPending {
- m.Lock()
- defer m.Unlock()
-
- p, ok := m.serverStreams[id]
- if ok {
- return p
- }
-
- m.serverStreams[id] = &gRPCBrokerPending{
- ch: make(chan *plugin.ConnInfo, 1),
- doneCh: make(chan struct{}),
- }
- return m.serverStreams[id]
-}
-
-func (m *GRPCBroker) timeoutWait(id uint32, p *gRPCBrokerPending) {
- // Wait for the stream to either be picked up and connected, or
- // for a timeout.
- select {
- case <-p.doneCh:
- case <-time.After(5 * time.Second):
- }
-
- m.Lock()
- defer m.Unlock()
-
- // Delete the stream so no one else can grab it
- delete(m.clientStreams, id)
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go
deleted file mode 100644
index 627649d8..00000000
--- a/vendor/github.com/hashicorp/go-plugin/grpc_client.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package plugin
-
-import (
- "context"
- "crypto/tls"
- "fmt"
- "math"
- "net"
- "time"
-
- "github.com/hashicorp/go-plugin/internal/plugin"
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/health/grpc_health_v1"
-)
-
-func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error), dialOpts ...grpc.DialOption) (*grpc.ClientConn, error) {
- // Build dialing options.
- opts := make([]grpc.DialOption, 0)
-
- // We use a custom dialer so that we can connect over unix domain sockets.
- opts = append(opts, grpc.WithDialer(dialer))
-
- // Fail right away
- opts = append(opts, grpc.FailOnNonTempDialError(true))
-
- // If we have no TLS configuration set, we need to explicitly tell grpc
- // that we're connecting with an insecure connection.
- if tls == nil {
- opts = append(opts, grpc.WithInsecure())
- } else {
- opts = append(opts, grpc.WithTransportCredentials(
- credentials.NewTLS(tls)))
- }
-
- opts = append(opts,
- grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)),
- grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(math.MaxInt32)))
-
- // Add our custom options if we have any
- opts = append(opts, dialOpts...)
-
- // Connect. Note the first parameter is unused because we use a custom
- // dialer that has the state to see the address.
- conn, err := grpc.Dial("unused", opts...)
- if err != nil {
- return nil, err
- }
-
- return conn, nil
-}
-
-// newGRPCClient creates a new GRPCClient. The Client argument is expected
-// to be successfully started already with a lock held.
-func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) {
- conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer, c.config.GRPCDialOptions...)
- if err != nil {
- return nil, err
- }
-
- muxer, err := c.getGRPCMuxer(c.address)
- if err != nil {
- return nil, err
- }
-
- // Start the broker.
- brokerGRPCClient := newGRPCBrokerClient(conn)
- broker := newGRPCBroker(brokerGRPCClient, c.config.TLSConfig, c.unixSocketCfg, c.runner, muxer)
- go broker.Run()
- go brokerGRPCClient.StartStream()
-
- // Start the stdio client
- stdioClient, err := newGRPCStdioClient(doneCtx, c.logger.Named("stdio"), conn)
- if err != nil {
- return nil, err
- }
- go stdioClient.Run(c.config.SyncStdout, c.config.SyncStderr)
-
- cl := &GRPCClient{
- Conn: conn,
- Plugins: c.config.Plugins,
- doneCtx: doneCtx,
- broker: broker,
- controller: plugin.NewGRPCControllerClient(conn),
- }
-
- return cl, nil
-}
-
-// GRPCClient connects to a GRPCServer over gRPC to dispense plugin types.
-type GRPCClient struct {
- Conn *grpc.ClientConn
- Plugins map[string]Plugin
-
- doneCtx context.Context
- broker *GRPCBroker
-
- controller plugin.GRPCControllerClient
-}
-
-// ClientProtocol impl.
-func (c *GRPCClient) Close() error {
- c.broker.Close()
- c.controller.Shutdown(c.doneCtx, &plugin.Empty{})
- return c.Conn.Close()
-}
-
-// ClientProtocol impl.
-func (c *GRPCClient) Dispense(name string) (interface{}, error) {
- raw, ok := c.Plugins[name]
- if !ok {
- return nil, fmt.Errorf("unknown plugin type: %s", name)
- }
-
- p, ok := raw.(GRPCPlugin)
- if !ok {
- return nil, fmt.Errorf("plugin %q doesn't support gRPC", name)
- }
-
- return p.GRPCClient(c.doneCtx, c.broker, c.Conn)
-}
-
-// ClientProtocol impl.
-func (c *GRPCClient) Ping() error {
- client := grpc_health_v1.NewHealthClient(c.Conn)
- _, err := client.Check(context.Background(), &grpc_health_v1.HealthCheckRequest{
- Service: GRPCServiceName,
- })
-
- return err
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_controller.go b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go
deleted file mode 100644
index 2085356c..00000000
--- a/vendor/github.com/hashicorp/go-plugin/grpc_controller.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package plugin
-
-import (
- "context"
-
- "github.com/hashicorp/go-plugin/internal/plugin"
-)
-
-// GRPCControllerServer handles shutdown calls to terminate the server when the
-// plugin client is closed.
-type grpcControllerServer struct {
- server *GRPCServer
-}
-
-// Shutdown stops the grpc server. It first will attempt a graceful stop, then a
-// full stop on the server.
-func (s *grpcControllerServer) Shutdown(ctx context.Context, _ *plugin.Empty) (*plugin.Empty, error) {
- resp := &plugin.Empty{}
-
- // TODO: figure out why GracefullStop doesn't work.
- s.server.Stop()
- return resp, nil
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go
deleted file mode 100644
index a5f40c7f..00000000
--- a/vendor/github.com/hashicorp/go-plugin/grpc_server.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package plugin
-
-import (
- "bytes"
- "crypto/tls"
- "encoding/json"
- "fmt"
- "io"
- "net"
-
- hclog "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/go-plugin/internal/grpcmux"
- "github.com/hashicorp/go-plugin/internal/plugin"
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/health"
- "google.golang.org/grpc/health/grpc_health_v1"
- "google.golang.org/grpc/reflection"
-)
-
-// GRPCServiceName is the name of the service that the health check should
-// return as passing.
-const GRPCServiceName = "plugin"
-
-// DefaultGRPCServer can be used with the "GRPCServer" field for Server
-// as a default factory method to create a gRPC server with no extra options.
-func DefaultGRPCServer(opts []grpc.ServerOption) *grpc.Server {
- return grpc.NewServer(opts...)
-}
-
-// GRPCServer is a ServerType implementation that serves plugins over
-// gRPC. This allows plugins to easily be written for other languages.
-//
-// The GRPCServer outputs a custom configuration as a base64-encoded
-// JSON structure represented by the GRPCServerConfig config structure.
-type GRPCServer struct {
- // Plugins are the list of plugins to serve.
- Plugins map[string]Plugin
-
- // Server is the actual server that will accept connections. This
- // will be used for plugin registration as well.
- Server func([]grpc.ServerOption) *grpc.Server
-
- // TLS should be the TLS configuration if available. If this is nil,
- // the connection will not have transport security.
- TLS *tls.Config
-
- // DoneCh is the channel that is closed when this server has exited.
- DoneCh chan struct{}
-
- // Stdout/StderrLis are the readers for stdout/stderr that will be copied
- // to the stdout/stderr connection that is output.
- Stdout io.Reader
- Stderr io.Reader
-
- config GRPCServerConfig
- server *grpc.Server
- broker *GRPCBroker
- stdioServer *grpcStdioServer
-
- logger hclog.Logger
-
- muxer *grpcmux.GRPCServerMuxer
-}
-
-// ServerProtocol impl.
-func (s *GRPCServer) Init() error {
- // Create our server
- var opts []grpc.ServerOption
- if s.TLS != nil {
- opts = append(opts, grpc.Creds(credentials.NewTLS(s.TLS)))
- }
- s.server = s.Server(opts)
-
- // Register the health service
- healthCheck := health.NewServer()
- healthCheck.SetServingStatus(
- GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING)
- grpc_health_v1.RegisterHealthServer(s.server, healthCheck)
-
- // Register the reflection service
- reflection.Register(s.server)
-
- // Register the broker service
- brokerServer := newGRPCBrokerServer()
- plugin.RegisterGRPCBrokerServer(s.server, brokerServer)
- s.broker = newGRPCBroker(brokerServer, s.TLS, unixSocketConfigFromEnv(), nil, s.muxer)
- go s.broker.Run()
-
- // Register the controller
- controllerServer := &grpcControllerServer{server: s}
- plugin.RegisterGRPCControllerServer(s.server, controllerServer)
-
- // Register the stdio service
- s.stdioServer = newGRPCStdioServer(s.logger, s.Stdout, s.Stderr)
- plugin.RegisterGRPCStdioServer(s.server, s.stdioServer)
-
- // Register all our plugins onto the gRPC server.
- for k, raw := range s.Plugins {
- p, ok := raw.(GRPCPlugin)
- if !ok {
- return fmt.Errorf("%q is not a GRPC-compatible plugin", k)
- }
-
- if err := p.GRPCServer(s.broker, s.server); err != nil {
- return fmt.Errorf("error registering %q: %s", k, err)
- }
- }
-
- return nil
-}
-
-// Stop calls Stop on the underlying grpc.Server and Close on the underlying
-// grpc.Broker if present.
-func (s *GRPCServer) Stop() {
- s.server.Stop()
-
- if s.broker != nil {
- s.broker.Close()
- s.broker = nil
- }
-}
-
-// GracefulStop calls GracefulStop on the underlying grpc.Server and Close on
-// the underlying grpc.Broker if present.
-func (s *GRPCServer) GracefulStop() {
- s.server.GracefulStop()
-
- if s.broker != nil {
- s.broker.Close()
- s.broker = nil
- }
-}
-
-// Config is the GRPCServerConfig encoded as JSON then base64.
-func (s *GRPCServer) Config() string {
- // Create a buffer that will contain our final contents
- var buf bytes.Buffer
-
- // Wrap the base64 encoding with JSON encoding.
- if err := json.NewEncoder(&buf).Encode(s.config); err != nil {
- // We panic since ths shouldn't happen under any scenario. We
- // carefully control the structure being encoded here and it should
- // always be successful.
- panic(err)
- }
-
- return buf.String()
-}
-
-func (s *GRPCServer) Serve(lis net.Listener) {
- defer close(s.DoneCh)
- err := s.server.Serve(lis)
- if err != nil {
- s.logger.Error("grpc server", "error", err)
- }
-}
-
-// GRPCServerConfig is the extra configuration passed along for consumers
-// to facilitate using GRPC plugins.
-type GRPCServerConfig struct {
- StdoutAddr string `json:"stdout_addr"`
- StderrAddr string `json:"stderr_addr"`
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go b/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go
deleted file mode 100644
index ae06c116..00000000
--- a/vendor/github.com/hashicorp/go-plugin/grpc_stdio.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package plugin
-
-import (
- "bufio"
- "bytes"
- "context"
- "io"
-
- empty "github.com/golang/protobuf/ptypes/empty"
- hclog "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/go-plugin/internal/plugin"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-// grpcStdioBuffer is the buffer size we try to fill when sending a chunk of
-// stdio data. This is currently 1 KB for no reason other than that seems like
-// enough (stdio data isn't that common) and is fairly low.
-const grpcStdioBuffer = 1 * 1024
-
-// grpcStdioServer implements the Stdio service and streams stdiout/stderr.
-type grpcStdioServer struct {
- stdoutCh <-chan []byte
- stderrCh <-chan []byte
-}
-
-// newGRPCStdioServer creates a new grpcStdioServer and starts the stream
-// copying for the given out and err readers.
-//
-// This must only be called ONCE per srcOut, srcErr.
-func newGRPCStdioServer(log hclog.Logger, srcOut, srcErr io.Reader) *grpcStdioServer {
- stdoutCh := make(chan []byte)
- stderrCh := make(chan []byte)
-
- // Begin copying the streams
- go copyChan(log, stdoutCh, srcOut)
- go copyChan(log, stderrCh, srcErr)
-
- // Construct our server
- return &grpcStdioServer{
- stdoutCh: stdoutCh,
- stderrCh: stderrCh,
- }
-}
-
-// StreamStdio streams our stdout/err as the response.
-func (s *grpcStdioServer) StreamStdio(
- _ *empty.Empty,
- srv plugin.GRPCStdio_StreamStdioServer,
-) error {
- // Share the same data value between runs. Sending this over the wire
- // marshals it so we can reuse this.
- var data plugin.StdioData
-
- for {
- // Read our data
- select {
- case data.Data = <-s.stdoutCh:
- data.Channel = plugin.StdioData_STDOUT
-
- case data.Data = <-s.stderrCh:
- data.Channel = plugin.StdioData_STDERR
-
- case <-srv.Context().Done():
- return nil
- }
-
- // Not sure if this is possible, but if we somehow got here and
- // we didn't populate any data at all, then just continue.
- if len(data.Data) == 0 {
- continue
- }
-
- // Send our data to the client.
- if err := srv.Send(&data); err != nil {
- return err
- }
- }
-}
-
-// grpcStdioClient wraps the stdio service as a client to copy
-// the stdio data to output writers.
-type grpcStdioClient struct {
- log hclog.Logger
- stdioClient plugin.GRPCStdio_StreamStdioClient
-}
-
-// newGRPCStdioClient creates a grpcStdioClient. This will perform the
-// initial connection to the stdio service. If the stdio service is unavailable
-// then this will be a no-op. This allows this to work without error for
-// plugins that don't support this.
-func newGRPCStdioClient(
- ctx context.Context,
- log hclog.Logger,
- conn *grpc.ClientConn,
-) (*grpcStdioClient, error) {
- client := plugin.NewGRPCStdioClient(conn)
-
- // Connect immediately to the endpoint
- stdioClient, err := client.StreamStdio(ctx, &empty.Empty{})
-
- // If we get an Unavailable or Unimplemented error, this means that the plugin isn't
- // updated and linking to the latest version of go-plugin that supports
- // this. We fall back to the previous behavior of just not syncing anything.
- if status.Code(err) == codes.Unavailable || status.Code(err) == codes.Unimplemented {
- log.Warn("stdio service not available, stdout/stderr syncing unavailable")
- stdioClient = nil
- err = nil
- }
- if err != nil {
- return nil, err
- }
-
- return &grpcStdioClient{
- log: log,
- stdioClient: stdioClient,
- }, nil
-}
-
-// Run starts the loop that receives stdio data and writes it to the given
-// writers. This blocks and should be run in a goroutine.
-func (c *grpcStdioClient) Run(stdout, stderr io.Writer) {
- // This will be nil if stdio is not supported by the plugin
- if c.stdioClient == nil {
- c.log.Warn("stdio service unavailable, run will do nothing")
- return
- }
-
- for {
- c.log.Trace("waiting for stdio data")
- data, err := c.stdioClient.Recv()
- if err != nil {
- if err == io.EOF ||
- status.Code(err) == codes.Unavailable ||
- status.Code(err) == codes.Canceled ||
- status.Code(err) == codes.Unimplemented ||
- err == context.Canceled {
- c.log.Debug("received EOF, stopping recv loop", "err", err)
- return
- }
-
- c.log.Error("error receiving data", "err", err)
- return
- }
-
- // Determine our output writer based on channel
- var w io.Writer
- switch data.Channel {
- case plugin.StdioData_STDOUT:
- w = stdout
-
- case plugin.StdioData_STDERR:
- w = stderr
-
- default:
- c.log.Warn("unknown channel, dropping", "channel", data.Channel)
- continue
- }
-
- // Write! In the event of an error we just continue.
- if c.log.IsTrace() {
- c.log.Trace("received data", "channel", data.Channel.String(), "len", len(data.Data))
- }
- if _, err := io.Copy(w, bytes.NewReader(data.Data)); err != nil {
- c.log.Error("failed to copy all bytes", "err", err)
- }
- }
-}
-
-// copyChan copies an io.Reader into a channel.
-func copyChan(log hclog.Logger, dst chan<- []byte, src io.Reader) {
- bufsrc := bufio.NewReader(src)
-
- for {
- // Make our data buffer. We allocate a new one per loop iteration
- // so that we can send it over the channel.
- var data [1024]byte
-
- // Read the data, this will block until data is available
- n, err := bufsrc.Read(data[:])
-
- // We have to check if we have data BEFORE err != nil. The bufio
- // docs guarantee n == 0 on EOF but its better to be safe here.
- if n > 0 {
- // We have data! Send it on the channel. This will block if there
- // is no reader on the other side. We expect that go-plugin will
- // connect immediately to the stdio server to drain this so we want
- // this block to happen for backpressure.
- dst <- data[:n]
- }
-
- // If we hit EOF we're done copying
- if err == io.EOF {
- log.Debug("stdio EOF, exiting copy loop")
- return
- }
-
- // Any other error we just exit the loop. We don't expect there to
- // be errors since our use case for this is reading/writing from
- // a in-process pipe (os.Pipe).
- if err != nil {
- log.Warn("error copying stdio data, stopping copy", "err", err)
- return
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/addr_translator.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/addr_translator.go
deleted file mode 100644
index 1854d2dd..00000000
--- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/addr_translator.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package cmdrunner
-
-// addrTranslator implements stateless identity functions, as the host and plugin
-// run in the same context wrt Unix and network addresses.
-type addrTranslator struct{}
-
-func (*addrTranslator) PluginToHost(pluginNet, pluginAddr string) (string, string, error) {
- return pluginNet, pluginAddr, nil
-}
-
-func (*addrTranslator) HostToPlugin(hostNet, hostAddr string) (string, string, error) {
- return hostNet, hostAddr, nil
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_reattach.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_reattach.go
deleted file mode 100644
index dce1a86a..00000000
--- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_reattach.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package cmdrunner
-
-import (
- "context"
- "fmt"
- "net"
- "os"
-
- "github.com/hashicorp/go-plugin/runner"
-)
-
-// ReattachFunc returns a function that allows reattaching to a plugin running
-// as a plain process. The process may or may not be a child process.
-func ReattachFunc(pid int, addr net.Addr) runner.ReattachFunc {
- return func() (runner.AttachedRunner, error) {
- p, err := os.FindProcess(pid)
- if err != nil {
- // On Unix systems, FindProcess never returns an error.
- // On Windows, for non-existent pids it returns:
- // os.SyscallError - 'OpenProcess: the paremter is incorrect'
- return nil, ErrProcessNotFound
- }
-
- // Attempt to connect to the addr since on Unix systems FindProcess
- // doesn't actually return an error if it can't find the process.
- conn, err := net.Dial(addr.Network(), addr.String())
- if err != nil {
- p.Kill()
- return nil, ErrProcessNotFound
- }
- conn.Close()
-
- return &CmdAttachedRunner{
- pid: pid,
- process: p,
- }, nil
- }
-}
-
-// CmdAttachedRunner is mostly a subset of CmdRunner, except the Wait function
-// does not assume the process is a child of the host process, and so uses a
-// different implementation to wait on the process.
-type CmdAttachedRunner struct {
- pid int
- process *os.Process
-
- addrTranslator
-}
-
-func (c *CmdAttachedRunner) Wait(_ context.Context) error {
- return pidWait(c.pid)
-}
-
-func (c *CmdAttachedRunner) Kill(_ context.Context) error {
- return c.process.Kill()
-}
-
-func (c *CmdAttachedRunner) ID() string {
- return fmt.Sprintf("%d", c.pid)
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_runner.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_runner.go
deleted file mode 100644
index b26fea92..00000000
--- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/cmd_runner.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package cmdrunner
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
- "os"
- "os/exec"
-
- "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/go-plugin/runner"
-)
-
-var (
- _ runner.Runner = (*CmdRunner)(nil)
-
- // ErrProcessNotFound is returned when a client is instantiated to
- // reattach to an existing process and it isn't found.
- ErrProcessNotFound = errors.New("Reattachment process not found")
-)
-
-const unrecognizedRemotePluginMessage = `This usually means
- the plugin was not compiled for this architecture,
- the plugin is missing dynamic-link libraries necessary to run,
- the plugin is not executable by this process due to file permissions, or
- the plugin failed to negotiate the initial go-plugin protocol handshake
-%s`
-
-// CmdRunner implements the runner.Runner interface. It mostly just passes through
-// to exec.Cmd methods.
-type CmdRunner struct {
- logger hclog.Logger
- cmd *exec.Cmd
-
- stdout io.ReadCloser
- stderr io.ReadCloser
-
- // Cmd info is persisted early, since the process information will be removed
- // after Kill is called.
- path string
- pid int
-
- addrTranslator
-}
-
-// NewCmdRunner returns an implementation of runner.Runner for running a plugin
-// as a subprocess. It must be passed a cmd that hasn't yet been started.
-func NewCmdRunner(logger hclog.Logger, cmd *exec.Cmd) (*CmdRunner, error) {
- stdout, err := cmd.StdoutPipe()
- if err != nil {
- return nil, err
- }
-
- stderr, err := cmd.StderrPipe()
- if err != nil {
- return nil, err
- }
-
- return &CmdRunner{
- logger: logger,
- cmd: cmd,
- stdout: stdout,
- stderr: stderr,
- path: cmd.Path,
- }, nil
-}
-
-func (c *CmdRunner) Start(_ context.Context) error {
- c.logger.Debug("starting plugin", "path", c.cmd.Path, "args", c.cmd.Args)
- err := c.cmd.Start()
- if err != nil {
- return err
- }
-
- c.pid = c.cmd.Process.Pid
- c.logger.Debug("plugin started", "path", c.path, "pid", c.pid)
- return nil
-}
-
-func (c *CmdRunner) Wait(_ context.Context) error {
- return c.cmd.Wait()
-}
-
-func (c *CmdRunner) Kill(_ context.Context) error {
- if c.cmd.Process != nil {
- err := c.cmd.Process.Kill()
- // Swallow ErrProcessDone, we support calling Kill multiple times.
- if !errors.Is(err, os.ErrProcessDone) {
- return err
- }
- return nil
- }
-
- return nil
-}
-
-func (c *CmdRunner) Stdout() io.ReadCloser {
- return c.stdout
-}
-
-func (c *CmdRunner) Stderr() io.ReadCloser {
- return c.stderr
-}
-
-func (c *CmdRunner) Name() string {
- return c.path
-}
-
-func (c *CmdRunner) ID() string {
- return fmt.Sprintf("%d", c.pid)
-}
-
-// peTypes is a list of Portable Executable (PE) machine types from https://learn.microsoft.com/en-us/windows/win32/debug/pe-format
-// mapped to GOARCH types. It is not comprehensive, and only includes machine types that Go supports.
-var peTypes = map[uint16]string{
- 0x14c: "386",
- 0x1c0: "arm",
- 0x6264: "loong64",
- 0x8664: "amd64",
- 0xaa64: "arm64",
-}
-
-func (c *CmdRunner) Diagnose(_ context.Context) string {
- return fmt.Sprintf(unrecognizedRemotePluginMessage, additionalNotesAboutCommand(c.cmd.Path))
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_unix.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_unix.go
deleted file mode 100644
index ce04cfeb..00000000
--- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_unix.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-//go:build !windows
-// +build !windows
-
-package cmdrunner
-
-import (
- "debug/elf"
- "debug/macho"
- "debug/pe"
- "fmt"
- "os"
- "os/user"
- "runtime"
- "strconv"
- "syscall"
-)
-
-// additionalNotesAboutCommand tries to get additional information about a command that might help diagnose
-// why it won't run correctly. It runs as a best effort only.
-func additionalNotesAboutCommand(path string) string {
- notes := ""
- stat, err := os.Stat(path)
- if err != nil {
- return notes
- }
-
- notes += "\nAdditional notes about plugin:\n"
- notes += fmt.Sprintf(" Path: %s\n", path)
- notes += fmt.Sprintf(" Mode: %s\n", stat.Mode())
- statT, ok := stat.Sys().(*syscall.Stat_t)
- if ok {
- currentUsername := "?"
- if u, err := user.LookupId(strconv.FormatUint(uint64(os.Getuid()), 10)); err == nil {
- currentUsername = u.Username
- }
- currentGroup := "?"
- if g, err := user.LookupGroupId(strconv.FormatUint(uint64(os.Getgid()), 10)); err == nil {
- currentGroup = g.Name
- }
- username := "?"
- if u, err := user.LookupId(strconv.FormatUint(uint64(statT.Uid), 10)); err == nil {
- username = u.Username
- }
- group := "?"
- if g, err := user.LookupGroupId(strconv.FormatUint(uint64(statT.Gid), 10)); err == nil {
- group = g.Name
- }
- notes += fmt.Sprintf(" Owner: %d [%s] (current: %d [%s])\n", statT.Uid, username, os.Getuid(), currentUsername)
- notes += fmt.Sprintf(" Group: %d [%s] (current: %d [%s])\n", statT.Gid, group, os.Getgid(), currentGroup)
- }
-
- if elfFile, err := elf.Open(path); err == nil {
- defer elfFile.Close()
- notes += fmt.Sprintf(" ELF architecture: %s (current architecture: %s)\n", elfFile.Machine, runtime.GOARCH)
- } else if machoFile, err := macho.Open(path); err == nil {
- defer machoFile.Close()
- notes += fmt.Sprintf(" MachO architecture: %s (current architecture: %s)\n", machoFile.Cpu, runtime.GOARCH)
- } else if peFile, err := pe.Open(path); err == nil {
- defer peFile.Close()
- machine, ok := peTypes[peFile.Machine]
- if !ok {
- machine = "unknown"
- }
- notes += fmt.Sprintf(" PE architecture: %s (current architecture: %s)\n", machine, runtime.GOARCH)
- }
- return notes
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_windows.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_windows.go
deleted file mode 100644
index 39c51dd1..00000000
--- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/notes_windows.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-//go:build windows
-// +build windows
-
-package cmdrunner
-
-import (
- "debug/elf"
- "debug/macho"
- "debug/pe"
- "fmt"
- "os"
- "runtime"
-)
-
-// additionalNotesAboutCommand tries to get additional information about a command that might help diagnose
-// why it won't run correctly. It runs as a best effort only.
-func additionalNotesAboutCommand(path string) string {
- notes := ""
- stat, err := os.Stat(path)
- if err != nil {
- return notes
- }
-
- notes += "\nAdditional notes about plugin:\n"
- notes += fmt.Sprintf(" Path: %s\n", path)
- notes += fmt.Sprintf(" Mode: %s\n", stat.Mode())
-
- if elfFile, err := elf.Open(path); err == nil {
- defer elfFile.Close()
- notes += fmt.Sprintf(" ELF architecture: %s (current architecture: %s)\n", elfFile.Machine, runtime.GOARCH)
- } else if machoFile, err := macho.Open(path); err == nil {
- defer machoFile.Close()
- notes += fmt.Sprintf(" MachO architecture: %s (current architecture: %s)\n", machoFile.Cpu, runtime.GOARCH)
- } else if peFile, err := pe.Open(path); err == nil {
- defer peFile.Close()
- machine, ok := peTypes[peFile.Machine]
- if !ok {
- machine = "unknown"
- }
- notes += fmt.Sprintf(" PE architecture: %s (current architecture: %s)\n", machine, runtime.GOARCH)
- }
- return notes
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process.go
deleted file mode 100644
index 6c34dc77..00000000
--- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package cmdrunner
-
-import "time"
-
-// pidAlive checks whether a pid is alive.
-func pidAlive(pid int) bool {
- return _pidAlive(pid)
-}
-
-// pidWait blocks for a process to exit.
-func pidWait(pid int) error {
- ticker := time.NewTicker(1 * time.Second)
- defer ticker.Stop()
-
- for range ticker.C {
- if !pidAlive(pid) {
- break
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_posix.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_posix.go
deleted file mode 100644
index bf3fc5b6..00000000
--- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_posix.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-//go:build !windows
-// +build !windows
-
-package cmdrunner
-
-import (
- "os"
- "syscall"
-)
-
-// _pidAlive tests whether a process is alive or not by sending it Signal 0,
-// since Go otherwise has no way to test this.
-func _pidAlive(pid int) bool {
- proc, err := os.FindProcess(pid)
- if err == nil {
- err = proc.Signal(syscall.Signal(0))
- }
-
- return err == nil
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_windows.go b/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_windows.go
deleted file mode 100644
index 6c39df28..00000000
--- a/vendor/github.com/hashicorp/go-plugin/internal/cmdrunner/process_windows.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package cmdrunner
-
-import (
- "syscall"
-)
-
-const (
- // Weird name but matches the MSDN docs
- exit_STILL_ACTIVE = 259
-
- processDesiredAccess = syscall.STANDARD_RIGHTS_READ |
- syscall.PROCESS_QUERY_INFORMATION |
- syscall.SYNCHRONIZE
-)
-
-// _pidAlive tests whether a process is alive or not
-func _pidAlive(pid int) bool {
- h, err := syscall.OpenProcess(processDesiredAccess, false, uint32(pid))
- if err != nil {
- return false
- }
- defer syscall.CloseHandle(h)
-
- var ec uint32
- if e := syscall.GetExitCodeProcess(h, &ec); e != nil {
- return false
- }
-
- return ec == exit_STILL_ACTIVE
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go
deleted file mode 100644
index e8a3a152..00000000
--- a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package grpcmux
-
-import (
- "io"
- "net"
-
- "github.com/hashicorp/yamux"
-)
-
-var _ net.Listener = (*blockedClientListener)(nil)
-
-// blockedClientListener accepts connections for a specific gRPC broker stream
-// ID on the client (host) side of the connection.
-type blockedClientListener struct {
- session *yamux.Session
- waitCh chan struct{}
- doneCh <-chan struct{}
-}
-
-func newBlockedClientListener(session *yamux.Session, doneCh <-chan struct{}) *blockedClientListener {
- return &blockedClientListener{
- waitCh: make(chan struct{}, 1),
- doneCh: doneCh,
- session: session,
- }
-}
-
-func (b *blockedClientListener) Accept() (net.Conn, error) {
- select {
- case <-b.waitCh:
- return b.session.Accept()
- case <-b.doneCh:
- return nil, io.EOF
- }
-}
-
-func (b *blockedClientListener) Addr() net.Addr {
- return b.session.Addr()
-}
-
-func (b *blockedClientListener) Close() error {
- // We don't close the session, the client muxer is responsible for that.
- return nil
-}
-
-func (b *blockedClientListener) unblock() {
- b.waitCh <- struct{}{}
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go
deleted file mode 100644
index 0edb2c05..00000000
--- a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package grpcmux
-
-import (
- "io"
- "net"
-)
-
-var _ net.Listener = (*blockedServerListener)(nil)
-
-// blockedServerListener accepts connections for a specific gRPC broker stream
-// ID on the server (plugin) side of the connection.
-type blockedServerListener struct {
- addr net.Addr
- acceptCh chan acceptResult
- doneCh <-chan struct{}
-}
-
-type acceptResult struct {
- conn net.Conn
- err error
-}
-
-func newBlockedServerListener(addr net.Addr, doneCh <-chan struct{}) *blockedServerListener {
- return &blockedServerListener{
- addr: addr,
- acceptCh: make(chan acceptResult),
- doneCh: doneCh,
- }
-}
-
-func (b *blockedServerListener) Accept() (net.Conn, error) {
- select {
- case accept := <-b.acceptCh:
- return accept.conn, accept.err
- case <-b.doneCh:
- return nil, io.EOF
- }
-}
-
-func (b *blockedServerListener) Addr() net.Addr {
- return b.addr
-}
-
-func (b *blockedServerListener) Close() error {
- return nil
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go
deleted file mode 100644
index b203ba46..00000000
--- a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package grpcmux
-
-import (
- "fmt"
- "net"
- "sync"
-
- "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/yamux"
-)
-
-var _ GRPCMuxer = (*GRPCClientMuxer)(nil)
-
-// GRPCClientMuxer implements the client (host) side of the gRPC broker's
-// GRPCMuxer interface for multiplexing multiple gRPC broker connections over
-// a single net.Conn.
-//
-// The client dials the initial net.Conn eagerly, and creates a yamux.Session
-// as the implementation for multiplexing any additional connections.
-//
-// Each net.Listener returned from Listener will block until the client receives
-// a knock that matches its gRPC broker stream ID. There is no default listener
-// on the client, as it is a client for the gRPC broker's control services. (See
-// GRPCServerMuxer for more details).
-type GRPCClientMuxer struct {
- logger hclog.Logger
- session *yamux.Session
-
- acceptMutex sync.Mutex
- acceptListeners map[uint32]*blockedClientListener
-}
-
-func NewGRPCClientMuxer(logger hclog.Logger, addr net.Addr) (*GRPCClientMuxer, error) {
- // Eagerly establish the underlying connection as early as possible.
- logger.Debug("making new client mux initial connection", "addr", addr)
- conn, err := net.Dial(addr.Network(), addr.String())
- if err != nil {
- return nil, err
- }
- if tcpConn, ok := conn.(*net.TCPConn); ok {
- // Make sure to set keep alive so that the connection doesn't die
- _ = tcpConn.SetKeepAlive(true)
- }
-
- cfg := yamux.DefaultConfig()
- cfg.Logger = logger.Named("yamux").StandardLogger(&hclog.StandardLoggerOptions{
- InferLevels: true,
- })
- cfg.LogOutput = nil
- sess, err := yamux.Client(conn, cfg)
- if err != nil {
- return nil, err
- }
-
- logger.Debug("client muxer connected", "addr", addr)
- m := &GRPCClientMuxer{
- logger: logger,
- session: sess,
- acceptListeners: make(map[uint32]*blockedClientListener),
- }
-
- return m, nil
-}
-
-func (m *GRPCClientMuxer) Enabled() bool {
- return m != nil
-}
-
-func (m *GRPCClientMuxer) Listener(id uint32, doneCh <-chan struct{}) (net.Listener, error) {
- ln := newBlockedClientListener(m.session, doneCh)
-
- m.acceptMutex.Lock()
- m.acceptListeners[id] = ln
- m.acceptMutex.Unlock()
-
- return ln, nil
-}
-
-func (m *GRPCClientMuxer) AcceptKnock(id uint32) error {
- m.acceptMutex.Lock()
- defer m.acceptMutex.Unlock()
-
- ln, ok := m.acceptListeners[id]
- if !ok {
- return fmt.Errorf("no listener for id %d", id)
- }
- ln.unblock()
- return nil
-}
-
-func (m *GRPCClientMuxer) Dial() (net.Conn, error) {
- stream, err := m.session.Open()
- if err != nil {
- return nil, fmt.Errorf("error dialling new client stream: %w", err)
- }
-
- return stream, nil
-}
-
-func (m *GRPCClientMuxer) Close() error {
- return m.session.Close()
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go
deleted file mode 100644
index c52aaf55..00000000
--- a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package grpcmux
-
-import (
- "net"
-)
-
-// GRPCMuxer enables multiple implementations of net.Listener to accept
-// connections over a single "main" multiplexed net.Conn, and dial multiple
-// client connections over the same multiplexed net.Conn.
-//
-// The first multiplexed connection is used to serve the gRPC broker's own
-// control services: plugin.GRPCBroker, plugin.GRPCController, plugin.GRPCStdio.
-//
-// Clients must "knock" before dialling, to tell the server side that the
-// next net.Conn should be accepted onto a specific stream ID. The knock is a
-// bidirectional streaming message on the plugin.GRPCBroker service.
-type GRPCMuxer interface {
- // Enabled determines whether multiplexing should be used. It saves users
- // of the interface from having to compare an interface with nil, which
- // is a bit awkward to do correctly.
- Enabled() bool
-
- // Listener returns a multiplexed listener that will wait until AcceptKnock
- // is called with a matching ID before its Accept function returns.
- Listener(id uint32, doneCh <-chan struct{}) (net.Listener, error)
-
- // AcceptKnock unblocks the listener with the matching ID, and returns an
- // error if it hasn't been created yet.
- AcceptKnock(id uint32) error
-
- // Dial makes a new multiplexed client connection. To dial a specific ID,
- // a knock must be sent first.
- Dial() (net.Conn, error)
-
- // Close closes connections and releases any resources associated with the
- // muxer.
- Close() error
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go b/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go
deleted file mode 100644
index 27696ee7..00000000
--- a/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package grpcmux
-
-import (
- "errors"
- "fmt"
- "net"
- "sync"
- "time"
-
- "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/yamux"
-)
-
-var _ GRPCMuxer = (*GRPCServerMuxer)(nil)
-var _ net.Listener = (*GRPCServerMuxer)(nil)
-
-// GRPCServerMuxer implements the server (plugin) side of the gRPC broker's
-// GRPCMuxer interface for multiplexing multiple gRPC broker connections over
-// a single net.Conn.
-//
-// The server side needs a listener to serve the gRPC broker's control services,
-// which includes the service we will receive knocks on. That means we always
-// accept the first connection onto a "default" main listener, and if we accept
-// any further connections without receiving a knock first, they are also given
-// to the default listener.
-//
-// When creating additional multiplexed listeners for specific stream IDs, we
-// can't control the order in which gRPC servers will call Accept() on each
-// listener, but we do need to control which gRPC server accepts which connection.
-// As such, each multiplexed listener blocks waiting on a channel. It will be
-// unblocked when a knock is received for the matching stream ID.
-type GRPCServerMuxer struct {
- addr net.Addr
- logger hclog.Logger
-
- sessionErrCh chan error
- sess *yamux.Session
-
- knockCh chan uint32
-
- acceptMutex sync.Mutex
- acceptChannels map[uint32]chan acceptResult
-}
-
-func NewGRPCServerMuxer(logger hclog.Logger, ln net.Listener) *GRPCServerMuxer {
- m := &GRPCServerMuxer{
- addr: ln.Addr(),
- logger: logger,
-
- sessionErrCh: make(chan error),
-
- knockCh: make(chan uint32, 1),
- acceptChannels: make(map[uint32]chan acceptResult),
- }
-
- go m.acceptSession(ln)
-
- return m
-}
-
-// acceptSessionAndMuxAccept is responsible for establishing the yamux session,
-// and then kicking off the acceptLoop function.
-func (m *GRPCServerMuxer) acceptSession(ln net.Listener) {
- defer close(m.sessionErrCh)
-
- m.logger.Debug("accepting initial connection", "addr", m.addr)
- conn, err := ln.Accept()
- if err != nil {
- m.sessionErrCh <- err
- return
- }
-
- m.logger.Debug("initial server connection accepted", "addr", m.addr)
- cfg := yamux.DefaultConfig()
- cfg.Logger = m.logger.Named("yamux").StandardLogger(&hclog.StandardLoggerOptions{
- InferLevels: true,
- })
- cfg.LogOutput = nil
- m.sess, err = yamux.Server(conn, cfg)
- if err != nil {
- m.sessionErrCh <- err
- return
- }
-}
-
-func (m *GRPCServerMuxer) session() (*yamux.Session, error) {
- select {
- case err := <-m.sessionErrCh:
- if err != nil {
- return nil, err
- }
- case <-time.After(5 * time.Second):
- return nil, errors.New("timed out waiting for connection to be established")
- }
-
- // Should never happen.
- if m.sess == nil {
- return nil, errors.New("no connection established and no error received")
- }
-
- return m.sess, nil
-}
-
-// Accept accepts all incoming connections and routes them to the correct
-// stream ID based on the most recent knock received.
-func (m *GRPCServerMuxer) Accept() (net.Conn, error) {
- session, err := m.session()
- if err != nil {
- return nil, fmt.Errorf("error establishing yamux session: %w", err)
- }
-
- for {
- conn, acceptErr := session.Accept()
-
- select {
- case id := <-m.knockCh:
- m.acceptMutex.Lock()
- acceptCh, ok := m.acceptChannels[id]
- m.acceptMutex.Unlock()
-
- if !ok {
- if conn != nil {
- _ = conn.Close()
- }
- return nil, fmt.Errorf("received knock on ID %d that doesn't have a listener", id)
- }
- m.logger.Debug("sending conn to brokered listener", "id", id)
- acceptCh <- acceptResult{
- conn: conn,
- err: acceptErr,
- }
- default:
- m.logger.Debug("sending conn to default listener")
- return conn, acceptErr
- }
- }
-}
-
-func (m *GRPCServerMuxer) Addr() net.Addr {
- return m.addr
-}
-
-func (m *GRPCServerMuxer) Close() error {
- session, err := m.session()
- if err != nil {
- return err
- }
-
- return session.Close()
-}
-
-func (m *GRPCServerMuxer) Enabled() bool {
- return m != nil
-}
-
-func (m *GRPCServerMuxer) Listener(id uint32, doneCh <-chan struct{}) (net.Listener, error) {
- sess, err := m.session()
- if err != nil {
- return nil, err
- }
-
- ln := newBlockedServerListener(sess.Addr(), doneCh)
- m.acceptMutex.Lock()
- m.acceptChannels[id] = ln.acceptCh
- m.acceptMutex.Unlock()
-
- return ln, nil
-}
-
-func (m *GRPCServerMuxer) Dial() (net.Conn, error) {
- sess, err := m.session()
- if err != nil {
- return nil, err
- }
-
- stream, err := sess.OpenStream()
- if err != nil {
- return nil, fmt.Errorf("error dialling new server stream: %w", err)
- }
-
- return stream, nil
-}
-
-func (m *GRPCServerMuxer) AcceptKnock(id uint32) error {
- m.knockCh <- id
- return nil
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go
deleted file mode 100644
index acc6dc9c..00000000
--- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go
+++ /dev/null
@@ -1,264 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.31.0
-// protoc (unknown)
-// source: internal/plugin/grpc_broker.proto
-
-package plugin
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type ConnInfo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
- Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"`
- Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"`
- Knock *ConnInfo_Knock `protobuf:"bytes,4,opt,name=knock,proto3" json:"knock,omitempty"`
-}
-
-func (x *ConnInfo) Reset() {
- *x = ConnInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_plugin_grpc_broker_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ConnInfo) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ConnInfo) ProtoMessage() {}
-
-func (x *ConnInfo) ProtoReflect() protoreflect.Message {
- mi := &file_internal_plugin_grpc_broker_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ConnInfo.ProtoReflect.Descriptor instead.
-func (*ConnInfo) Descriptor() ([]byte, []int) {
- return file_internal_plugin_grpc_broker_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *ConnInfo) GetServiceId() uint32 {
- if x != nil {
- return x.ServiceId
- }
- return 0
-}
-
-func (x *ConnInfo) GetNetwork() string {
- if x != nil {
- return x.Network
- }
- return ""
-}
-
-func (x *ConnInfo) GetAddress() string {
- if x != nil {
- return x.Address
- }
- return ""
-}
-
-func (x *ConnInfo) GetKnock() *ConnInfo_Knock {
- if x != nil {
- return x.Knock
- }
- return nil
-}
-
-type ConnInfo_Knock struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Knock bool `protobuf:"varint,1,opt,name=knock,proto3" json:"knock,omitempty"`
- Ack bool `protobuf:"varint,2,opt,name=ack,proto3" json:"ack,omitempty"`
- Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"`
-}
-
-func (x *ConnInfo_Knock) Reset() {
- *x = ConnInfo_Knock{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_plugin_grpc_broker_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ConnInfo_Knock) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ConnInfo_Knock) ProtoMessage() {}
-
-func (x *ConnInfo_Knock) ProtoReflect() protoreflect.Message {
- mi := &file_internal_plugin_grpc_broker_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ConnInfo_Knock.ProtoReflect.Descriptor instead.
-func (*ConnInfo_Knock) Descriptor() ([]byte, []int) {
- return file_internal_plugin_grpc_broker_proto_rawDescGZIP(), []int{0, 0}
-}
-
-func (x *ConnInfo_Knock) GetKnock() bool {
- if x != nil {
- return x.Knock
- }
- return false
-}
-
-func (x *ConnInfo_Knock) GetAck() bool {
- if x != nil {
- return x.Ack
- }
- return false
-}
-
-func (x *ConnInfo_Knock) GetError() string {
- if x != nil {
- return x.Error
- }
- return ""
-}
-
-var File_internal_plugin_grpc_broker_proto protoreflect.FileDescriptor
-
-var file_internal_plugin_grpc_broker_proto_rawDesc = []byte{
- 0x0a, 0x21, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69,
- 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x22, 0xd2, 0x01, 0x0a, 0x08,
- 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x73, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f,
- 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72,
- 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2c, 0x0a, 0x05, 0x6b,
- 0x6e, 0x6f, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x6c, 0x75,
- 0x67, 0x69, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4b, 0x6e, 0x6f,
- 0x63, 0x6b, 0x52, 0x05, 0x6b, 0x6e, 0x6f, 0x63, 0x6b, 0x1a, 0x45, 0x0a, 0x05, 0x4b, 0x6e, 0x6f,
- 0x63, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x6b, 0x6e, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x08, 0x52, 0x05, 0x6b, 0x6e, 0x6f, 0x63, 0x6b, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x63, 0x6b, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x61, 0x63, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72,
- 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72,
- 0x32, 0x43, 0x0a, 0x0a, 0x47, 0x52, 0x50, 0x43, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x12, 0x35,
- 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x10, 0x2e,
- 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x1a,
- 0x10, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66,
- 0x6f, 0x28, 0x01, 0x30, 0x01, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69,
- 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_internal_plugin_grpc_broker_proto_rawDescOnce sync.Once
- file_internal_plugin_grpc_broker_proto_rawDescData = file_internal_plugin_grpc_broker_proto_rawDesc
-)
-
-func file_internal_plugin_grpc_broker_proto_rawDescGZIP() []byte {
- file_internal_plugin_grpc_broker_proto_rawDescOnce.Do(func() {
- file_internal_plugin_grpc_broker_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_plugin_grpc_broker_proto_rawDescData)
- })
- return file_internal_plugin_grpc_broker_proto_rawDescData
-}
-
-var file_internal_plugin_grpc_broker_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
-var file_internal_plugin_grpc_broker_proto_goTypes = []interface{}{
- (*ConnInfo)(nil), // 0: plugin.ConnInfo
- (*ConnInfo_Knock)(nil), // 1: plugin.ConnInfo.Knock
-}
-var file_internal_plugin_grpc_broker_proto_depIdxs = []int32{
- 1, // 0: plugin.ConnInfo.knock:type_name -> plugin.ConnInfo.Knock
- 0, // 1: plugin.GRPCBroker.StartStream:input_type -> plugin.ConnInfo
- 0, // 2: plugin.GRPCBroker.StartStream:output_type -> plugin.ConnInfo
- 2, // [2:3] is the sub-list for method output_type
- 1, // [1:2] is the sub-list for method input_type
- 1, // [1:1] is the sub-list for extension type_name
- 1, // [1:1] is the sub-list for extension extendee
- 0, // [0:1] is the sub-list for field type_name
-}
-
-func init() { file_internal_plugin_grpc_broker_proto_init() }
-func file_internal_plugin_grpc_broker_proto_init() {
- if File_internal_plugin_grpc_broker_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_internal_plugin_grpc_broker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ConnInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_internal_plugin_grpc_broker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ConnInfo_Knock); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_internal_plugin_grpc_broker_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 2,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_internal_plugin_grpc_broker_proto_goTypes,
- DependencyIndexes: file_internal_plugin_grpc_broker_proto_depIdxs,
- MessageInfos: file_internal_plugin_grpc_broker_proto_msgTypes,
- }.Build()
- File_internal_plugin_grpc_broker_proto = out.File
- file_internal_plugin_grpc_broker_proto_rawDesc = nil
- file_internal_plugin_grpc_broker_proto_goTypes = nil
- file_internal_plugin_grpc_broker_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto
deleted file mode 100644
index c92cd645..00000000
--- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-syntax = "proto3";
-package plugin;
-option go_package = "./plugin";
-
-message ConnInfo {
- uint32 service_id = 1;
- string network = 2;
- string address = 3;
- message Knock {
- bool knock = 1;
- bool ack = 2;
- string error = 3;
- }
- Knock knock = 4;
-}
-
-service GRPCBroker {
- rpc StartStream(stream ConnInfo) returns (stream ConnInfo);
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go
deleted file mode 100644
index 1b0f8070..00000000
--- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
-// versions:
-// - protoc-gen-go-grpc v1.3.0
-// - protoc (unknown)
-// source: internal/plugin/grpc_broker.proto
-
-package plugin
-
-import (
- context "context"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-)
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.32.0 or later.
-const _ = grpc.SupportPackageIsVersion7
-
-const (
- GRPCBroker_StartStream_FullMethodName = "/plugin.GRPCBroker/StartStream"
-)
-
-// GRPCBrokerClient is the client API for GRPCBroker service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
-type GRPCBrokerClient interface {
- StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error)
-}
-
-type gRPCBrokerClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewGRPCBrokerClient(cc grpc.ClientConnInterface) GRPCBrokerClient {
- return &gRPCBrokerClient{cc}
-}
-
-func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) {
- stream, err := c.cc.NewStream(ctx, &GRPCBroker_ServiceDesc.Streams[0], GRPCBroker_StartStream_FullMethodName, opts...)
- if err != nil {
- return nil, err
- }
- x := &gRPCBrokerStartStreamClient{stream}
- return x, nil
-}
-
-type GRPCBroker_StartStreamClient interface {
- Send(*ConnInfo) error
- Recv() (*ConnInfo, error)
- grpc.ClientStream
-}
-
-type gRPCBrokerStartStreamClient struct {
- grpc.ClientStream
-}
-
-func (x *gRPCBrokerStartStreamClient) Send(m *ConnInfo) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *gRPCBrokerStartStreamClient) Recv() (*ConnInfo, error) {
- m := new(ConnInfo)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// GRPCBrokerServer is the server API for GRPCBroker service.
-// All implementations should embed UnimplementedGRPCBrokerServer
-// for forward compatibility
-type GRPCBrokerServer interface {
- StartStream(GRPCBroker_StartStreamServer) error
-}
-
-// UnimplementedGRPCBrokerServer should be embedded to have forward compatible implementations.
-type UnimplementedGRPCBrokerServer struct {
-}
-
-func (UnimplementedGRPCBrokerServer) StartStream(GRPCBroker_StartStreamServer) error {
- return status.Errorf(codes.Unimplemented, "method StartStream not implemented")
-}
-
-// UnsafeGRPCBrokerServer may be embedded to opt out of forward compatibility for this service.
-// Use of this interface is not recommended, as added methods to GRPCBrokerServer will
-// result in compilation errors.
-type UnsafeGRPCBrokerServer interface {
- mustEmbedUnimplementedGRPCBrokerServer()
-}
-
-func RegisterGRPCBrokerServer(s grpc.ServiceRegistrar, srv GRPCBrokerServer) {
- s.RegisterService(&GRPCBroker_ServiceDesc, srv)
-}
-
-func _GRPCBroker_StartStream_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(GRPCBrokerServer).StartStream(&gRPCBrokerStartStreamServer{stream})
-}
-
-type GRPCBroker_StartStreamServer interface {
- Send(*ConnInfo) error
- Recv() (*ConnInfo, error)
- grpc.ServerStream
-}
-
-type gRPCBrokerStartStreamServer struct {
- grpc.ServerStream
-}
-
-func (x *gRPCBrokerStartStreamServer) Send(m *ConnInfo) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *gRPCBrokerStartStreamServer) Recv() (*ConnInfo, error) {
- m := new(ConnInfo)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// GRPCBroker_ServiceDesc is the grpc.ServiceDesc for GRPCBroker service.
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy)
-var GRPCBroker_ServiceDesc = grpc.ServiceDesc{
- ServiceName: "plugin.GRPCBroker",
- HandlerType: (*GRPCBrokerServer)(nil),
- Methods: []grpc.MethodDesc{},
- Streams: []grpc.StreamDesc{
- {
- StreamName: "StartStream",
- Handler: _GRPCBroker_StartStream_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- },
- Metadata: "internal/plugin/grpc_broker.proto",
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go
deleted file mode 100644
index 8ca48e0d..00000000
--- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.31.0
-// protoc (unknown)
-// source: internal/plugin/grpc_controller.proto
-
-package plugin
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type Empty struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *Empty) Reset() {
- *x = Empty{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_plugin_grpc_controller_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Empty) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Empty) ProtoMessage() {}
-
-func (x *Empty) ProtoReflect() protoreflect.Message {
- mi := &file_internal_plugin_grpc_controller_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Empty.ProtoReflect.Descriptor instead.
-func (*Empty) Descriptor() ([]byte, []int) {
- return file_internal_plugin_grpc_controller_proto_rawDescGZIP(), []int{0}
-}
-
-var File_internal_plugin_grpc_controller_proto protoreflect.FileDescriptor
-
-var file_internal_plugin_grpc_controller_proto_rawDesc = []byte{
- 0x0a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69,
- 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65,
- 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x22,
- 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x32, 0x3a, 0x0a, 0x0e, 0x47, 0x52, 0x50, 0x43,
- 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x08, 0x53, 0x68,
- 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x0d, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e,
- 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0d, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x45,
- 0x6d, 0x70, 0x74, 0x79, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e,
- 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_internal_plugin_grpc_controller_proto_rawDescOnce sync.Once
- file_internal_plugin_grpc_controller_proto_rawDescData = file_internal_plugin_grpc_controller_proto_rawDesc
-)
-
-func file_internal_plugin_grpc_controller_proto_rawDescGZIP() []byte {
- file_internal_plugin_grpc_controller_proto_rawDescOnce.Do(func() {
- file_internal_plugin_grpc_controller_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_plugin_grpc_controller_proto_rawDescData)
- })
- return file_internal_plugin_grpc_controller_proto_rawDescData
-}
-
-var file_internal_plugin_grpc_controller_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_internal_plugin_grpc_controller_proto_goTypes = []interface{}{
- (*Empty)(nil), // 0: plugin.Empty
-}
-var file_internal_plugin_grpc_controller_proto_depIdxs = []int32{
- 0, // 0: plugin.GRPCController.Shutdown:input_type -> plugin.Empty
- 0, // 1: plugin.GRPCController.Shutdown:output_type -> plugin.Empty
- 1, // [1:2] is the sub-list for method output_type
- 0, // [0:1] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_internal_plugin_grpc_controller_proto_init() }
-func file_internal_plugin_grpc_controller_proto_init() {
- if File_internal_plugin_grpc_controller_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_internal_plugin_grpc_controller_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Empty); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_internal_plugin_grpc_controller_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 1,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_internal_plugin_grpc_controller_proto_goTypes,
- DependencyIndexes: file_internal_plugin_grpc_controller_proto_depIdxs,
- MessageInfos: file_internal_plugin_grpc_controller_proto_msgTypes,
- }.Build()
- File_internal_plugin_grpc_controller_proto = out.File
- file_internal_plugin_grpc_controller_proto_rawDesc = nil
- file_internal_plugin_grpc_controller_proto_goTypes = nil
- file_internal_plugin_grpc_controller_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto
deleted file mode 100644
index 2755fa63..00000000
--- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-syntax = "proto3";
-package plugin;
-option go_package = "./plugin";
-
-message Empty {
-}
-
-// The GRPCController is responsible for telling the plugin server to shutdown.
-service GRPCController {
- rpc Shutdown(Empty) returns (Empty);
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go
deleted file mode 100644
index 427611aa..00000000
--- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
-// versions:
-// - protoc-gen-go-grpc v1.3.0
-// - protoc (unknown)
-// source: internal/plugin/grpc_controller.proto
-
-package plugin
-
-import (
- context "context"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-)
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.32.0 or later.
-const _ = grpc.SupportPackageIsVersion7
-
-const (
- GRPCController_Shutdown_FullMethodName = "/plugin.GRPCController/Shutdown"
-)
-
-// GRPCControllerClient is the client API for GRPCController service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
-type GRPCControllerClient interface {
- Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error)
-}
-
-type gRPCControllerClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewGRPCControllerClient(cc grpc.ClientConnInterface) GRPCControllerClient {
- return &gRPCControllerClient{cc}
-}
-
-func (c *gRPCControllerClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) {
- out := new(Empty)
- err := c.cc.Invoke(ctx, GRPCController_Shutdown_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// GRPCControllerServer is the server API for GRPCController service.
-// All implementations should embed UnimplementedGRPCControllerServer
-// for forward compatibility
-type GRPCControllerServer interface {
- Shutdown(context.Context, *Empty) (*Empty, error)
-}
-
-// UnimplementedGRPCControllerServer should be embedded to have forward compatible implementations.
-type UnimplementedGRPCControllerServer struct {
-}
-
-func (UnimplementedGRPCControllerServer) Shutdown(context.Context, *Empty) (*Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Shutdown not implemented")
-}
-
-// UnsafeGRPCControllerServer may be embedded to opt out of forward compatibility for this service.
-// Use of this interface is not recommended, as added methods to GRPCControllerServer will
-// result in compilation errors.
-type UnsafeGRPCControllerServer interface {
- mustEmbedUnimplementedGRPCControllerServer()
-}
-
-func RegisterGRPCControllerServer(s grpc.ServiceRegistrar, srv GRPCControllerServer) {
- s.RegisterService(&GRPCController_ServiceDesc, srv)
-}
-
-func _GRPCController_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(Empty)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(GRPCControllerServer).Shutdown(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: GRPCController_Shutdown_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(GRPCControllerServer).Shutdown(ctx, req.(*Empty))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-// GRPCController_ServiceDesc is the grpc.ServiceDesc for GRPCController service.
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy)
-var GRPCController_ServiceDesc = grpc.ServiceDesc{
- ServiceName: "plugin.GRPCController",
- HandlerType: (*GRPCControllerServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Shutdown",
- Handler: _GRPCController_Shutdown_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "internal/plugin/grpc_controller.proto",
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go
deleted file mode 100644
index 139cbb4a..00000000
--- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go
+++ /dev/null
@@ -1,225 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.31.0
-// protoc (unknown)
-// source: internal/plugin/grpc_stdio.proto
-
-package plugin
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- emptypb "google.golang.org/protobuf/types/known/emptypb"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type StdioData_Channel int32
-
-const (
- StdioData_INVALID StdioData_Channel = 0
- StdioData_STDOUT StdioData_Channel = 1
- StdioData_STDERR StdioData_Channel = 2
-)
-
-// Enum value maps for StdioData_Channel.
-var (
- StdioData_Channel_name = map[int32]string{
- 0: "INVALID",
- 1: "STDOUT",
- 2: "STDERR",
- }
- StdioData_Channel_value = map[string]int32{
- "INVALID": 0,
- "STDOUT": 1,
- "STDERR": 2,
- }
-)
-
-func (x StdioData_Channel) Enum() *StdioData_Channel {
- p := new(StdioData_Channel)
- *p = x
- return p
-}
-
-func (x StdioData_Channel) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (StdioData_Channel) Descriptor() protoreflect.EnumDescriptor {
- return file_internal_plugin_grpc_stdio_proto_enumTypes[0].Descriptor()
-}
-
-func (StdioData_Channel) Type() protoreflect.EnumType {
- return &file_internal_plugin_grpc_stdio_proto_enumTypes[0]
-}
-
-func (x StdioData_Channel) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use StdioData_Channel.Descriptor instead.
-func (StdioData_Channel) EnumDescriptor() ([]byte, []int) {
- return file_internal_plugin_grpc_stdio_proto_rawDescGZIP(), []int{0, 0}
-}
-
-// StdioData is a single chunk of stdout or stderr data that is streamed
-// from GRPCStdio.
-type StdioData struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Channel StdioData_Channel `protobuf:"varint,1,opt,name=channel,proto3,enum=plugin.StdioData_Channel" json:"channel,omitempty"`
- Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
-}
-
-func (x *StdioData) Reset() {
- *x = StdioData{}
- if protoimpl.UnsafeEnabled {
- mi := &file_internal_plugin_grpc_stdio_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *StdioData) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StdioData) ProtoMessage() {}
-
-func (x *StdioData) ProtoReflect() protoreflect.Message {
- mi := &file_internal_plugin_grpc_stdio_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StdioData.ProtoReflect.Descriptor instead.
-func (*StdioData) Descriptor() ([]byte, []int) {
- return file_internal_plugin_grpc_stdio_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *StdioData) GetChannel() StdioData_Channel {
- if x != nil {
- return x.Channel
- }
- return StdioData_INVALID
-}
-
-func (x *StdioData) GetData() []byte {
- if x != nil {
- return x.Data
- }
- return nil
-}
-
-var File_internal_plugin_grpc_stdio_proto protoreflect.FileDescriptor
-
-var file_internal_plugin_grpc_stdio_proto_rawDesc = []byte{
- 0x0a, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69,
- 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x74, 0x64, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x12, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74,
- 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x84, 0x01, 0x0a, 0x09, 0x53, 0x74, 0x64, 0x69,
- 0x6f, 0x44, 0x61, 0x74, 0x61, 0x12, 0x33, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e,
- 0x53, 0x74, 0x64, 0x69, 0x6f, 0x44, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65,
- 0x6c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61,
- 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x2e,
- 0x0a, 0x07, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56,
- 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x44, 0x4f, 0x55, 0x54,
- 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x44, 0x45, 0x52, 0x52, 0x10, 0x02, 0x32, 0x47,
- 0x0a, 0x09, 0x47, 0x52, 0x50, 0x43, 0x53, 0x74, 0x64, 0x69, 0x6f, 0x12, 0x3a, 0x0a, 0x0b, 0x53,
- 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x64, 0x69, 0x6f, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70,
- 0x74, 0x79, 0x1a, 0x11, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x64, 0x69,
- 0x6f, 0x44, 0x61, 0x74, 0x61, 0x30, 0x01, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x6c, 0x75,
- 0x67, 0x69, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_internal_plugin_grpc_stdio_proto_rawDescOnce sync.Once
- file_internal_plugin_grpc_stdio_proto_rawDescData = file_internal_plugin_grpc_stdio_proto_rawDesc
-)
-
-func file_internal_plugin_grpc_stdio_proto_rawDescGZIP() []byte {
- file_internal_plugin_grpc_stdio_proto_rawDescOnce.Do(func() {
- file_internal_plugin_grpc_stdio_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_plugin_grpc_stdio_proto_rawDescData)
- })
- return file_internal_plugin_grpc_stdio_proto_rawDescData
-}
-
-var file_internal_plugin_grpc_stdio_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_internal_plugin_grpc_stdio_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_internal_plugin_grpc_stdio_proto_goTypes = []interface{}{
- (StdioData_Channel)(0), // 0: plugin.StdioData.Channel
- (*StdioData)(nil), // 1: plugin.StdioData
- (*emptypb.Empty)(nil), // 2: google.protobuf.Empty
-}
-var file_internal_plugin_grpc_stdio_proto_depIdxs = []int32{
- 0, // 0: plugin.StdioData.channel:type_name -> plugin.StdioData.Channel
- 2, // 1: plugin.GRPCStdio.StreamStdio:input_type -> google.protobuf.Empty
- 1, // 2: plugin.GRPCStdio.StreamStdio:output_type -> plugin.StdioData
- 2, // [2:3] is the sub-list for method output_type
- 1, // [1:2] is the sub-list for method input_type
- 1, // [1:1] is the sub-list for extension type_name
- 1, // [1:1] is the sub-list for extension extendee
- 0, // [0:1] is the sub-list for field type_name
-}
-
-func init() { file_internal_plugin_grpc_stdio_proto_init() }
-func file_internal_plugin_grpc_stdio_proto_init() {
- if File_internal_plugin_grpc_stdio_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_internal_plugin_grpc_stdio_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StdioData); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_internal_plugin_grpc_stdio_proto_rawDesc,
- NumEnums: 1,
- NumMessages: 1,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_internal_plugin_grpc_stdio_proto_goTypes,
- DependencyIndexes: file_internal_plugin_grpc_stdio_proto_depIdxs,
- EnumInfos: file_internal_plugin_grpc_stdio_proto_enumTypes,
- MessageInfos: file_internal_plugin_grpc_stdio_proto_msgTypes,
- }.Build()
- File_internal_plugin_grpc_stdio_proto = out.File
- file_internal_plugin_grpc_stdio_proto_rawDesc = nil
- file_internal_plugin_grpc_stdio_proto_goTypes = nil
- file_internal_plugin_grpc_stdio_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto
deleted file mode 100644
index f48ac76c..00000000
--- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-syntax = "proto3";
-package plugin;
-option go_package = "./plugin";
-
-import "google/protobuf/empty.proto";
-
-// GRPCStdio is a service that is automatically run by the plugin process
-// to stream any stdout/err data so that it can be mirrored on the plugin
-// host side.
-service GRPCStdio {
- // StreamStdio returns a stream that contains all the stdout/stderr.
- // This RPC endpoint must only be called ONCE. Once stdio data is consumed
- // it is not sent again.
- //
- // Callers should connect early to prevent blocking on the plugin process.
- rpc StreamStdio(google.protobuf.Empty) returns (stream StdioData);
-}
-
-// StdioData is a single chunk of stdout or stderr data that is streamed
-// from GRPCStdio.
-message StdioData {
- enum Channel {
- INVALID = 0;
- STDOUT = 1;
- STDERR = 2;
- }
-
- Channel channel = 1;
- bytes data = 2;
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go
deleted file mode 100644
index f82b1503..00000000
--- a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
-// versions:
-// - protoc-gen-go-grpc v1.3.0
-// - protoc (unknown)
-// source: internal/plugin/grpc_stdio.proto
-
-package plugin
-
-import (
- context "context"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
- emptypb "google.golang.org/protobuf/types/known/emptypb"
-)
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.32.0 or later.
-const _ = grpc.SupportPackageIsVersion7
-
-const (
- GRPCStdio_StreamStdio_FullMethodName = "/plugin.GRPCStdio/StreamStdio"
-)
-
-// GRPCStdioClient is the client API for GRPCStdio service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
-type GRPCStdioClient interface {
- // StreamStdio returns a stream that contains all the stdout/stderr.
- // This RPC endpoint must only be called ONCE. Once stdio data is consumed
- // it is not sent again.
- //
- // Callers should connect early to prevent blocking on the plugin process.
- StreamStdio(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error)
-}
-
-type gRPCStdioClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewGRPCStdioClient(cc grpc.ClientConnInterface) GRPCStdioClient {
- return &gRPCStdioClient{cc}
-}
-
-func (c *gRPCStdioClient) StreamStdio(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) {
- stream, err := c.cc.NewStream(ctx, &GRPCStdio_ServiceDesc.Streams[0], GRPCStdio_StreamStdio_FullMethodName, opts...)
- if err != nil {
- return nil, err
- }
- x := &gRPCStdioStreamStdioClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- return x, nil
-}
-
-type GRPCStdio_StreamStdioClient interface {
- Recv() (*StdioData, error)
- grpc.ClientStream
-}
-
-type gRPCStdioStreamStdioClient struct {
- grpc.ClientStream
-}
-
-func (x *gRPCStdioStreamStdioClient) Recv() (*StdioData, error) {
- m := new(StdioData)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// GRPCStdioServer is the server API for GRPCStdio service.
-// All implementations should embed UnimplementedGRPCStdioServer
-// for forward compatibility
-type GRPCStdioServer interface {
- // StreamStdio returns a stream that contains all the stdout/stderr.
- // This RPC endpoint must only be called ONCE. Once stdio data is consumed
- // it is not sent again.
- //
- // Callers should connect early to prevent blocking on the plugin process.
- StreamStdio(*emptypb.Empty, GRPCStdio_StreamStdioServer) error
-}
-
-// UnimplementedGRPCStdioServer should be embedded to have forward compatible implementations.
-type UnimplementedGRPCStdioServer struct {
-}
-
-func (UnimplementedGRPCStdioServer) StreamStdio(*emptypb.Empty, GRPCStdio_StreamStdioServer) error {
- return status.Errorf(codes.Unimplemented, "method StreamStdio not implemented")
-}
-
-// UnsafeGRPCStdioServer may be embedded to opt out of forward compatibility for this service.
-// Use of this interface is not recommended, as added methods to GRPCStdioServer will
-// result in compilation errors.
-type UnsafeGRPCStdioServer interface {
- mustEmbedUnimplementedGRPCStdioServer()
-}
-
-func RegisterGRPCStdioServer(s grpc.ServiceRegistrar, srv GRPCStdioServer) {
- s.RegisterService(&GRPCStdio_ServiceDesc, srv)
-}
-
-func _GRPCStdio_StreamStdio_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(emptypb.Empty)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(GRPCStdioServer).StreamStdio(m, &gRPCStdioStreamStdioServer{stream})
-}
-
-type GRPCStdio_StreamStdioServer interface {
- Send(*StdioData) error
- grpc.ServerStream
-}
-
-type gRPCStdioStreamStdioServer struct {
- grpc.ServerStream
-}
-
-func (x *gRPCStdioStreamStdioServer) Send(m *StdioData) error {
- return x.ServerStream.SendMsg(m)
-}
-
-// GRPCStdio_ServiceDesc is the grpc.ServiceDesc for GRPCStdio service.
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy)
-var GRPCStdio_ServiceDesc = grpc.ServiceDesc{
- ServiceName: "plugin.GRPCStdio",
- HandlerType: (*GRPCStdioServer)(nil),
- Methods: []grpc.MethodDesc{},
- Streams: []grpc.StreamDesc{
- {
- StreamName: "StreamStdio",
- Handler: _GRPCStdio_StreamStdio_Handler,
- ServerStreams: true,
- },
- },
- Metadata: "internal/plugin/grpc_stdio.proto",
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/log_entry.go b/vendor/github.com/hashicorp/go-plugin/log_entry.go
deleted file mode 100644
index ab963d56..00000000
--- a/vendor/github.com/hashicorp/go-plugin/log_entry.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package plugin
-
-import (
- "encoding/json"
- "time"
-)
-
-// logEntry is the JSON payload that gets sent to Stderr from the plugin to the host
-type logEntry struct {
- Message string `json:"@message"`
- Level string `json:"@level"`
- Timestamp time.Time `json:"timestamp"`
- KVPairs []*logEntryKV `json:"kv_pairs"`
-}
-
-// logEntryKV is a key value pair within the Output payload
-type logEntryKV struct {
- Key string `json:"key"`
- Value interface{} `json:"value"`
-}
-
-// flattenKVPairs is used to flatten KVPair slice into []interface{}
-// for hclog consumption.
-func flattenKVPairs(kvs []*logEntryKV) []interface{} {
- var result []interface{}
- for _, kv := range kvs {
- result = append(result, kv.Key)
- result = append(result, kv.Value)
- }
-
- return result
-}
-
-// parseJSON handles parsing JSON output
-func parseJSON(input []byte) (*logEntry, error) {
- var raw map[string]interface{}
- entry := &logEntry{}
-
- err := json.Unmarshal(input, &raw)
- if err != nil {
- return nil, err
- }
-
- // Parse hclog-specific objects
- if v, ok := raw["@message"]; ok {
- entry.Message = v.(string)
- delete(raw, "@message")
- }
-
- if v, ok := raw["@level"]; ok {
- entry.Level = v.(string)
- delete(raw, "@level")
- }
-
- if v, ok := raw["@timestamp"]; ok {
- t, err := time.Parse("2006-01-02T15:04:05.000000Z07:00", v.(string))
- if err != nil {
- return nil, err
- }
- entry.Timestamp = t
- delete(raw, "@timestamp")
- }
-
- // Parse dynamic KV args from the hclog payload.
- for k, v := range raw {
- entry.KVPairs = append(entry.KVPairs, &logEntryKV{
- Key: k,
- Value: v,
- })
- }
-
- return entry, nil
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/mtls.go b/vendor/github.com/hashicorp/go-plugin/mtls.go
deleted file mode 100644
index 09ecafaf..00000000
--- a/vendor/github.com/hashicorp/go-plugin/mtls.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package plugin
-
-import (
- "bytes"
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rand"
- "crypto/x509"
- "crypto/x509/pkix"
- "encoding/pem"
- "math/big"
- "time"
-)
-
-// generateCert generates a temporary certificate for plugin authentication. The
-// certificate and private key are returns in PEM format.
-func generateCert() (cert []byte, privateKey []byte, err error) {
- key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
- if err != nil {
- return nil, nil, err
- }
-
- serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
- sn, err := rand.Int(rand.Reader, serialNumberLimit)
- if err != nil {
- return nil, nil, err
- }
-
- host := "localhost"
-
- template := &x509.Certificate{
- Subject: pkix.Name{
- CommonName: host,
- Organization: []string{"HashiCorp"},
- },
- DNSNames: []string{host},
- ExtKeyUsage: []x509.ExtKeyUsage{
- x509.ExtKeyUsageClientAuth,
- x509.ExtKeyUsageServerAuth,
- },
- KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign,
- BasicConstraintsValid: true,
- SerialNumber: sn,
- NotBefore: time.Now().Add(-30 * time.Second),
- NotAfter: time.Now().Add(262980 * time.Hour),
- IsCA: true,
- }
-
- der, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key)
- if err != nil {
- return nil, nil, err
- }
-
- var certOut bytes.Buffer
- if err := pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil {
- return nil, nil, err
- }
-
- keyBytes, err := x509.MarshalECPrivateKey(key)
- if err != nil {
- return nil, nil, err
- }
-
- var keyOut bytes.Buffer
- if err := pem.Encode(&keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil {
- return nil, nil, err
- }
-
- cert = certOut.Bytes()
- privateKey = keyOut.Bytes()
-
- return cert, privateKey, nil
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/mux_broker.go b/vendor/github.com/hashicorp/go-plugin/mux_broker.go
deleted file mode 100644
index 4eb1208f..00000000
--- a/vendor/github.com/hashicorp/go-plugin/mux_broker.go
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package plugin
-
-import (
- "encoding/binary"
- "fmt"
- "log"
- "net"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/hashicorp/yamux"
-)
-
-// MuxBroker is responsible for brokering multiplexed connections by unique ID.
-//
-// It is used by plugins to multiplex multiple RPC connections and data
-// streams on top of a single connection between the plugin process and the
-// host process.
-//
-// This allows a plugin to request a channel with a specific ID to connect to
-// or accept a connection from, and the broker handles the details of
-// holding these channels open while they're being negotiated.
-//
-// The Plugin interface has access to these for both Server and Client.
-// The broker can be used by either (optionally) to reserve and connect to
-// new multiplexed streams. This is useful for complex args and return values,
-// or anything else you might need a data stream for.
-type MuxBroker struct {
- nextId uint32
- session *yamux.Session
- streams map[uint32]*muxBrokerPending
-
- sync.Mutex
-}
-
-type muxBrokerPending struct {
- ch chan net.Conn
- doneCh chan struct{}
-}
-
-func newMuxBroker(s *yamux.Session) *MuxBroker {
- return &MuxBroker{
- session: s,
- streams: make(map[uint32]*muxBrokerPending),
- }
-}
-
-// Accept accepts a connection by ID.
-//
-// This should not be called multiple times with the same ID at one time.
-func (m *MuxBroker) Accept(id uint32) (net.Conn, error) {
- var c net.Conn
- p := m.getStream(id)
- select {
- case c = <-p.ch:
- close(p.doneCh)
- case <-time.After(5 * time.Second):
- m.Lock()
- defer m.Unlock()
- delete(m.streams, id)
-
- return nil, fmt.Errorf("timeout waiting for accept")
- }
-
- // Ack our connection
- if err := binary.Write(c, binary.LittleEndian, id); err != nil {
- c.Close()
- return nil, err
- }
-
- return c, nil
-}
-
-// AcceptAndServe is used to accept a specific stream ID and immediately
-// serve an RPC server on that stream ID. This is used to easily serve
-// complex arguments.
-//
-// The served interface is always registered to the "Plugin" name.
-func (m *MuxBroker) AcceptAndServe(id uint32, v interface{}) {
- conn, err := m.Accept(id)
- if err != nil {
- log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err)
- return
- }
-
- serve(conn, "Plugin", v)
-}
-
-// Close closes the connection and all sub-connections.
-func (m *MuxBroker) Close() error {
- return m.session.Close()
-}
-
-// Dial opens a connection by ID.
-func (m *MuxBroker) Dial(id uint32) (net.Conn, error) {
- // Open the stream
- stream, err := m.session.OpenStream()
- if err != nil {
- return nil, err
- }
-
- // Write the stream ID onto the wire.
- if err := binary.Write(stream, binary.LittleEndian, id); err != nil {
- stream.Close()
- return nil, err
- }
-
- // Read the ack that we connected. Then we're off!
- var ack uint32
- if err := binary.Read(stream, binary.LittleEndian, &ack); err != nil {
- stream.Close()
- return nil, err
- }
- if ack != id {
- stream.Close()
- return nil, fmt.Errorf("bad ack: %d (expected %d)", ack, id)
- }
-
- return stream, nil
-}
-
-// NextId returns a unique ID to use next.
-//
-// It is possible for very long-running plugin hosts to wrap this value,
-// though it would require a very large amount of RPC calls. In practice
-// we've never seen it happen.
-func (m *MuxBroker) NextId() uint32 {
- return atomic.AddUint32(&m.nextId, 1)
-}
-
-// Run starts the brokering and should be executed in a goroutine, since it
-// blocks forever, or until the session closes.
-//
-// Uses of MuxBroker never need to call this. It is called internally by
-// the plugin host/client.
-func (m *MuxBroker) Run() {
- for {
- stream, err := m.session.AcceptStream()
- if err != nil {
- // Once we receive an error, just exit
- break
- }
-
- // Read the stream ID from the stream
- var id uint32
- if err := binary.Read(stream, binary.LittleEndian, &id); err != nil {
- stream.Close()
- continue
- }
-
- // Initialize the waiter
- p := m.getStream(id)
- select {
- case p.ch <- stream:
- default:
- }
-
- // Wait for a timeout
- go m.timeoutWait(id, p)
- }
-}
-
-func (m *MuxBroker) getStream(id uint32) *muxBrokerPending {
- m.Lock()
- defer m.Unlock()
-
- p, ok := m.streams[id]
- if ok {
- return p
- }
-
- m.streams[id] = &muxBrokerPending{
- ch: make(chan net.Conn, 1),
- doneCh: make(chan struct{}),
- }
- return m.streams[id]
-}
-
-func (m *MuxBroker) timeoutWait(id uint32, p *muxBrokerPending) {
- // Wait for the stream to either be picked up and connected, or
- // for a timeout.
- timeout := false
- select {
- case <-p.doneCh:
- case <-time.After(5 * time.Second):
- timeout = true
- }
-
- m.Lock()
- defer m.Unlock()
-
- // Delete the stream so no one else can grab it
- delete(m.streams, id)
-
- // If we timed out, then check if we have a channel in the buffer,
- // and if so, close it.
- if timeout {
- select {
- case s := <-p.ch:
- s.Close()
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go
deleted file mode 100644
index 184749b9..00000000
--- a/vendor/github.com/hashicorp/go-plugin/plugin.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-// The plugin package exposes functions and helpers for communicating to
-// plugins which are implemented as standalone binary applications.
-//
-// plugin.Client fully manages the lifecycle of executing the application,
-// connecting to it, and returning the RPC client for dispensing plugins.
-//
-// plugin.Serve fully manages listeners to expose an RPC server from a binary
-// that plugin.Client can connect to.
-package plugin
-
-import (
- "context"
- "errors"
- "net/rpc"
-
- "google.golang.org/grpc"
-)
-
-// Plugin is the interface that is implemented to serve/connect to an
-// inteface implementation.
-type Plugin interface {
- // Server should return the RPC server compatible struct to serve
- // the methods that the Client calls over net/rpc.
- Server(*MuxBroker) (interface{}, error)
-
- // Client returns an interface implementation for the plugin you're
- // serving that communicates to the server end of the plugin.
- Client(*MuxBroker, *rpc.Client) (interface{}, error)
-}
-
-// GRPCPlugin is the interface that is implemented to serve/connect to
-// a plugin over gRPC.
-type GRPCPlugin interface {
- // GRPCServer should register this plugin for serving with the
- // given GRPCServer. Unlike Plugin.Server, this is only called once
- // since gRPC plugins serve singletons.
- GRPCServer(*GRPCBroker, *grpc.Server) error
-
- // GRPCClient should return the interface implementation for the plugin
- // you're serving via gRPC. The provided context will be canceled by
- // go-plugin in the event of the plugin process exiting.
- GRPCClient(context.Context, *GRPCBroker, *grpc.ClientConn) (interface{}, error)
-}
-
-// NetRPCUnsupportedPlugin implements Plugin but returns errors for the
-// Server and Client functions. This will effectively disable support for
-// net/rpc based plugins.
-//
-// This struct can be embedded in your struct.
-type NetRPCUnsupportedPlugin struct{}
-
-func (p NetRPCUnsupportedPlugin) Server(*MuxBroker) (interface{}, error) {
- return nil, errors.New("net/rpc plugin protocol not supported")
-}
-
-func (p NetRPCUnsupportedPlugin) Client(*MuxBroker, *rpc.Client) (interface{}, error) {
- return nil, errors.New("net/rpc plugin protocol not supported")
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/process.go b/vendor/github.com/hashicorp/go-plugin/process.go
deleted file mode 100644
index b8844636..00000000
--- a/vendor/github.com/hashicorp/go-plugin/process.go
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package plugin
diff --git a/vendor/github.com/hashicorp/go-plugin/protocol.go b/vendor/github.com/hashicorp/go-plugin/protocol.go
deleted file mode 100644
index e4b7be38..00000000
--- a/vendor/github.com/hashicorp/go-plugin/protocol.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package plugin
-
-import (
- "io"
- "net"
-)
-
-// Protocol is an enum representing the types of protocols.
-type Protocol string
-
-const (
- ProtocolInvalid Protocol = ""
- ProtocolNetRPC Protocol = "netrpc"
- ProtocolGRPC Protocol = "grpc"
-)
-
-// ServerProtocol is an interface that must be implemented for new plugin
-// protocols to be servers.
-type ServerProtocol interface {
- // Init is called once to configure and initialize the protocol, but
- // not start listening. This is the point at which all validation should
- // be done and errors returned.
- Init() error
-
- // Config is extra configuration to be outputted to stdout. This will
- // be automatically base64 encoded to ensure it can be parsed properly.
- // This can be an empty string if additional configuration is not needed.
- Config() string
-
- // Serve is called to serve connections on the given listener. This should
- // continue until the listener is closed.
- Serve(net.Listener)
-}
-
-// ClientProtocol is an interface that must be implemented for new plugin
-// protocols to be clients.
-type ClientProtocol interface {
- io.Closer
-
- // Dispense dispenses a new instance of the plugin with the given name.
- Dispense(string) (interface{}, error)
-
- // Ping checks that the client connection is still healthy.
- Ping() error
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go
deleted file mode 100644
index 142454df..00000000
--- a/vendor/github.com/hashicorp/go-plugin/rpc_client.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package plugin
-
-import (
- "crypto/tls"
- "fmt"
- "io"
- "net"
- "net/rpc"
-
- "github.com/hashicorp/yamux"
-)
-
-// RPCClient connects to an RPCServer over net/rpc to dispense plugin types.
-type RPCClient struct {
- broker *MuxBroker
- control *rpc.Client
- plugins map[string]Plugin
-
- // These are the streams used for the various stdout/err overrides
- stdout, stderr net.Conn
-}
-
-// newRPCClient creates a new RPCClient. The Client argument is expected
-// to be successfully started already with a lock held.
-func newRPCClient(c *Client) (*RPCClient, error) {
- // Connect to the client
- conn, err := net.Dial(c.address.Network(), c.address.String())
- if err != nil {
- return nil, err
- }
- if tcpConn, ok := conn.(*net.TCPConn); ok {
- // Make sure to set keep alive so that the connection doesn't die
- tcpConn.SetKeepAlive(true)
- }
-
- if c.config.TLSConfig != nil {
- conn = tls.Client(conn, c.config.TLSConfig)
- }
-
- // Create the actual RPC client
- result, err := NewRPCClient(conn, c.config.Plugins)
- if err != nil {
- conn.Close()
- return nil, err
- }
-
- // Begin the stream syncing so that stdin, out, err work properly
- err = result.SyncStreams(
- c.config.SyncStdout,
- c.config.SyncStderr)
- if err != nil {
- result.Close()
- return nil, err
- }
-
- return result, nil
-}
-
-// NewRPCClient creates a client from an already-open connection-like value.
-// Dial is typically used instead.
-func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) {
- // Create the yamux client so we can multiplex
- mux, err := yamux.Client(conn, nil)
- if err != nil {
- conn.Close()
- return nil, err
- }
-
- // Connect to the control stream.
- control, err := mux.Open()
- if err != nil {
- mux.Close()
- return nil, err
- }
-
- // Connect stdout, stderr streams
- stdstream := make([]net.Conn, 2)
- for i, _ := range stdstream {
- stdstream[i], err = mux.Open()
- if err != nil {
- mux.Close()
- return nil, err
- }
- }
-
- // Create the broker and start it up
- broker := newMuxBroker(mux)
- go broker.Run()
-
- // Build the client using our broker and control channel.
- return &RPCClient{
- broker: broker,
- control: rpc.NewClient(control),
- plugins: plugins,
- stdout: stdstream[0],
- stderr: stdstream[1],
- }, nil
-}
-
-// SyncStreams should be called to enable syncing of stdout,
-// stderr with the plugin.
-//
-// This will return immediately and the syncing will continue to happen
-// in the background. You do not need to launch this in a goroutine itself.
-//
-// This should never be called multiple times.
-func (c *RPCClient) SyncStreams(stdout io.Writer, stderr io.Writer) error {
- go copyStream("stdout", stdout, c.stdout)
- go copyStream("stderr", stderr, c.stderr)
- return nil
-}
-
-// Close closes the connection. The client is no longer usable after this
-// is called.
-func (c *RPCClient) Close() error {
- // Call the control channel and ask it to gracefully exit. If this
- // errors, then we save it so that we always return an error but we
- // want to try to close the other channels anyways.
- var empty struct{}
- returnErr := c.control.Call("Control.Quit", true, &empty)
-
- // Close the other streams we have
- if err := c.control.Close(); err != nil {
- return err
- }
- if err := c.stdout.Close(); err != nil {
- return err
- }
- if err := c.stderr.Close(); err != nil {
- return err
- }
- if err := c.broker.Close(); err != nil {
- return err
- }
-
- // Return back the error we got from Control.Quit. This is very important
- // since we MUST return non-nil error if this fails so that Client.Kill
- // will properly try a process.Kill.
- return returnErr
-}
-
-func (c *RPCClient) Dispense(name string) (interface{}, error) {
- p, ok := c.plugins[name]
- if !ok {
- return nil, fmt.Errorf("unknown plugin type: %s", name)
- }
-
- var id uint32
- if err := c.control.Call(
- "Dispenser.Dispense", name, &id); err != nil {
- return nil, err
- }
-
- conn, err := c.broker.Dial(id)
- if err != nil {
- return nil, err
- }
-
- return p.Client(c.broker, rpc.NewClient(conn))
-}
-
-// Ping pings the connection to ensure it is still alive.
-//
-// The error from the RPC call is returned exactly if you want to inspect
-// it for further error analysis. Any error returned from here would indicate
-// that the connection to the plugin is not healthy.
-func (c *RPCClient) Ping() error {
- var empty struct{}
- return c.control.Call("Control.Ping", true, &empty)
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go
deleted file mode 100644
index cec0a3d9..00000000
--- a/vendor/github.com/hashicorp/go-plugin/rpc_server.go
+++ /dev/null
@@ -1,209 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package plugin
-
-import (
- "errors"
- "fmt"
- "io"
- "log"
- "net"
- "net/rpc"
- "sync"
-
- "github.com/hashicorp/yamux"
-)
-
-// RPCServer listens for network connections and then dispenses interface
-// implementations over net/rpc.
-//
-// After setting the fields below, they shouldn't be read again directly
-// from the structure which may be reading/writing them concurrently.
-type RPCServer struct {
- Plugins map[string]Plugin
-
- // Stdout, Stderr are what this server will use instead of the
- // normal stdin/out/err. This is because due to the multi-process nature
- // of our plugin system, we can't use the normal process values so we
- // make our own custom one we pipe across.
- Stdout io.Reader
- Stderr io.Reader
-
- // DoneCh should be set to a non-nil channel that will be closed
- // when the control requests the RPC server to end.
- DoneCh chan<- struct{}
-
- lock sync.Mutex
-}
-
-// ServerProtocol impl.
-func (s *RPCServer) Init() error { return nil }
-
-// ServerProtocol impl.
-func (s *RPCServer) Config() string { return "" }
-
-// ServerProtocol impl.
-func (s *RPCServer) Serve(lis net.Listener) {
- defer s.done()
-
- for {
- conn, err := lis.Accept()
- if err != nil {
- severity := "ERR"
- if errors.Is(err, net.ErrClosed) {
- severity = "DEBUG"
- }
- log.Printf("[%s] plugin: plugin server: %s", severity, err)
- return
- }
-
- go s.ServeConn(conn)
- }
-}
-
-// ServeConn runs a single connection.
-//
-// ServeConn blocks, serving the connection until the client hangs up.
-func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) {
- // First create the yamux server to wrap this connection
- mux, err := yamux.Server(conn, nil)
- if err != nil {
- conn.Close()
- log.Printf("[ERR] plugin: error creating yamux server: %s", err)
- return
- }
-
- // Accept the control connection
- control, err := mux.Accept()
- if err != nil {
- mux.Close()
- if err != io.EOF {
- log.Printf("[ERR] plugin: error accepting control connection: %s", err)
- }
-
- return
- }
-
- // Connect the stdstreams (in, out, err)
- stdstream := make([]net.Conn, 2)
- for i := range stdstream {
- stdstream[i], err = mux.Accept()
- if err != nil {
- mux.Close()
- log.Printf("[ERR] plugin: accepting stream %d: %s", i, err)
- return
- }
- }
-
- // Copy std streams out to the proper place
- go copyStream("stdout", stdstream[0], s.Stdout)
- go copyStream("stderr", stdstream[1], s.Stderr)
-
- // Create the broker and start it up
- broker := newMuxBroker(mux)
- go broker.Run()
-
- // Use the control connection to build the dispenser and serve the
- // connection.
- server := rpc.NewServer()
- server.RegisterName("Control", &controlServer{
- server: s,
- })
- server.RegisterName("Dispenser", &dispenseServer{
- broker: broker,
- plugins: s.Plugins,
- })
- server.ServeConn(control)
-}
-
-// done is called internally by the control server to trigger the
-// doneCh to close which is listened to by the main process to cleanly
-// exit.
-func (s *RPCServer) done() {
- s.lock.Lock()
- defer s.lock.Unlock()
-
- if s.DoneCh != nil {
- close(s.DoneCh)
- s.DoneCh = nil
- }
-}
-
-// dispenseServer dispenses variousinterface implementations for Terraform.
-type controlServer struct {
- server *RPCServer
-}
-
-// Ping can be called to verify the connection (and likely the binary)
-// is still alive to a plugin.
-func (c *controlServer) Ping(
- null bool, response *struct{},
-) error {
- *response = struct{}{}
- return nil
-}
-
-func (c *controlServer) Quit(
- null bool, response *struct{},
-) error {
- // End the server
- c.server.done()
-
- // Always return true
- *response = struct{}{}
-
- return nil
-}
-
-// dispenseServer dispenses variousinterface implementations for Terraform.
-type dispenseServer struct {
- broker *MuxBroker
- plugins map[string]Plugin
-}
-
-func (d *dispenseServer) Dispense(
- name string, response *uint32,
-) error {
- // Find the function to create this implementation
- p, ok := d.plugins[name]
- if !ok {
- return fmt.Errorf("unknown plugin type: %s", name)
- }
-
- // Create the implementation first so we know if there is an error.
- impl, err := p.Server(d.broker)
- if err != nil {
- // We turn the error into an errors error so that it works across RPC
- return errors.New(err.Error())
- }
-
- // Reserve an ID for our implementation
- id := d.broker.NextId()
- *response = id
-
- // Run the rest in a goroutine since it can only happen once this RPC
- // call returns. We wait for a connection for the plugin implementation
- // and serve it.
- go func() {
- conn, err := d.broker.Accept(id)
- if err != nil {
- log.Printf("[ERR] go-plugin: plugin dispense error: %s: %s", name, err)
- return
- }
-
- serve(conn, "Plugin", impl)
- }()
-
- return nil
-}
-
-func serve(conn io.ReadWriteCloser, name string, v interface{}) {
- server := rpc.NewServer()
- if err := server.RegisterName(name, v); err != nil {
- log.Printf("[ERR] go-plugin: plugin dispense error: %s", err)
- return
- }
-
- server.ServeConn(conn)
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/runner/runner.go b/vendor/github.com/hashicorp/go-plugin/runner/runner.go
deleted file mode 100644
index e638ae5f..00000000
--- a/vendor/github.com/hashicorp/go-plugin/runner/runner.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package runner
-
-import (
- "context"
- "io"
-)
-
-// Runner defines the interface required by go-plugin to manage the lifecycle of
-// of a plugin and attempt to negotiate a connection with it. Note that this
-// is orthogonal to the protocol and transport used, which is negotiated over stdout.
-type Runner interface {
- // Start should start the plugin and ensure any work required for servicing
- // other interface methods is done. If the context is cancelled, it should
- // only abort any attempts to _start_ the plugin. Waiting and shutdown are
- // handled separately.
- Start(ctx context.Context) error
-
- // Diagnose makes a best-effort attempt to return any debug information that
- // might help users understand why a plugin failed to start and negotiate a
- // connection.
- Diagnose(ctx context.Context) string
-
- // Stdout is used to negotiate the go-plugin protocol.
- Stdout() io.ReadCloser
-
- // Stderr is used for forwarding plugin logs to the host process logger.
- Stderr() io.ReadCloser
-
- // Name is a human-friendly name for the plugin, such as the path to the
- // executable. It does not have to be unique.
- Name() string
-
- AttachedRunner
-}
-
-// AttachedRunner defines a limited subset of Runner's interface to represent the
-// reduced responsibility for plugin lifecycle when attaching to an already running
-// plugin.
-type AttachedRunner interface {
- // Wait should wait until the plugin stops running, whether in response to
- // an out of band signal or in response to calling Kill().
- Wait(ctx context.Context) error
-
- // Kill should stop the plugin and perform any cleanup required.
- Kill(ctx context.Context) error
-
- // ID is a unique identifier to represent the running plugin. e.g. pid or
- // container ID.
- ID() string
-
- AddrTranslator
-}
-
-// AddrTranslator translates addresses between the execution context of the host
-// process and the plugin. For example, if the plugin is in a container, the file
-// path for a Unix socket may be different between the host and the container.
-//
-// It is only intended to be used by the host process.
-type AddrTranslator interface {
- // Called before connecting on any addresses received back from the plugin.
- PluginToHost(pluginNet, pluginAddr string) (hostNet string, hostAddr string, err error)
-
- // Called on any host process addresses before they are sent to the plugin.
- HostToPlugin(hostNet, hostAddr string) (pluginNet string, pluginAddr string, err error)
-}
-
-// ReattachFunc can be passed to a client's reattach config to reattach to an
-// already running plugin instead of starting it ourselves.
-type ReattachFunc func() (AttachedRunner, error)
diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go
deleted file mode 100644
index e741bc7f..00000000
--- a/vendor/github.com/hashicorp/go-plugin/server.go
+++ /dev/null
@@ -1,665 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package plugin
-
-import (
- "context"
- "crypto/tls"
- "crypto/x509"
- "encoding/base64"
- "errors"
- "fmt"
- "io"
- "net"
- "os"
- "os/signal"
- "os/user"
- "runtime"
- "sort"
- "strconv"
- "strings"
-
- hclog "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/go-plugin/internal/grpcmux"
- "google.golang.org/grpc"
-)
-
-// CoreProtocolVersion is the ProtocolVersion of the plugin system itself.
-// We will increment this whenever we change any protocol behavior. This
-// will invalidate any prior plugins but will at least allow us to iterate
-// on the core in a safe way. We will do our best to do this very
-// infrequently.
-const CoreProtocolVersion = 1
-
-// HandshakeConfig is the configuration used by client and servers to
-// handshake before starting a plugin connection. This is embedded by
-// both ServeConfig and ClientConfig.
-//
-// In practice, the plugin host creates a HandshakeConfig that is exported
-// and plugins then can easily consume it.
-type HandshakeConfig struct {
- // ProtocolVersion is the version that clients must match on to
- // agree they can communicate. This should match the ProtocolVersion
- // set on ClientConfig when using a plugin.
- // This field is not required if VersionedPlugins are being used in the
- // Client or Server configurations.
- ProtocolVersion uint
-
- // MagicCookieKey and value are used as a very basic verification
- // that a plugin is intended to be launched. This is not a security
- // measure, just a UX feature. If the magic cookie doesn't match,
- // we show human-friendly output.
- MagicCookieKey string
- MagicCookieValue string
-}
-
-// PluginSet is a set of plugins provided to be registered in the plugin
-// server.
-type PluginSet map[string]Plugin
-
-// ServeConfig configures what sorts of plugins are served.
-type ServeConfig struct {
- // HandshakeConfig is the configuration that must match clients.
- HandshakeConfig
-
- // TLSProvider is a function that returns a configured tls.Config.
- TLSProvider func() (*tls.Config, error)
-
- // Plugins are the plugins that are served.
- // The implied version of this PluginSet is the Handshake.ProtocolVersion.
- Plugins PluginSet
-
- // VersionedPlugins is a map of PluginSets for specific protocol versions.
- // These can be used to negotiate a compatible version between client and
- // server. If this is set, Handshake.ProtocolVersion is not required.
- VersionedPlugins map[int]PluginSet
-
- // GRPCServer should be non-nil to enable serving the plugins over
- // gRPC. This is a function to create the server when needed with the
- // given server options. The server options populated by go-plugin will
- // be for TLS if set. You may modify the input slice.
- //
- // Note that the grpc.Server will automatically be registered with
- // the gRPC health checking service. This is not optional since go-plugin
- // relies on this to implement Ping().
- GRPCServer func([]grpc.ServerOption) *grpc.Server
-
- // Logger is used to pass a logger into the server. If none is provided the
- // server will create a default logger.
- Logger hclog.Logger
-
- // Test, if non-nil, will put plugin serving into "test mode". This is
- // meant to be used as part of `go test` within a plugin's codebase to
- // launch the plugin in-process and output a ReattachConfig.
- //
- // This changes the behavior of the server in a number of ways to
- // accomodate the expectation of running in-process:
- //
- // * The handshake cookie is not validated.
- // * Stdout/stderr will receive plugin reads and writes
- // * Connection information will not be sent to stdout
- //
- Test *ServeTestConfig
-}
-
-// ServeTestConfig configures plugin serving for test mode. See ServeConfig.Test.
-type ServeTestConfig struct {
- // Context, if set, will force the plugin serving to end when cancelled.
- // This is only a test configuration because the non-test configuration
- // expects to take over the process and therefore end on an interrupt or
- // kill signal. For tests, we need to kill the plugin serving routinely
- // and this provides a way to do so.
- //
- // If you want to wait for the plugin process to close before moving on,
- // you can wait on CloseCh.
- Context context.Context
-
- // If this channel is non-nil, we will send the ReattachConfig via
- // this channel. This can be encoded (via JSON recommended) to the
- // plugin client to attach to this plugin.
- ReattachConfigCh chan<- *ReattachConfig
-
- // CloseCh, if non-nil, will be closed when serving exits. This can be
- // used along with Context to determine when the server is fully shut down.
- // If this is not set, you can still use Context on its own, but note there
- // may be a period of time between canceling the context and the plugin
- // server being shut down.
- CloseCh chan<- struct{}
-
- // SyncStdio, if true, will enable the client side "SyncStdout/Stderr"
- // functionality to work. This defaults to false because the implementation
- // of making this work within test environments is particularly messy
- // and SyncStdio functionality is fairly rare, so we default to the simple
- // scenario.
- SyncStdio bool
-}
-
-func unixSocketConfigFromEnv() UnixSocketConfig {
- return UnixSocketConfig{
- Group: os.Getenv(EnvUnixSocketGroup),
- socketDir: os.Getenv(EnvUnixSocketDir),
- }
-}
-
-// protocolVersion determines the protocol version and plugin set to be used by
-// the server. In the event that there is no suitable version, the last version
-// in the config is returned leaving the client to report the incompatibility.
-func protocolVersion(opts *ServeConfig) (int, Protocol, PluginSet) {
- protoVersion := int(opts.ProtocolVersion)
- pluginSet := opts.Plugins
- protoType := ProtocolNetRPC
- // Check if the client sent a list of acceptable versions
- var clientVersions []int
- if vs := os.Getenv("PLUGIN_PROTOCOL_VERSIONS"); vs != "" {
- for _, s := range strings.Split(vs, ",") {
- v, err := strconv.Atoi(s)
- if err != nil {
- fmt.Fprintf(os.Stderr, "server sent invalid plugin version %q", s)
- continue
- }
- clientVersions = append(clientVersions, v)
- }
- }
-
- // We want to iterate in reverse order, to ensure we match the newest
- // compatible plugin version.
- sort.Sort(sort.Reverse(sort.IntSlice(clientVersions)))
-
- // set the old un-versioned fields as if they were versioned plugins
- if opts.VersionedPlugins == nil {
- opts.VersionedPlugins = make(map[int]PluginSet)
- }
-
- if pluginSet != nil {
- opts.VersionedPlugins[protoVersion] = pluginSet
- }
-
- // Sort the version to make sure we match the latest first
- var versions []int
- for v := range opts.VersionedPlugins {
- versions = append(versions, v)
- }
-
- sort.Sort(sort.Reverse(sort.IntSlice(versions)))
-
- // See if we have multiple versions of Plugins to choose from
- for _, version := range versions {
- // Record each version, since we guarantee that this returns valid
- // values even if they are not a protocol match.
- protoVersion = version
- pluginSet = opts.VersionedPlugins[version]
-
- // If we have a configured gRPC server we should select a protocol
- if opts.GRPCServer != nil {
- // All plugins in a set must use the same transport, so check the first
- // for the protocol type
- for _, p := range pluginSet {
- switch p.(type) {
- case GRPCPlugin:
- protoType = ProtocolGRPC
- default:
- protoType = ProtocolNetRPC
- }
- break
- }
- }
-
- for _, clientVersion := range clientVersions {
- if clientVersion == protoVersion {
- return protoVersion, protoType, pluginSet
- }
- }
- }
-
- // Return the lowest version as the fallback.
- // Since we iterated over all the versions in reverse order above, these
- // values are from the lowest version number plugins (which may be from
- // a combination of the Handshake.ProtocolVersion and ServeConfig.Plugins
- // fields). This allows serving the oldest version of our plugins to a
- // legacy client that did not send a PLUGIN_PROTOCOL_VERSIONS list.
- return protoVersion, protoType, pluginSet
-}
-
-// Serve serves the plugins given by ServeConfig.
-//
-// Serve doesn't return until the plugin is done being executed. Any
-// fixable errors will be output to os.Stderr and the process will
-// exit with a status code of 1. Serve will panic for unexpected
-// conditions where a user's fix is unknown.
-//
-// This is the method that plugins should call in their main() functions.
-func Serve(opts *ServeConfig) {
- exitCode := -1
- // We use this to trigger an `os.Exit` so that we can execute our other
- // deferred functions. In test mode, we just output the err to stderr
- // and return.
- defer func() {
- if opts.Test == nil && exitCode >= 0 {
- os.Exit(exitCode)
- }
-
- if opts.Test != nil && opts.Test.CloseCh != nil {
- close(opts.Test.CloseCh)
- }
- }()
-
- if opts.Test == nil {
- // Validate the handshake config
- if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" {
- fmt.Fprintf(os.Stderr,
- "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+
- "key or value was set. Please notify the plugin author and report\n"+
- "this as a bug.\n")
- exitCode = 1
- return
- }
-
- // First check the cookie
- if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue {
- fmt.Fprintf(os.Stderr,
- "This binary is a plugin. These are not meant to be executed directly.\n"+
- "Please execute the program that consumes these plugins, which will\n"+
- "load any plugins automatically\n")
- exitCode = 1
- return
- }
- }
-
- // negotiate the version and plugins
- // start with default version in the handshake config
- protoVersion, protoType, pluginSet := protocolVersion(opts)
-
- logger := opts.Logger
- if logger == nil {
- // internal logger to os.Stderr
- logger = hclog.New(&hclog.LoggerOptions{
- Level: hclog.Trace,
- Output: os.Stderr,
- JSONFormat: true,
- })
- }
-
- // Register a listener so we can accept a connection
- listener, err := serverListener(unixSocketConfigFromEnv())
- if err != nil {
- logger.Error("plugin init error", "error", err)
- return
- }
-
- // Close the listener on return. We wrap this in a func() on purpose
- // because the "listener" reference may change to TLS.
- defer func() {
- listener.Close()
- }()
-
- var tlsConfig *tls.Config
- if opts.TLSProvider != nil {
- tlsConfig, err = opts.TLSProvider()
- if err != nil {
- logger.Error("plugin tls init", "error", err)
- return
- }
- }
-
- var serverCert string
- clientCert := os.Getenv("PLUGIN_CLIENT_CERT")
- // If the client is configured using AutoMTLS, the certificate will be here,
- // and we need to generate our own in response.
- if tlsConfig == nil && clientCert != "" {
- logger.Info("configuring server automatic mTLS")
- clientCertPool := x509.NewCertPool()
- if !clientCertPool.AppendCertsFromPEM([]byte(clientCert)) {
- logger.Error("client cert provided but failed to parse", "cert", clientCert)
- }
-
- certPEM, keyPEM, err := generateCert()
- if err != nil {
- logger.Error("failed to generate server certificate", "error", err)
- panic(err)
- }
-
- cert, err := tls.X509KeyPair(certPEM, keyPEM)
- if err != nil {
- logger.Error("failed to parse server certificate", "error", err)
- panic(err)
- }
-
- tlsConfig = &tls.Config{
- Certificates: []tls.Certificate{cert},
- ClientAuth: tls.RequireAndVerifyClientCert,
- ClientCAs: clientCertPool,
- MinVersion: tls.VersionTLS12,
- RootCAs: clientCertPool,
- ServerName: "localhost",
- }
-
- // We send back the raw leaf cert data for the client rather than the
- // PEM, since the protocol can't handle newlines.
- serverCert = base64.RawStdEncoding.EncodeToString(cert.Certificate[0])
- }
-
- // Create the channel to tell us when we're done
- doneCh := make(chan struct{})
-
- // Create our new stdout, stderr files. These will override our built-in
- // stdout/stderr so that it works across the stream boundary.
- var stdout_r, stderr_r io.Reader
- stdout_r, stdout_w, err := os.Pipe()
- if err != nil {
- fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err)
- os.Exit(1)
- }
- stderr_r, stderr_w, err := os.Pipe()
- if err != nil {
- fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err)
- os.Exit(1)
- }
-
- // If we're in test mode, we tee off the reader and write the data
- // as-is to our normal Stdout and Stderr so that they continue working
- // while stdio works. This is because in test mode, we assume we're running
- // in `go test` or some equivalent and we want output to go to standard
- // locations.
- if opts.Test != nil {
- // TODO(mitchellh): This isn't super ideal because a TeeReader
- // only works if the reader side is actively read. If we never
- // connect via a plugin client, the output still gets swallowed.
- stdout_r = io.TeeReader(stdout_r, os.Stdout)
- stderr_r = io.TeeReader(stderr_r, os.Stderr)
- }
-
- // Build the server type
- var server ServerProtocol
- switch protoType {
- case ProtocolNetRPC:
- // If we have a TLS configuration then we wrap the listener
- // ourselves and do it at that level.
- if tlsConfig != nil {
- listener = tls.NewListener(listener, tlsConfig)
- }
-
- // Create the RPC server to dispense
- server = &RPCServer{
- Plugins: pluginSet,
- Stdout: stdout_r,
- Stderr: stderr_r,
- DoneCh: doneCh,
- }
-
- case ProtocolGRPC:
- var muxer *grpcmux.GRPCServerMuxer
- if multiplex, _ := strconv.ParseBool(os.Getenv(envMultiplexGRPC)); multiplex {
- muxer = grpcmux.NewGRPCServerMuxer(logger, listener)
- listener = muxer
- }
-
- // Create the gRPC server
- server = &GRPCServer{
- Plugins: pluginSet,
- Server: opts.GRPCServer,
- TLS: tlsConfig,
- Stdout: stdout_r,
- Stderr: stderr_r,
- DoneCh: doneCh,
- logger: logger,
- muxer: muxer,
- }
-
- default:
- panic("unknown server protocol: " + protoType)
- }
-
- // Initialize the servers
- if err := server.Init(); err != nil {
- logger.Error("protocol init", "error", err)
- return
- }
-
- logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String())
-
- // Output the address and service name to stdout so that the client can
- // bring it up. In test mode, we don't do this because clients will
- // attach via a reattach config.
- if opts.Test == nil {
- const grpcBrokerMultiplexingSupported = true
- protocolLine := fmt.Sprintf("%d|%d|%s|%s|%s|%s",
- CoreProtocolVersion,
- protoVersion,
- listener.Addr().Network(),
- listener.Addr().String(),
- protoType,
- serverCert)
-
- // Old clients will error with new plugins if we blindly append the
- // seventh segment for gRPC broker multiplexing support, because old
- // client code uses strings.SplitN(line, "|", 6), which means a seventh
- // segment will get appended to the sixth segment as "sixthpart|true".
- //
- // If the environment variable is set, we assume the client is new enough
- // to handle a seventh segment, as it should now use
- // strings.Split(line, "|") and always handle each segment individually.
- if os.Getenv(envMultiplexGRPC) != "" {
- protocolLine += fmt.Sprintf("|%v", grpcBrokerMultiplexingSupported)
- }
- fmt.Printf("%s\n", protocolLine)
- os.Stdout.Sync()
- } else if ch := opts.Test.ReattachConfigCh; ch != nil {
- // Send back the reattach config that can be used. This isn't
- // quite ready if they connect immediately but the client should
- // retry a few times.
- ch <- &ReattachConfig{
- Protocol: protoType,
- ProtocolVersion: protoVersion,
- Addr: listener.Addr(),
- Pid: os.Getpid(),
- Test: true,
- }
- }
-
- // Eat the interrupts. In test mode we disable this so that go test
- // can be cancelled properly.
- if opts.Test == nil {
- ch := make(chan os.Signal, 1)
- signal.Notify(ch, os.Interrupt)
- go func() {
- count := 0
- for {
- <-ch
- count++
- logger.Trace("plugin received interrupt signal, ignoring", "count", count)
- }
- }()
- }
-
- // Set our stdout, stderr to the stdio stream that clients can retrieve
- // using ClientConfig.SyncStdout/err. We only do this for non-test mode
- // or if the test mode explicitly requests it.
- //
- // In test mode, we use a multiwriter so that the data continues going
- // to the normal stdout/stderr so output can show up in test logs. We
- // also send to the stdio stream so that clients can continue working
- // if they depend on that.
- if opts.Test == nil || opts.Test.SyncStdio {
- if opts.Test != nil {
- // In test mode we need to maintain the original values so we can
- // reset it.
- defer func(out, err *os.File) {
- os.Stdout = out
- os.Stderr = err
- }(os.Stdout, os.Stderr)
- }
- os.Stdout = stdout_w
- os.Stderr = stderr_w
- }
-
- // Accept connections and wait for completion
- go server.Serve(listener)
-
- ctx := context.Background()
- if opts.Test != nil && opts.Test.Context != nil {
- ctx = opts.Test.Context
- }
- select {
- case <-ctx.Done():
- // Cancellation. We can stop the server by closing the listener.
- // This isn't graceful at all but this is currently only used by
- // tests and its our only way to stop.
- listener.Close()
-
- // If this is a grpc server, then we also ask the server itself to
- // end which will kill all connections. There isn't an easy way to do
- // this for net/rpc currently but net/rpc is more and more unused.
- if s, ok := server.(*GRPCServer); ok {
- s.Stop()
- }
-
- // Wait for the server itself to shut down
- <-doneCh
-
- case <-doneCh:
- // Note that given the documentation of Serve we should probably be
- // setting exitCode = 0 and using os.Exit here. That's how it used to
- // work before extracting this library. However, for years we've done
- // this so we'll keep this functionality.
- }
-}
-
-func serverListener(unixSocketCfg UnixSocketConfig) (net.Listener, error) {
- if runtime.GOOS == "windows" {
- return serverListener_tcp()
- }
-
- return serverListener_unix(unixSocketCfg)
-}
-
-func serverListener_tcp() (net.Listener, error) {
- envMinPort := os.Getenv("PLUGIN_MIN_PORT")
- envMaxPort := os.Getenv("PLUGIN_MAX_PORT")
-
- var minPort, maxPort int64
- var err error
-
- switch {
- case len(envMinPort) == 0:
- minPort = 0
- default:
- minPort, err = strconv.ParseInt(envMinPort, 10, 32)
- if err != nil {
- return nil, fmt.Errorf("Couldn't get value from PLUGIN_MIN_PORT: %v", err)
- }
- }
-
- switch {
- case len(envMaxPort) == 0:
- maxPort = 0
- default:
- maxPort, err = strconv.ParseInt(envMaxPort, 10, 32)
- if err != nil {
- return nil, fmt.Errorf("Couldn't get value from PLUGIN_MAX_PORT: %v", err)
- }
- }
-
- if minPort > maxPort {
- return nil, fmt.Errorf("PLUGIN_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort)
- }
-
- for port := minPort; port <= maxPort; port++ {
- address := fmt.Sprintf("127.0.0.1:%d", port)
- listener, err := net.Listen("tcp", address)
- if err == nil {
- return listener, nil
- }
- }
-
- return nil, errors.New("Couldn't bind plugin TCP listener")
-}
-
-func serverListener_unix(unixSocketCfg UnixSocketConfig) (net.Listener, error) {
- tf, err := os.CreateTemp(unixSocketCfg.socketDir, "plugin")
- if err != nil {
- return nil, err
- }
- path := tf.Name()
-
- // Close the file and remove it because it has to not exist for
- // the domain socket.
- if err := tf.Close(); err != nil {
- return nil, err
- }
- if err := os.Remove(path); err != nil {
- return nil, err
- }
-
- l, err := net.Listen("unix", path)
- if err != nil {
- return nil, err
- }
-
- // By default, unix sockets are only writable by the owner. Set up a custom
- // group owner and group write permissions if configured.
- if unixSocketCfg.Group != "" {
- err = setGroupWritable(path, unixSocketCfg.Group, 0o660)
- if err != nil {
- return nil, err
- }
- }
-
- // Wrap the listener in rmListener so that the Unix domain socket file
- // is removed on close.
- return newDeleteFileListener(l, path), nil
-}
-
-func setGroupWritable(path, groupString string, mode os.FileMode) error {
- groupID, err := strconv.Atoi(groupString)
- if err != nil {
- group, err := user.LookupGroup(groupString)
- if err != nil {
- return fmt.Errorf("failed to find gid from %q: %w", groupString, err)
- }
- groupID, err = strconv.Atoi(group.Gid)
- if err != nil {
- return fmt.Errorf("failed to parse %q group's gid as an integer: %w", groupString, err)
- }
- }
-
- err = os.Chown(path, -1, groupID)
- if err != nil {
- return err
- }
-
- err = os.Chmod(path, mode)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// rmListener is an implementation of net.Listener that forwards most
-// calls to the listener but also calls an additional close function. We
-// use this to cleanup the unix domain socket on close, as well as clean
-// up multiplexed listeners.
-type rmListener struct {
- net.Listener
- close func() error
-}
-
-func newDeleteFileListener(ln net.Listener, path string) *rmListener {
- return &rmListener{
- Listener: ln,
- close: func() error {
- return os.Remove(path)
- },
- }
-}
-
-func (l *rmListener) Close() error {
- // Close the listener itself
- if err := l.Listener.Close(); err != nil {
- return err
- }
-
- // Remove the file
- return l.close()
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/server_mux.go b/vendor/github.com/hashicorp/go-plugin/server_mux.go
deleted file mode 100644
index 6b14b0c2..00000000
--- a/vendor/github.com/hashicorp/go-plugin/server_mux.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package plugin
-
-import (
- "fmt"
- "os"
-)
-
-// ServeMuxMap is the type that is used to configure ServeMux
-type ServeMuxMap map[string]*ServeConfig
-
-// ServeMux is like Serve, but serves multiple types of plugins determined
-// by the argument given on the command-line.
-//
-// This command doesn't return until the plugin is done being executed. Any
-// errors are logged or output to stderr.
-func ServeMux(m ServeMuxMap) {
- if len(os.Args) != 2 {
- fmt.Fprintf(os.Stderr,
- "Invoked improperly. This is an internal command that shouldn't\n"+
- "be manually invoked.\n")
- os.Exit(1)
- }
-
- opts, ok := m[os.Args[1]]
- if !ok {
- fmt.Fprintf(os.Stderr, "Unknown plugin: %s\n", os.Args[1])
- os.Exit(1)
- }
-
- Serve(opts)
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/stream.go b/vendor/github.com/hashicorp/go-plugin/stream.go
deleted file mode 100644
index a2348642..00000000
--- a/vendor/github.com/hashicorp/go-plugin/stream.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package plugin
-
-import (
- "io"
- "log"
-)
-
-func copyStream(name string, dst io.Writer, src io.Reader) {
- if src == nil {
- panic(name + ": src is nil")
- }
- if dst == nil {
- panic(name + ": dst is nil")
- }
- if _, err := io.Copy(dst, src); err != nil && err != io.EOF {
- log.Printf("[ERR] plugin: stream copy '%s' error: %s", name, err)
- }
-}
diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go
deleted file mode 100644
index a8735dfc..00000000
--- a/vendor/github.com/hashicorp/go-plugin/testing.go
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package plugin
-
-import (
- "bytes"
- "context"
- "io"
- "net"
- "net/rpc"
-
- hclog "github.com/hashicorp/go-hclog"
- "github.com/hashicorp/go-plugin/internal/grpcmux"
- "github.com/mitchellh/go-testing-interface"
- "google.golang.org/grpc"
-)
-
-// TestOptions allows specifying options that can affect the behavior of the
-// test functions
-type TestOptions struct {
- //ServerStdout causes the given value to be used in place of a blank buffer
- //for RPCServer's Stdout
- ServerStdout io.ReadCloser
-
- //ServerStderr causes the given value to be used in place of a blank buffer
- //for RPCServer's Stderr
- ServerStderr io.ReadCloser
-}
-
-// The testing file contains test helpers that you can use outside of
-// this package for making it easier to test plugins themselves.
-
-// TestConn is a helper function for returning a client and server
-// net.Conn connected to each other.
-func TestConn(t testing.T) (net.Conn, net.Conn) {
- // Listen to any local port. This listener will be closed
- // after a single connection is established.
- l, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Start a goroutine to accept our client connection
- var serverConn net.Conn
- doneCh := make(chan struct{})
- go func() {
- defer close(doneCh)
- defer l.Close()
- var err error
- serverConn, err = l.Accept()
- if err != nil {
- t.Fatalf("err: %s", err)
- }
- }()
-
- // Connect to the server
- clientConn, err := net.Dial("tcp", l.Addr().String())
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Wait for the server side to acknowledge it has connected
- <-doneCh
-
- return clientConn, serverConn
-}
-
-// TestRPCConn returns a rpc client and server connected to each other.
-func TestRPCConn(t testing.T) (*rpc.Client, *rpc.Server) {
- clientConn, serverConn := TestConn(t)
-
- server := rpc.NewServer()
- go server.ServeConn(serverConn)
-
- client := rpc.NewClient(clientConn)
- return client, server
-}
-
-// TestPluginRPCConn returns a plugin RPC client and server that are connected
-// together and configured.
-func TestPluginRPCConn(t testing.T, ps map[string]Plugin, opts *TestOptions) (*RPCClient, *RPCServer) {
- // Create two net.Conns we can use to shuttle our control connection
- clientConn, serverConn := TestConn(t)
-
- // Start up the server
- server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)}
- if opts != nil {
- if opts.ServerStdout != nil {
- server.Stdout = opts.ServerStdout
- }
- if opts.ServerStderr != nil {
- server.Stderr = opts.ServerStderr
- }
- }
- go server.ServeConn(serverConn)
-
- // Connect the client to the server
- client, err := NewRPCClient(clientConn, ps)
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- return client, server
-}
-
-// TestGRPCConn returns a gRPC client conn and grpc server that are connected
-// together and configured. The register function is used to register services
-// prior to the Serve call. This is used to test gRPC connections.
-func TestGRPCConn(t testing.T, register func(*grpc.Server)) (*grpc.ClientConn, *grpc.Server) {
- // Create a listener
- l, err := net.Listen("tcp", "127.0.0.1:0")
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- server := grpc.NewServer()
- register(server)
- go server.Serve(l)
-
- // Connect to the server
- conn, err := grpc.Dial(
- l.Addr().String(),
- grpc.WithBlock(),
- grpc.WithInsecure())
- if err != nil {
- t.Fatalf("err: %s", err)
- }
-
- // Connection successful, close the listener
- l.Close()
-
- return conn, server
-}
-
-// TestPluginGRPCConn returns a plugin gRPC client and server that are connected
-// together and configured. This is used to test gRPC connections.
-func TestPluginGRPCConn(t testing.T, multiplex bool, ps map[string]Plugin) (*GRPCClient, *GRPCServer) {
- // Create a listener
- ln, err := serverListener(UnixSocketConfig{})
- if err != nil {
- t.Fatal(err)
- }
-
- logger := hclog.New(&hclog.LoggerOptions{
- Level: hclog.Debug,
- })
-
- // Start up the server
- var muxer *grpcmux.GRPCServerMuxer
- if multiplex {
- muxer = grpcmux.NewGRPCServerMuxer(logger, ln)
- ln = muxer
- }
- server := &GRPCServer{
- Plugins: ps,
- DoneCh: make(chan struct{}),
- Server: DefaultGRPCServer,
- Stdout: new(bytes.Buffer),
- Stderr: new(bytes.Buffer),
- logger: logger,
- muxer: muxer,
- }
- if err := server.Init(); err != nil {
- t.Fatalf("err: %s", err)
- }
- go server.Serve(ln)
-
- client := &Client{
- address: ln.Addr(),
- protocol: ProtocolGRPC,
- config: &ClientConfig{
- Plugins: ps,
- GRPCBrokerMultiplex: multiplex,
- },
- logger: logger,
- }
-
- grpcClient, err := newGRPCClient(context.Background(), client)
- if err != nil {
- t.Fatal(err)
- }
-
- return grpcClient, server
-}
diff --git a/vendor/github.com/hashicorp/go-uuid/.travis.yml b/vendor/github.com/hashicorp/go-uuid/.travis.yml
deleted file mode 100644
index 76984907..00000000
--- a/vendor/github.com/hashicorp/go-uuid/.travis.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-language: go
-
-sudo: false
-
-go:
- - 1.4
- - 1.5
- - 1.6
- - tip
-
-script:
- - go test -bench . -benchmem -v ./...
diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE
deleted file mode 100644
index a320b309..00000000
--- a/vendor/github.com/hashicorp/go-uuid/LICENSE
+++ /dev/null
@@ -1,365 +0,0 @@
-Copyright © 2015-2022 HashiCorp, Inc.
-
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. "Contributor"
-
- means each individual or legal entity that creates, contributes to the
- creation of, or owns Covered Software.
-
-1.2. "Contributor Version"
-
- means the combination of the Contributions of others (if any) used by a
- Contributor and that particular Contributor's Contribution.
-
-1.3. "Contribution"
-
- means Covered Software of a particular Contributor.
-
-1.4. "Covered Software"
-
- means Source Code Form to which the initial Contributor has attached the
- notice in Exhibit A, the Executable Form of such Source Code Form, and
- Modifications of such Source Code Form, in each case including portions
- thereof.
-
-1.5. "Incompatible With Secondary Licenses"
- means
-
- a. that the initial Contributor has attached the notice described in
- Exhibit B to the Covered Software; or
-
- b. that the Covered Software was made available under the terms of
- version 1.1 or earlier of the License, but not also under the terms of
- a Secondary License.
-
-1.6. "Executable Form"
-
- means any form of the work other than Source Code Form.
-
-1.7. "Larger Work"
-
- means a work that combines Covered Software with other material, in a
- separate file or files, that is not Covered Software.
-
-1.8. "License"
-
- means this document.
-
-1.9. "Licensable"
-
- means having the right to grant, to the maximum extent possible, whether
- at the time of the initial grant or subsequently, any and all of the
- rights conveyed by this License.
-
-1.10. "Modifications"
-
- means any of the following:
-
- a. any file in Source Code Form that results from an addition to,
- deletion from, or modification of the contents of Covered Software; or
-
- b. any new file in Source Code Form that contains any Covered Software.
-
-1.11. "Patent Claims" of a Contributor
-
- means any patent claim(s), including without limitation, method,
- process, and apparatus claims, in any patent Licensable by such
- Contributor that would be infringed, but for the grant of the License,
- by the making, using, selling, offering for sale, having made, import,
- or transfer of either its Contributions or its Contributor Version.
-
-1.12. "Secondary License"
-
- means either the GNU General Public License, Version 2.0, the GNU Lesser
- General Public License, Version 2.1, the GNU Affero General Public
- License, Version 3.0, or any later versions of those licenses.
-
-1.13. "Source Code Form"
-
- means the form of the work preferred for making modifications.
-
-1.14. "You" (or "Your")
-
- means an individual or a legal entity exercising rights under this
- License. For legal entities, "You" includes any entity that controls, is
- controlled by, or is under common control with You. For purposes of this
- definition, "control" means (a) the power, direct or indirect, to cause
- the direction or management of such entity, whether by contract or
- otherwise, or (b) ownership of more than fifty percent (50%) of the
- outstanding shares or beneficial ownership of such entity.
-
-
-2. License Grants and Conditions
-
-2.1. Grants
-
- Each Contributor hereby grants You a world-wide, royalty-free,
- non-exclusive license:
-
- a. under intellectual property rights (other than patent or trademark)
- Licensable by such Contributor to use, reproduce, make available,
- modify, display, perform, distribute, and otherwise exploit its
- Contributions, either on an unmodified basis, with Modifications, or
- as part of a Larger Work; and
-
- b. under Patent Claims of such Contributor to make, use, sell, offer for
- sale, have made, import, and otherwise transfer either its
- Contributions or its Contributor Version.
-
-2.2. Effective Date
-
- The licenses granted in Section 2.1 with respect to any Contribution
- become effective for each Contribution on the date the Contributor first
- distributes such Contribution.
-
-2.3. Limitations on Grant Scope
-
- The licenses granted in this Section 2 are the only rights granted under
- this License. No additional rights or licenses will be implied from the
- distribution or licensing of Covered Software under this License.
- Notwithstanding Section 2.1(b) above, no patent license is granted by a
- Contributor:
-
- a. for any code that a Contributor has removed from Covered Software; or
-
- b. for infringements caused by: (i) Your and any other third party's
- modifications of Covered Software, or (ii) the combination of its
- Contributions with other software (except as part of its Contributor
- Version); or
-
- c. under Patent Claims infringed by Covered Software in the absence of
- its Contributions.
-
- This License does not grant any rights in the trademarks, service marks,
- or logos of any Contributor (except as may be necessary to comply with
- the notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
- No Contributor makes additional grants as a result of Your choice to
- distribute the Covered Software under a subsequent version of this
- License (see Section 10.2) or under the terms of a Secondary License (if
- permitted under the terms of Section 3.3).
-
-2.5. Representation
-
- Each Contributor represents that the Contributor believes its
- Contributions are its original creation(s) or it has sufficient rights to
- grant the rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
- This License is not intended to limit any rights You have under
- applicable copyright doctrines of fair use, fair dealing, or other
- equivalents.
-
-2.7. Conditions
-
- Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
- Section 2.1.
-
-
-3. Responsibilities
-
-3.1. Distribution of Source Form
-
- All distribution of Covered Software in Source Code Form, including any
- Modifications that You create or to which You contribute, must be under
- the terms of this License. You must inform recipients that the Source
- Code Form of the Covered Software is governed by the terms of this
- License, and how they can obtain a copy of this License. You may not
- attempt to alter or restrict the recipients' rights in the Source Code
- Form.
-
-3.2. Distribution of Executable Form
-
- If You distribute Covered Software in Executable Form then:
-
- a. such Covered Software must also be made available in Source Code Form,
- as described in Section 3.1, and You must inform recipients of the
- Executable Form how they can obtain a copy of such Source Code Form by
- reasonable means in a timely manner, at a charge no more than the cost
- of distribution to the recipient; and
-
- b. You may distribute such Executable Form under the terms of this
- License, or sublicense it under different terms, provided that the
- license for the Executable Form does not attempt to limit or alter the
- recipients' rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
- You may create and distribute a Larger Work under terms of Your choice,
- provided that You also comply with the requirements of this License for
- the Covered Software. If the Larger Work is a combination of Covered
- Software with a work governed by one or more Secondary Licenses, and the
- Covered Software is not Incompatible With Secondary Licenses, this
- License permits You to additionally distribute such Covered Software
- under the terms of such Secondary License(s), so that the recipient of
- the Larger Work may, at their option, further distribute the Covered
- Software under the terms of either this License or such Secondary
- License(s).
-
-3.4. Notices
-
- You may not remove or alter the substance of any license notices
- (including copyright notices, patent notices, disclaimers of warranty, or
- limitations of liability) contained within the Source Code Form of the
- Covered Software, except that You may alter any license notices to the
- extent required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
- You may choose to offer, and to charge a fee for, warranty, support,
- indemnity or liability obligations to one or more recipients of Covered
- Software. However, You may do so only on Your own behalf, and not on
- behalf of any Contributor. You must make it absolutely clear that any
- such warranty, support, indemnity, or liability obligation is offered by
- You alone, and You hereby agree to indemnify every Contributor for any
- liability incurred by such Contributor as a result of warranty, support,
- indemnity or liability terms You offer. You may include additional
- disclaimers of warranty and limitations of liability specific to any
- jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
-
- If it is impossible for You to comply with any of the terms of this License
- with respect to some or all of the Covered Software due to statute,
- judicial order, or regulation then You must: (a) comply with the terms of
- this License to the maximum extent possible; and (b) describe the
- limitations and the code they affect. Such description must be placed in a
- text file included with all distributions of the Covered Software under
- this License. Except to the extent prohibited by statute or regulation,
- such description must be sufficiently detailed for a recipient of ordinary
- skill to be able to understand it.
-
-5. Termination
-
-5.1. The rights granted under this License will terminate automatically if You
- fail to comply with any of its terms. However, if You become compliant,
- then the rights granted under this License from a particular Contributor
- are reinstated (a) provisionally, unless and until such Contributor
- explicitly and finally terminates Your grants, and (b) on an ongoing
- basis, if such Contributor fails to notify You of the non-compliance by
- some reasonable means prior to 60 days after You have come back into
- compliance. Moreover, Your grants from a particular Contributor are
- reinstated on an ongoing basis if such Contributor notifies You of the
- non-compliance by some reasonable means, this is the first time You have
- received notice of non-compliance with this License from such
- Contributor, and You become compliant prior to 30 days after Your receipt
- of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
- infringement claim (excluding declaratory judgment actions,
- counter-claims, and cross-claims) alleging that a Contributor Version
- directly or indirectly infringes any patent, then the rights granted to
- You by any and all Contributors for the Covered Software under Section
- 2.1 of this License shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
- license agreements (excluding distributors and resellers) which have been
- validly granted by You or Your distributors under this License prior to
- termination shall survive termination.
-
-6. Disclaimer of Warranty
-
- Covered Software is provided under this License on an "as is" basis,
- without warranty of any kind, either expressed, implied, or statutory,
- including, without limitation, warranties that the Covered Software is free
- of defects, merchantable, fit for a particular purpose or non-infringing.
- The entire risk as to the quality and performance of the Covered Software
- is with You. Should any Covered Software prove defective in any respect,
- You (not any Contributor) assume the cost of any necessary servicing,
- repair, or correction. This disclaimer of warranty constitutes an essential
- part of this License. No use of any Covered Software is authorized under
- this License except under this disclaimer.
-
-7. Limitation of Liability
-
- Under no circumstances and under no legal theory, whether tort (including
- negligence), contract, or otherwise, shall any Contributor, or anyone who
- distributes Covered Software as permitted above, be liable to You for any
- direct, indirect, special, incidental, or consequential damages of any
- character including, without limitation, damages for lost profits, loss of
- goodwill, work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses, even if such party shall have been
- informed of the possibility of such damages. This limitation of liability
- shall not apply to liability for death or personal injury resulting from
- such party's negligence to the extent applicable law prohibits such
- limitation. Some jurisdictions do not allow the exclusion or limitation of
- incidental or consequential damages, so this exclusion and limitation may
- not apply to You.
-
-8. Litigation
-
- Any litigation relating to this License may be brought only in the courts
- of a jurisdiction where the defendant maintains its principal place of
- business and such litigation shall be governed by laws of that
- jurisdiction, without reference to its conflict-of-law provisions. Nothing
- in this Section shall prevent a party's ability to bring cross-claims or
- counter-claims.
-
-9. Miscellaneous
-
- This License represents the complete agreement concerning the subject
- matter hereof. If any provision of this License is held to be
- unenforceable, such provision shall be reformed only to the extent
- necessary to make it enforceable. Any law or regulation which provides that
- the language of a contract shall be construed against the drafter shall not
- be used to construe this License against a Contributor.
-
-
-10. Versions of the License
-
-10.1. New Versions
-
- Mozilla Foundation is the license steward. Except as provided in Section
- 10.3, no one other than the license steward has the right to modify or
- publish new versions of this License. Each version will be given a
- distinguishing version number.
-
-10.2. Effect of New Versions
-
- You may distribute the Covered Software under the terms of the version
- of the License under which You originally received the Covered Software,
- or under the terms of any subsequent version published by the license
- steward.
-
-10.3. Modified Versions
-
- If you create software not governed by this License, and you want to
- create a new license for such software, you may create and use a
- modified version of this License if you rename the license and remove
- any references to the name of the license steward (except to note that
- such modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary
- Licenses If You choose to distribute Source Code Form that is
- Incompatible With Secondary Licenses under the terms of this version of
- the License, the notice described in Exhibit B of this License must be
- attached.
-
-Exhibit A - Source Code Form License Notice
-
- This Source Code Form is subject to the
- terms of the Mozilla Public License, v.
- 2.0. If a copy of the MPL was not
- distributed with this file, You can
- obtain one at
- http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file,
-then You may include the notice in a location (such as a LICENSE file in a
-relevant directory) where a recipient would be likely to look for such a
-notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - "Incompatible With Secondary Licenses" Notice
-
- This Source Code Form is "Incompatible
- With Secondary Licenses", as defined by
- the Mozilla Public License, v. 2.0.
-
diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md
deleted file mode 100644
index fbde8b9a..00000000
--- a/vendor/github.com/hashicorp/go-uuid/README.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# uuid [](https://travis-ci.org/hashicorp/go-uuid)
-
-Generates UUID-format strings using high quality, _purely random_ bytes. It is **not** intended to be RFC compliant, merely to use a well-understood string representation of a 128-bit value. It can also parse UUID-format strings into their component bytes.
-
-Documentation
-=============
-
-The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-uuid).
diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go
deleted file mode 100644
index 0c10c4e9..00000000
--- a/vendor/github.com/hashicorp/go-uuid/uuid.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package uuid
-
-import (
- "crypto/rand"
- "encoding/hex"
- "fmt"
- "io"
-)
-
-// GenerateRandomBytes is used to generate random bytes of given size.
-func GenerateRandomBytes(size int) ([]byte, error) {
- return GenerateRandomBytesWithReader(size, rand.Reader)
-}
-
-// GenerateRandomBytesWithReader is used to generate random bytes of given size read from a given reader.
-func GenerateRandomBytesWithReader(size int, reader io.Reader) ([]byte, error) {
- if reader == nil {
- return nil, fmt.Errorf("provided reader is nil")
- }
- buf := make([]byte, size)
- if _, err := io.ReadFull(reader, buf); err != nil {
- return nil, fmt.Errorf("failed to read random bytes: %v", err)
- }
- return buf, nil
-}
-
-
-const uuidLen = 16
-
-// GenerateUUID is used to generate a random UUID
-func GenerateUUID() (string, error) {
- return GenerateUUIDWithReader(rand.Reader)
-}
-
-// GenerateUUIDWithReader is used to generate a random UUID with a given Reader
-func GenerateUUIDWithReader(reader io.Reader) (string, error) {
- if reader == nil {
- return "", fmt.Errorf("provided reader is nil")
- }
- buf, err := GenerateRandomBytesWithReader(uuidLen, reader)
- if err != nil {
- return "", err
- }
- return FormatUUID(buf)
-}
-
-func FormatUUID(buf []byte) (string, error) {
- if buflen := len(buf); buflen != uuidLen {
- return "", fmt.Errorf("wrong length byte slice (%d)", buflen)
- }
-
- return fmt.Sprintf("%x-%x-%x-%x-%x",
- buf[0:4],
- buf[4:6],
- buf[6:8],
- buf[8:10],
- buf[10:16]), nil
-}
-
-func ParseUUID(uuid string) ([]byte, error) {
- if len(uuid) != 2 * uuidLen + 4 {
- return nil, fmt.Errorf("uuid string is wrong length")
- }
-
- if uuid[8] != '-' ||
- uuid[13] != '-' ||
- uuid[18] != '-' ||
- uuid[23] != '-' {
- return nil, fmt.Errorf("uuid is improperly formatted")
- }
-
- hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36]
-
- ret, err := hex.DecodeString(hexStr)
- if err != nil {
- return nil, err
- }
- if len(ret) != uuidLen {
- return nil, fmt.Errorf("decoded hex is the wrong length")
- }
-
- return ret, nil
-}
diff --git a/vendor/github.com/hashicorp/go-version/CHANGELOG.md b/vendor/github.com/hashicorp/go-version/CHANGELOG.md
deleted file mode 100644
index 6d48174b..00000000
--- a/vendor/github.com/hashicorp/go-version/CHANGELOG.md
+++ /dev/null
@@ -1,64 +0,0 @@
-# 1.7.0 (May 24, 2024)
-
-ENHANCEMENTS:
-
-- Remove `reflect` dependency ([#91](https://github.com/hashicorp/go-version/pull/91))
-- Implement the `database/sql.Scanner` and `database/sql/driver.Value` interfaces for `Version` ([#133](https://github.com/hashicorp/go-version/pull/133))
-
-INTERNAL:
-
-- [COMPLIANCE] Add Copyright and License Headers ([#115](https://github.com/hashicorp/go-version/pull/115))
-- [COMPLIANCE] Update MPL-2.0 LICENSE ([#105](https://github.com/hashicorp/go-version/pull/105))
-- Bump actions/cache from 3.0.11 to 3.2.5 ([#116](https://github.com/hashicorp/go-version/pull/116))
-- Bump actions/checkout from 3.2.0 to 3.3.0 ([#111](https://github.com/hashicorp/go-version/pull/111))
-- Bump actions/upload-artifact from 3.1.1 to 3.1.2 ([#112](https://github.com/hashicorp/go-version/pull/112))
-- GHA Migration ([#103](https://github.com/hashicorp/go-version/pull/103))
-- github: Pin external GitHub Actions to hashes ([#107](https://github.com/hashicorp/go-version/pull/107))
-- SEC-090: Automated trusted workflow pinning (2023-04-05) ([#124](https://github.com/hashicorp/go-version/pull/124))
-- update readme ([#104](https://github.com/hashicorp/go-version/pull/104))
-
-# 1.6.0 (June 28, 2022)
-
-FEATURES:
-
-- Add `Prerelease` function to `Constraint` to return true if the version includes a prerelease field ([#100](https://github.com/hashicorp/go-version/pull/100))
-
-# 1.5.0 (May 18, 2022)
-
-FEATURES:
-
-- Use `encoding` `TextMarshaler` & `TextUnmarshaler` instead of JSON equivalents ([#95](https://github.com/hashicorp/go-version/pull/95))
-- Add JSON handlers to allow parsing from/to JSON ([#93](https://github.com/hashicorp/go-version/pull/93))
-
-# 1.4.0 (January 5, 2022)
-
-FEATURES:
-
- - Introduce `MustConstraints()` ([#87](https://github.com/hashicorp/go-version/pull/87))
- - `Constraints`: Introduce `Equals()` and `sort.Interface` methods ([#88](https://github.com/hashicorp/go-version/pull/88))
-
-# 1.3.0 (March 31, 2021)
-
-Please note that CHANGELOG.md does not exist in the source code prior to this release.
-
-FEATURES:
- - Add `Core` function to return a version without prerelease or metadata ([#85](https://github.com/hashicorp/go-version/pull/85))
-
-# 1.2.1 (June 17, 2020)
-
-BUG FIXES:
- - Prevent `Version.Equal` method from panicking on `nil` encounter ([#73](https://github.com/hashicorp/go-version/pull/73))
-
-# 1.2.0 (April 23, 2019)
-
-FEATURES:
- - Add `GreaterThanOrEqual` and `LessThanOrEqual` helper methods ([#53](https://github.com/hashicorp/go-version/pull/53))
-
-# 1.1.0 (Jan 07, 2019)
-
-FEATURES:
- - Add `NewSemver` constructor ([#45](https://github.com/hashicorp/go-version/pull/45))
-
-# 1.0.0 (August 24, 2018)
-
-Initial release.
diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE
deleted file mode 100644
index 1409d6ab..00000000
--- a/vendor/github.com/hashicorp/go-version/LICENSE
+++ /dev/null
@@ -1,356 +0,0 @@
-Copyright (c) 2014 HashiCorp, Inc.
-
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. “Contributor”
-
- means each individual or legal entity that creates, contributes to the
- creation of, or owns Covered Software.
-
-1.2. “Contributor Version”
-
- means the combination of the Contributions of others (if any) used by a
- Contributor and that particular Contributor’s Contribution.
-
-1.3. “Contribution”
-
- means Covered Software of a particular Contributor.
-
-1.4. “Covered Software”
-
- means Source Code Form to which the initial Contributor has attached the
- notice in Exhibit A, the Executable Form of such Source Code Form, and
- Modifications of such Source Code Form, in each case including portions
- thereof.
-
-1.5. “Incompatible With Secondary Licenses”
- means
-
- a. that the initial Contributor has attached the notice described in
- Exhibit B to the Covered Software; or
-
- b. that the Covered Software was made available under the terms of version
- 1.1 or earlier of the License, but not also under the terms of a
- Secondary License.
-
-1.6. “Executable Form”
-
- means any form of the work other than Source Code Form.
-
-1.7. “Larger Work”
-
- means a work that combines Covered Software with other material, in a separate
- file or files, that is not Covered Software.
-
-1.8. “License”
-
- means this document.
-
-1.9. “Licensable”
-
- means having the right to grant, to the maximum extent possible, whether at the
- time of the initial grant or subsequently, any and all of the rights conveyed by
- this License.
-
-1.10. “Modifications”
-
- means any of the following:
-
- a. any file in Source Code Form that results from an addition to, deletion
- from, or modification of the contents of Covered Software; or
-
- b. any new file in Source Code Form that contains any Covered Software.
-
-1.11. “Patent Claims” of a Contributor
-
- means any patent claim(s), including without limitation, method, process,
- and apparatus claims, in any patent Licensable by such Contributor that
- would be infringed, but for the grant of the License, by the making,
- using, selling, offering for sale, having made, import, or transfer of
- either its Contributions or its Contributor Version.
-
-1.12. “Secondary License”
-
- means either the GNU General Public License, Version 2.0, the GNU Lesser
- General Public License, Version 2.1, the GNU Affero General Public
- License, Version 3.0, or any later versions of those licenses.
-
-1.13. “Source Code Form”
-
- means the form of the work preferred for making modifications.
-
-1.14. “You” (or “Your”)
-
- means an individual or a legal entity exercising rights under this
- License. For legal entities, “You” includes any entity that controls, is
- controlled by, or is under common control with You. For purposes of this
- definition, “control” means (a) the power, direct or indirect, to cause
- the direction or management of such entity, whether by contract or
- otherwise, or (b) ownership of more than fifty percent (50%) of the
- outstanding shares or beneficial ownership of such entity.
-
-
-2. License Grants and Conditions
-
-2.1. Grants
-
- Each Contributor hereby grants You a world-wide, royalty-free,
- non-exclusive license:
-
- a. under intellectual property rights (other than patent or trademark)
- Licensable by such Contributor to use, reproduce, make available,
- modify, display, perform, distribute, and otherwise exploit its
- Contributions, either on an unmodified basis, with Modifications, or as
- part of a Larger Work; and
-
- b. under Patent Claims of such Contributor to make, use, sell, offer for
- sale, have made, import, and otherwise transfer either its Contributions
- or its Contributor Version.
-
-2.2. Effective Date
-
- The licenses granted in Section 2.1 with respect to any Contribution become
- effective for each Contribution on the date the Contributor first distributes
- such Contribution.
-
-2.3. Limitations on Grant Scope
-
- The licenses granted in this Section 2 are the only rights granted under this
- License. No additional rights or licenses will be implied from the distribution
- or licensing of Covered Software under this License. Notwithstanding Section
- 2.1(b) above, no patent license is granted by a Contributor:
-
- a. for any code that a Contributor has removed from Covered Software; or
-
- b. for infringements caused by: (i) Your and any other third party’s
- modifications of Covered Software, or (ii) the combination of its
- Contributions with other software (except as part of its Contributor
- Version); or
-
- c. under Patent Claims infringed by Covered Software in the absence of its
- Contributions.
-
- This License does not grant any rights in the trademarks, service marks, or
- logos of any Contributor (except as may be necessary to comply with the
- notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
- No Contributor makes additional grants as a result of Your choice to
- distribute the Covered Software under a subsequent version of this License
- (see Section 10.2) or under the terms of a Secondary License (if permitted
- under the terms of Section 3.3).
-
-2.5. Representation
-
- Each Contributor represents that the Contributor believes its Contributions
- are its original creation(s) or it has sufficient rights to grant the
- rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
- This License is not intended to limit any rights You have under applicable
- copyright doctrines of fair use, fair dealing, or other equivalents.
-
-2.7. Conditions
-
- Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
- Section 2.1.
-
-
-3. Responsibilities
-
-3.1. Distribution of Source Form
-
- All distribution of Covered Software in Source Code Form, including any
- Modifications that You create or to which You contribute, must be under the
- terms of this License. You must inform recipients that the Source Code Form
- of the Covered Software is governed by the terms of this License, and how
- they can obtain a copy of this License. You may not attempt to alter or
- restrict the recipients’ rights in the Source Code Form.
-
-3.2. Distribution of Executable Form
-
- If You distribute Covered Software in Executable Form then:
-
- a. such Covered Software must also be made available in Source Code Form,
- as described in Section 3.1, and You must inform recipients of the
- Executable Form how they can obtain a copy of such Source Code Form by
- reasonable means in a timely manner, at a charge no more than the cost
- of distribution to the recipient; and
-
- b. You may distribute such Executable Form under the terms of this License,
- or sublicense it under different terms, provided that the license for
- the Executable Form does not attempt to limit or alter the recipients’
- rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
- You may create and distribute a Larger Work under terms of Your choice,
- provided that You also comply with the requirements of this License for the
- Covered Software. If the Larger Work is a combination of Covered Software
- with a work governed by one or more Secondary Licenses, and the Covered
- Software is not Incompatible With Secondary Licenses, this License permits
- You to additionally distribute such Covered Software under the terms of
- such Secondary License(s), so that the recipient of the Larger Work may, at
- their option, further distribute the Covered Software under the terms of
- either this License or such Secondary License(s).
-
-3.4. Notices
-
- You may not remove or alter the substance of any license notices (including
- copyright notices, patent notices, disclaimers of warranty, or limitations
- of liability) contained within the Source Code Form of the Covered
- Software, except that You may alter any license notices to the extent
- required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
- You may choose to offer, and to charge a fee for, warranty, support,
- indemnity or liability obligations to one or more recipients of Covered
- Software. However, You may do so only on Your own behalf, and not on behalf
- of any Contributor. You must make it absolutely clear that any such
- warranty, support, indemnity, or liability obligation is offered by You
- alone, and You hereby agree to indemnify every Contributor for any
- liability incurred by such Contributor as a result of warranty, support,
- indemnity or liability terms You offer. You may include additional
- disclaimers of warranty and limitations of liability specific to any
- jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
-
- If it is impossible for You to comply with any of the terms of this License
- with respect to some or all of the Covered Software due to statute, judicial
- order, or regulation then You must: (a) comply with the terms of this License
- to the maximum extent possible; and (b) describe the limitations and the code
- they affect. Such description must be placed in a text file included with all
- distributions of the Covered Software under this License. Except to the
- extent prohibited by statute or regulation, such description must be
- sufficiently detailed for a recipient of ordinary skill to be able to
- understand it.
-
-5. Termination
-
-5.1. The rights granted under this License will terminate automatically if You
- fail to comply with any of its terms. However, if You become compliant,
- then the rights granted under this License from a particular Contributor
- are reinstated (a) provisionally, unless and until such Contributor
- explicitly and finally terminates Your grants, and (b) on an ongoing basis,
- if such Contributor fails to notify You of the non-compliance by some
- reasonable means prior to 60 days after You have come back into compliance.
- Moreover, Your grants from a particular Contributor are reinstated on an
- ongoing basis if such Contributor notifies You of the non-compliance by
- some reasonable means, this is the first time You have received notice of
- non-compliance with this License from such Contributor, and You become
- compliant prior to 30 days after Your receipt of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
- infringement claim (excluding declaratory judgment actions, counter-claims,
- and cross-claims) alleging that a Contributor Version directly or
- indirectly infringes any patent, then the rights granted to You by any and
- all Contributors for the Covered Software under Section 2.1 of this License
- shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
- license agreements (excluding distributors and resellers) which have been
- validly granted by You or Your distributors under this License prior to
- termination shall survive termination.
-
-6. Disclaimer of Warranty
-
- Covered Software is provided under this License on an “as is” basis, without
- warranty of any kind, either expressed, implied, or statutory, including,
- without limitation, warranties that the Covered Software is free of defects,
- merchantable, fit for a particular purpose or non-infringing. The entire
- risk as to the quality and performance of the Covered Software is with You.
- Should any Covered Software prove defective in any respect, You (not any
- Contributor) assume the cost of any necessary servicing, repair, or
- correction. This disclaimer of warranty constitutes an essential part of this
- License. No use of any Covered Software is authorized under this License
- except under this disclaimer.
-
-7. Limitation of Liability
-
- Under no circumstances and under no legal theory, whether tort (including
- negligence), contract, or otherwise, shall any Contributor, or anyone who
- distributes Covered Software as permitted above, be liable to You for any
- direct, indirect, special, incidental, or consequential damages of any
- character including, without limitation, damages for lost profits, loss of
- goodwill, work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses, even if such party shall have been
- informed of the possibility of such damages. This limitation of liability
- shall not apply to liability for death or personal injury resulting from such
- party’s negligence to the extent applicable law prohibits such limitation.
- Some jurisdictions do not allow the exclusion or limitation of incidental or
- consequential damages, so this exclusion and limitation may not apply to You.
-
-8. Litigation
-
- Any litigation relating to this License may be brought only in the courts of
- a jurisdiction where the defendant maintains its principal place of business
- and such litigation shall be governed by laws of that jurisdiction, without
- reference to its conflict-of-law provisions. Nothing in this Section shall
- prevent a party’s ability to bring cross-claims or counter-claims.
-
-9. Miscellaneous
-
- This License represents the complete agreement concerning the subject matter
- hereof. If any provision of this License is held to be unenforceable, such
- provision shall be reformed only to the extent necessary to make it
- enforceable. Any law or regulation which provides that the language of a
- contract shall be construed against the drafter shall not be used to construe
- this License against a Contributor.
-
-
-10. Versions of the License
-
-10.1. New Versions
-
- Mozilla Foundation is the license steward. Except as provided in Section
- 10.3, no one other than the license steward has the right to modify or
- publish new versions of this License. Each version will be given a
- distinguishing version number.
-
-10.2. Effect of New Versions
-
- You may distribute the Covered Software under the terms of the version of
- the License under which You originally received the Covered Software, or
- under the terms of any subsequent version published by the license
- steward.
-
-10.3. Modified Versions
-
- If you create software not governed by this License, and you want to
- create a new license for such software, you may create and use a modified
- version of this License if you rename the license and remove any
- references to the name of the license steward (except to note that such
- modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
- If You choose to distribute Source Code Form that is Incompatible With
- Secondary Licenses under the terms of this version of the License, the
- notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
-
- This Source Code Form is subject to the
- terms of the Mozilla Public License, v.
- 2.0. If a copy of the MPL was not
- distributed with this file, You can
- obtain one at
- http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file, then
-You may include the notice in a location (such as a LICENSE file in a relevant
-directory) where a recipient would be likely to look for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - “Incompatible With Secondary Licenses” Notice
-
- This Source Code Form is “Incompatible
- With Secondary Licenses”, as defined by
- the Mozilla Public License, v. 2.0.
-
diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md
deleted file mode 100644
index 4b7806cd..00000000
--- a/vendor/github.com/hashicorp/go-version/README.md
+++ /dev/null
@@ -1,66 +0,0 @@
-# Versioning Library for Go
-
-[](https://godoc.org/github.com/hashicorp/go-version)
-
-go-version is a library for parsing versions and version constraints,
-and verifying versions against a set of constraints. go-version
-can sort a collection of versions properly, handles prerelease/beta
-versions, can increment versions, etc.
-
-Versions used with go-version must follow [SemVer](http://semver.org/).
-
-## Installation and Usage
-
-Package documentation can be found on
-[GoDoc](http://godoc.org/github.com/hashicorp/go-version).
-
-Installation can be done with a normal `go get`:
-
-```
-$ go get github.com/hashicorp/go-version
-```
-
-#### Version Parsing and Comparison
-
-```go
-v1, err := version.NewVersion("1.2")
-v2, err := version.NewVersion("1.5+metadata")
-
-// Comparison example. There is also GreaterThan, Equal, and just
-// a simple Compare that returns an int allowing easy >=, <=, etc.
-if v1.LessThan(v2) {
- fmt.Printf("%s is less than %s", v1, v2)
-}
-```
-
-#### Version Constraints
-
-```go
-v1, err := version.NewVersion("1.2")
-
-// Constraints example.
-constraints, err := version.NewConstraint(">= 1.0, < 1.4")
-if constraints.Check(v1) {
- fmt.Printf("%s satisfies constraints %s", v1, constraints)
-}
-```
-
-#### Version Sorting
-
-```go
-versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"}
-versions := make([]*version.Version, len(versionsRaw))
-for i, raw := range versionsRaw {
- v, _ := version.NewVersion(raw)
- versions[i] = v
-}
-
-// After this, the versions are properly sorted
-sort.Sort(version.Collection(versions))
-```
-
-## Issues and Contributing
-
-If you find an issue with this library, please report an issue. If you'd
-like, we welcome any contributions. Fork this library and submit a pull
-request.
diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go
deleted file mode 100644
index 29bdc4d2..00000000
--- a/vendor/github.com/hashicorp/go-version/constraint.go
+++ /dev/null
@@ -1,298 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package version
-
-import (
- "fmt"
- "regexp"
- "sort"
- "strings"
-)
-
-// Constraint represents a single constraint for a version, such as
-// ">= 1.0".
-type Constraint struct {
- f constraintFunc
- op operator
- check *Version
- original string
-}
-
-func (c *Constraint) Equals(con *Constraint) bool {
- return c.op == con.op && c.check.Equal(con.check)
-}
-
-// Constraints is a slice of constraints. We make a custom type so that
-// we can add methods to it.
-type Constraints []*Constraint
-
-type constraintFunc func(v, c *Version) bool
-
-var constraintOperators map[string]constraintOperation
-
-type constraintOperation struct {
- op operator
- f constraintFunc
-}
-
-var constraintRegexp *regexp.Regexp
-
-func init() {
- constraintOperators = map[string]constraintOperation{
- "": {op: equal, f: constraintEqual},
- "=": {op: equal, f: constraintEqual},
- "!=": {op: notEqual, f: constraintNotEqual},
- ">": {op: greaterThan, f: constraintGreaterThan},
- "<": {op: lessThan, f: constraintLessThan},
- ">=": {op: greaterThanEqual, f: constraintGreaterThanEqual},
- "<=": {op: lessThanEqual, f: constraintLessThanEqual},
- "~>": {op: pessimistic, f: constraintPessimistic},
- }
-
- ops := make([]string, 0, len(constraintOperators))
- for k := range constraintOperators {
- ops = append(ops, regexp.QuoteMeta(k))
- }
-
- constraintRegexp = regexp.MustCompile(fmt.Sprintf(
- `^\s*(%s)\s*(%s)\s*$`,
- strings.Join(ops, "|"),
- VersionRegexpRaw))
-}
-
-// NewConstraint will parse one or more constraints from the given
-// constraint string. The string must be a comma-separated list of
-// constraints.
-func NewConstraint(v string) (Constraints, error) {
- vs := strings.Split(v, ",")
- result := make([]*Constraint, len(vs))
- for i, single := range vs {
- c, err := parseSingle(single)
- if err != nil {
- return nil, err
- }
-
- result[i] = c
- }
-
- return Constraints(result), nil
-}
-
-// MustConstraints is a helper that wraps a call to a function
-// returning (Constraints, error) and panics if error is non-nil.
-func MustConstraints(c Constraints, err error) Constraints {
- if err != nil {
- panic(err)
- }
-
- return c
-}
-
-// Check tests if a version satisfies all the constraints.
-func (cs Constraints) Check(v *Version) bool {
- for _, c := range cs {
- if !c.Check(v) {
- return false
- }
- }
-
- return true
-}
-
-// Equals compares Constraints with other Constraints
-// for equality. This may not represent logical equivalence
-// of compared constraints.
-// e.g. even though '>0.1,>0.2' is logically equivalent
-// to '>0.2' it is *NOT* treated as equal.
-//
-// Missing operator is treated as equal to '=', whitespaces
-// are ignored and constraints are sorted before comaparison.
-func (cs Constraints) Equals(c Constraints) bool {
- if len(cs) != len(c) {
- return false
- }
-
- // make copies to retain order of the original slices
- left := make(Constraints, len(cs))
- copy(left, cs)
- sort.Stable(left)
- right := make(Constraints, len(c))
- copy(right, c)
- sort.Stable(right)
-
- // compare sorted slices
- for i, con := range left {
- if !con.Equals(right[i]) {
- return false
- }
- }
-
- return true
-}
-
-func (cs Constraints) Len() int {
- return len(cs)
-}
-
-func (cs Constraints) Less(i, j int) bool {
- if cs[i].op < cs[j].op {
- return true
- }
- if cs[i].op > cs[j].op {
- return false
- }
-
- return cs[i].check.LessThan(cs[j].check)
-}
-
-func (cs Constraints) Swap(i, j int) {
- cs[i], cs[j] = cs[j], cs[i]
-}
-
-// Returns the string format of the constraints
-func (cs Constraints) String() string {
- csStr := make([]string, len(cs))
- for i, c := range cs {
- csStr[i] = c.String()
- }
-
- return strings.Join(csStr, ",")
-}
-
-// Check tests if a constraint is validated by the given version.
-func (c *Constraint) Check(v *Version) bool {
- return c.f(v, c.check)
-}
-
-// Prerelease returns true if the version underlying this constraint
-// contains a prerelease field.
-func (c *Constraint) Prerelease() bool {
- return len(c.check.Prerelease()) > 0
-}
-
-func (c *Constraint) String() string {
- return c.original
-}
-
-func parseSingle(v string) (*Constraint, error) {
- matches := constraintRegexp.FindStringSubmatch(v)
- if matches == nil {
- return nil, fmt.Errorf("Malformed constraint: %s", v)
- }
-
- check, err := NewVersion(matches[2])
- if err != nil {
- return nil, err
- }
-
- cop := constraintOperators[matches[1]]
-
- return &Constraint{
- f: cop.f,
- op: cop.op,
- check: check,
- original: v,
- }, nil
-}
-
-func prereleaseCheck(v, c *Version) bool {
- switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; {
- case cPre && vPre:
- // A constraint with a pre-release can only match a pre-release version
- // with the same base segments.
- return v.equalSegments(c)
-
- case !cPre && vPre:
- // A constraint without a pre-release can only match a version without a
- // pre-release.
- return false
-
- case cPre && !vPre:
- // OK, except with the pessimistic operator
- case !cPre && !vPre:
- // OK
- }
- return true
-}
-
-//-------------------------------------------------------------------
-// Constraint functions
-//-------------------------------------------------------------------
-
-type operator rune
-
-const (
- equal operator = '='
- notEqual operator = '≠'
- greaterThan operator = '>'
- lessThan operator = '<'
- greaterThanEqual operator = '≥'
- lessThanEqual operator = '≤'
- pessimistic operator = '~'
-)
-
-func constraintEqual(v, c *Version) bool {
- return v.Equal(c)
-}
-
-func constraintNotEqual(v, c *Version) bool {
- return !v.Equal(c)
-}
-
-func constraintGreaterThan(v, c *Version) bool {
- return prereleaseCheck(v, c) && v.Compare(c) == 1
-}
-
-func constraintLessThan(v, c *Version) bool {
- return prereleaseCheck(v, c) && v.Compare(c) == -1
-}
-
-func constraintGreaterThanEqual(v, c *Version) bool {
- return prereleaseCheck(v, c) && v.Compare(c) >= 0
-}
-
-func constraintLessThanEqual(v, c *Version) bool {
- return prereleaseCheck(v, c) && v.Compare(c) <= 0
-}
-
-func constraintPessimistic(v, c *Version) bool {
- // Using a pessimistic constraint with a pre-release, restricts versions to pre-releases
- if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") {
- return false
- }
-
- // If the version being checked is naturally less than the constraint, then there
- // is no way for the version to be valid against the constraint
- if v.LessThan(c) {
- return false
- }
- // We'll use this more than once, so grab the length now so it's a little cleaner
- // to write the later checks
- cs := len(c.segments)
-
- // If the version being checked has less specificity than the constraint, then there
- // is no way for the version to be valid against the constraint
- if cs > len(v.segments) {
- return false
- }
-
- // Check the segments in the constraint against those in the version. If the version
- // being checked, at any point, does not have the same values in each index of the
- // constraints segments, then it cannot be valid against the constraint.
- for i := 0; i < c.si-1; i++ {
- if v.segments[i] != c.segments[i] {
- return false
- }
- }
-
- // Check the last part of the segment in the constraint. If the version segment at
- // this index is less than the constraints segment at this index, then it cannot
- // be valid against the constraint
- if c.segments[cs-1] > v.segments[cs-1] {
- return false
- }
-
- // If nothing has rejected the version by now, it's valid
- return true
-}
diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go
deleted file mode 100644
index 7c683c28..00000000
--- a/vendor/github.com/hashicorp/go-version/version.go
+++ /dev/null
@@ -1,441 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package version
-
-import (
- "bytes"
- "database/sql/driver"
- "fmt"
- "regexp"
- "strconv"
- "strings"
-)
-
-// The compiled regular expression used to test the validity of a version.
-var (
- versionRegexp *regexp.Regexp
- semverRegexp *regexp.Regexp
-)
-
-// The raw regular expression string used for testing the validity
-// of a version.
-const (
- VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
- `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` +
- `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` +
- `?`
-
- // SemverRegexpRaw requires a separator between version and prerelease
- SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
- `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` +
- `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` +
- `?`
-)
-
-// Version represents a single version.
-type Version struct {
- metadata string
- pre string
- segments []int64
- si int
- original string
-}
-
-func init() {
- versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$")
- semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$")
-}
-
-// NewVersion parses the given version and returns a new
-// Version.
-func NewVersion(v string) (*Version, error) {
- return newVersion(v, versionRegexp)
-}
-
-// NewSemver parses the given version and returns a new
-// Version that adheres strictly to SemVer specs
-// https://semver.org/
-func NewSemver(v string) (*Version, error) {
- return newVersion(v, semverRegexp)
-}
-
-func newVersion(v string, pattern *regexp.Regexp) (*Version, error) {
- matches := pattern.FindStringSubmatch(v)
- if matches == nil {
- return nil, fmt.Errorf("Malformed version: %s", v)
- }
- segmentsStr := strings.Split(matches[1], ".")
- segments := make([]int64, len(segmentsStr))
- for i, str := range segmentsStr {
- val, err := strconv.ParseInt(str, 10, 64)
- if err != nil {
- return nil, fmt.Errorf(
- "Error parsing version: %s", err)
- }
-
- segments[i] = val
- }
-
- // Even though we could support more than three segments, if we
- // got less than three, pad it with 0s. This is to cover the basic
- // default usecase of semver, which is MAJOR.MINOR.PATCH at the minimum
- for i := len(segments); i < 3; i++ {
- segments = append(segments, 0)
- }
-
- pre := matches[7]
- if pre == "" {
- pre = matches[4]
- }
-
- return &Version{
- metadata: matches[10],
- pre: pre,
- segments: segments,
- si: len(segmentsStr),
- original: v,
- }, nil
-}
-
-// Must is a helper that wraps a call to a function returning (*Version, error)
-// and panics if error is non-nil.
-func Must(v *Version, err error) *Version {
- if err != nil {
- panic(err)
- }
-
- return v
-}
-
-// Compare compares this version to another version. This
-// returns -1, 0, or 1 if this version is smaller, equal,
-// or larger than the other version, respectively.
-//
-// If you want boolean results, use the LessThan, Equal,
-// GreaterThan, GreaterThanOrEqual or LessThanOrEqual methods.
-func (v *Version) Compare(other *Version) int {
- // A quick, efficient equality check
- if v.String() == other.String() {
- return 0
- }
-
- // If the segments are the same, we must compare on prerelease info
- if v.equalSegments(other) {
- preSelf := v.Prerelease()
- preOther := other.Prerelease()
- if preSelf == "" && preOther == "" {
- return 0
- }
- if preSelf == "" {
- return 1
- }
- if preOther == "" {
- return -1
- }
-
- return comparePrereleases(preSelf, preOther)
- }
-
- segmentsSelf := v.Segments64()
- segmentsOther := other.Segments64()
- // Get the highest specificity (hS), or if they're equal, just use segmentSelf length
- lenSelf := len(segmentsSelf)
- lenOther := len(segmentsOther)
- hS := lenSelf
- if lenSelf < lenOther {
- hS = lenOther
- }
- // Compare the segments
- // Because a constraint could have more/less specificity than the version it's
- // checking, we need to account for a lopsided or jagged comparison
- for i := 0; i < hS; i++ {
- if i > lenSelf-1 {
- // This means Self had the lower specificity
- // Check to see if the remaining segments in Other are all zeros
- if !allZero(segmentsOther[i:]) {
- // if not, it means that Other has to be greater than Self
- return -1
- }
- break
- } else if i > lenOther-1 {
- // this means Other had the lower specificity
- // Check to see if the remaining segments in Self are all zeros -
- if !allZero(segmentsSelf[i:]) {
- // if not, it means that Self has to be greater than Other
- return 1
- }
- break
- }
- lhs := segmentsSelf[i]
- rhs := segmentsOther[i]
- if lhs == rhs {
- continue
- } else if lhs < rhs {
- return -1
- }
- // Otherwis, rhs was > lhs, they're not equal
- return 1
- }
-
- // if we got this far, they're equal
- return 0
-}
-
-func (v *Version) equalSegments(other *Version) bool {
- segmentsSelf := v.Segments64()
- segmentsOther := other.Segments64()
-
- if len(segmentsSelf) != len(segmentsOther) {
- return false
- }
- for i, v := range segmentsSelf {
- if v != segmentsOther[i] {
- return false
- }
- }
- return true
-}
-
-func allZero(segs []int64) bool {
- for _, s := range segs {
- if s != 0 {
- return false
- }
- }
- return true
-}
-
-func comparePart(preSelf string, preOther string) int {
- if preSelf == preOther {
- return 0
- }
-
- var selfInt int64
- selfNumeric := true
- selfInt, err := strconv.ParseInt(preSelf, 10, 64)
- if err != nil {
- selfNumeric = false
- }
-
- var otherInt int64
- otherNumeric := true
- otherInt, err = strconv.ParseInt(preOther, 10, 64)
- if err != nil {
- otherNumeric = false
- }
-
- // if a part is empty, we use the other to decide
- if preSelf == "" {
- if otherNumeric {
- return -1
- }
- return 1
- }
-
- if preOther == "" {
- if selfNumeric {
- return 1
- }
- return -1
- }
-
- if selfNumeric && !otherNumeric {
- return -1
- } else if !selfNumeric && otherNumeric {
- return 1
- } else if !selfNumeric && !otherNumeric && preSelf > preOther {
- return 1
- } else if selfInt > otherInt {
- return 1
- }
-
- return -1
-}
-
-func comparePrereleases(v string, other string) int {
- // the same pre release!
- if v == other {
- return 0
- }
-
- // split both pre releases for analyse their parts
- selfPreReleaseMeta := strings.Split(v, ".")
- otherPreReleaseMeta := strings.Split(other, ".")
-
- selfPreReleaseLen := len(selfPreReleaseMeta)
- otherPreReleaseLen := len(otherPreReleaseMeta)
-
- biggestLen := otherPreReleaseLen
- if selfPreReleaseLen > otherPreReleaseLen {
- biggestLen = selfPreReleaseLen
- }
-
- // loop for parts to find the first difference
- for i := 0; i < biggestLen; i = i + 1 {
- partSelfPre := ""
- if i < selfPreReleaseLen {
- partSelfPre = selfPreReleaseMeta[i]
- }
-
- partOtherPre := ""
- if i < otherPreReleaseLen {
- partOtherPre = otherPreReleaseMeta[i]
- }
-
- compare := comparePart(partSelfPre, partOtherPre)
- // if parts are equals, continue the loop
- if compare != 0 {
- return compare
- }
- }
-
- return 0
-}
-
-// Core returns a new version constructed from only the MAJOR.MINOR.PATCH
-// segments of the version, without prerelease or metadata.
-func (v *Version) Core() *Version {
- segments := v.Segments64()
- segmentsOnly := fmt.Sprintf("%d.%d.%d", segments[0], segments[1], segments[2])
- return Must(NewVersion(segmentsOnly))
-}
-
-// Equal tests if two versions are equal.
-func (v *Version) Equal(o *Version) bool {
- if v == nil || o == nil {
- return v == o
- }
-
- return v.Compare(o) == 0
-}
-
-// GreaterThan tests if this version is greater than another version.
-func (v *Version) GreaterThan(o *Version) bool {
- return v.Compare(o) > 0
-}
-
-// GreaterThanOrEqual tests if this version is greater than or equal to another version.
-func (v *Version) GreaterThanOrEqual(o *Version) bool {
- return v.Compare(o) >= 0
-}
-
-// LessThan tests if this version is less than another version.
-func (v *Version) LessThan(o *Version) bool {
- return v.Compare(o) < 0
-}
-
-// LessThanOrEqual tests if this version is less than or equal to another version.
-func (v *Version) LessThanOrEqual(o *Version) bool {
- return v.Compare(o) <= 0
-}
-
-// Metadata returns any metadata that was part of the version
-// string.
-//
-// Metadata is anything that comes after the "+" in the version.
-// For example, with "1.2.3+beta", the metadata is "beta".
-func (v *Version) Metadata() string {
- return v.metadata
-}
-
-// Prerelease returns any prerelease data that is part of the version,
-// or blank if there is no prerelease data.
-//
-// Prerelease information is anything that comes after the "-" in the
-// version (but before any metadata). For example, with "1.2.3-beta",
-// the prerelease information is "beta".
-func (v *Version) Prerelease() string {
- return v.pre
-}
-
-// Segments returns the numeric segments of the version as a slice of ints.
-//
-// This excludes any metadata or pre-release information. For example,
-// for a version "1.2.3-beta", segments will return a slice of
-// 1, 2, 3.
-func (v *Version) Segments() []int {
- segmentSlice := make([]int, len(v.segments))
- for i, v := range v.segments {
- segmentSlice[i] = int(v)
- }
- return segmentSlice
-}
-
-// Segments64 returns the numeric segments of the version as a slice of int64s.
-//
-// This excludes any metadata or pre-release information. For example,
-// for a version "1.2.3-beta", segments will return a slice of
-// 1, 2, 3.
-func (v *Version) Segments64() []int64 {
- result := make([]int64, len(v.segments))
- copy(result, v.segments)
- return result
-}
-
-// String returns the full version string included pre-release
-// and metadata information.
-//
-// This value is rebuilt according to the parsed segments and other
-// information. Therefore, ambiguities in the version string such as
-// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and
-// missing parts (1.0 => 1.0.0) will be made into a canonicalized form
-// as shown in the parenthesized examples.
-func (v *Version) String() string {
- var buf bytes.Buffer
- fmtParts := make([]string, len(v.segments))
- for i, s := range v.segments {
- // We can ignore err here since we've pre-parsed the values in segments
- str := strconv.FormatInt(s, 10)
- fmtParts[i] = str
- }
- fmt.Fprintf(&buf, strings.Join(fmtParts, "."))
- if v.pre != "" {
- fmt.Fprintf(&buf, "-%s", v.pre)
- }
- if v.metadata != "" {
- fmt.Fprintf(&buf, "+%s", v.metadata)
- }
-
- return buf.String()
-}
-
-// Original returns the original parsed version as-is, including any
-// potential whitespace, `v` prefix, etc.
-func (v *Version) Original() string {
- return v.original
-}
-
-// UnmarshalText implements encoding.TextUnmarshaler interface.
-func (v *Version) UnmarshalText(b []byte) error {
- temp, err := NewVersion(string(b))
- if err != nil {
- return err
- }
-
- *v = *temp
-
- return nil
-}
-
-// MarshalText implements encoding.TextMarshaler interface.
-func (v *Version) MarshalText() ([]byte, error) {
- return []byte(v.String()), nil
-}
-
-// Scan implements the sql.Scanner interface.
-func (v *Version) Scan(src interface{}) error {
- switch src := src.(type) {
- case string:
- return v.UnmarshalText([]byte(src))
- case nil:
- return nil
- default:
- return fmt.Errorf("cannot scan %T as Version", src)
- }
-}
-
-// Value implements the driver.Valuer interface.
-func (v *Version) Value() (driver.Value, error) {
- return v.String(), nil
-}
diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go
deleted file mode 100644
index 83547fe1..00000000
--- a/vendor/github.com/hashicorp/go-version/version_collection.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package version
-
-// Collection is a type that implements the sort.Interface interface
-// so that versions can be sorted.
-type Collection []*Version
-
-func (v Collection) Len() int {
- return len(v)
-}
-
-func (v Collection) Less(i, j int) bool {
- return v[i].LessThan(v[j])
-}
-
-func (v Collection) Swap(i, j int) {
- v[i], v[j] = v[j], v[i]
-}
diff --git a/vendor/github.com/hashicorp/hc-install/.copywrite.hcl b/vendor/github.com/hashicorp/hc-install/.copywrite.hcl
deleted file mode 100644
index 45ec82c7..00000000
--- a/vendor/github.com/hashicorp/hc-install/.copywrite.hcl
+++ /dev/null
@@ -1,7 +0,0 @@
-schema_version = 1
-
-project {
- license = "MPL-2.0"
- copyright_year = 2020
- header_ignore = []
-}
diff --git a/vendor/github.com/hashicorp/hc-install/.go-version b/vendor/github.com/hashicorp/hc-install/.go-version
deleted file mode 100644
index ce2dd535..00000000
--- a/vendor/github.com/hashicorp/hc-install/.go-version
+++ /dev/null
@@ -1 +0,0 @@
-1.21.5
diff --git a/vendor/github.com/hashicorp/hc-install/LICENSE b/vendor/github.com/hashicorp/hc-install/LICENSE
deleted file mode 100644
index c121cee6..00000000
--- a/vendor/github.com/hashicorp/hc-install/LICENSE
+++ /dev/null
@@ -1,375 +0,0 @@
-Copyright (c) 2020 HashiCorp, Inc.
-
-Mozilla Public License Version 2.0
-==================================
-
-1. Definitions
---------------
-
-1.1. "Contributor"
- means each individual or legal entity that creates, contributes to
- the creation of, or owns Covered Software.
-
-1.2. "Contributor Version"
- means the combination of the Contributions of others (if any) used
- by a Contributor and that particular Contributor's Contribution.
-
-1.3. "Contribution"
- means Covered Software of a particular Contributor.
-
-1.4. "Covered Software"
- means Source Code Form to which the initial Contributor has attached
- the notice in Exhibit A, the Executable Form of such Source Code
- Form, and Modifications of such Source Code Form, in each case
- including portions thereof.
-
-1.5. "Incompatible With Secondary Licenses"
- means
-
- (a) that the initial Contributor has attached the notice described
- in Exhibit B to the Covered Software; or
-
- (b) that the Covered Software was made available under the terms of
- version 1.1 or earlier of the License, but not also under the
- terms of a Secondary License.
-
-1.6. "Executable Form"
- means any form of the work other than Source Code Form.
-
-1.7. "Larger Work"
- means a work that combines Covered Software with other material, in
- a separate file or files, that is not Covered Software.
-
-1.8. "License"
- means this document.
-
-1.9. "Licensable"
- means having the right to grant, to the maximum extent possible,
- whether at the time of the initial grant or subsequently, any and
- all of the rights conveyed by this License.
-
-1.10. "Modifications"
- means any of the following:
-
- (a) any file in Source Code Form that results from an addition to,
- deletion from, or modification of the contents of Covered
- Software; or
-
- (b) any new file in Source Code Form that contains any Covered
- Software.
-
-1.11. "Patent Claims" of a Contributor
- means any patent claim(s), including without limitation, method,
- process, and apparatus claims, in any patent Licensable by such
- Contributor that would be infringed, but for the grant of the
- License, by the making, using, selling, offering for sale, having
- made, import, or transfer of either its Contributions or its
- Contributor Version.
-
-1.12. "Secondary License"
- means either the GNU General Public License, Version 2.0, the GNU
- Lesser General Public License, Version 2.1, the GNU Affero General
- Public License, Version 3.0, or any later versions of those
- licenses.
-
-1.13. "Source Code Form"
- means the form of the work preferred for making modifications.
-
-1.14. "You" (or "Your")
- means an individual or a legal entity exercising rights under this
- License. For legal entities, "You" includes any entity that
- controls, is controlled by, or is under common control with You. For
- purposes of this definition, "control" means (a) the power, direct
- or indirect, to cause the direction or management of such entity,
- whether by contract or otherwise, or (b) ownership of more than
- fifty percent (50%) of the outstanding shares or beneficial
- ownership of such entity.
-
-2. License Grants and Conditions
---------------------------------
-
-2.1. Grants
-
-Each Contributor hereby grants You a world-wide, royalty-free,
-non-exclusive license:
-
-(a) under intellectual property rights (other than patent or trademark)
- Licensable by such Contributor to use, reproduce, make available,
- modify, display, perform, distribute, and otherwise exploit its
- Contributions, either on an unmodified basis, with Modifications, or
- as part of a Larger Work; and
-
-(b) under Patent Claims of such Contributor to make, use, sell, offer
- for sale, have made, import, and otherwise transfer either its
- Contributions or its Contributor Version.
-
-2.2. Effective Date
-
-The licenses granted in Section 2.1 with respect to any Contribution
-become effective for each Contribution on the date the Contributor first
-distributes such Contribution.
-
-2.3. Limitations on Grant Scope
-
-The licenses granted in this Section 2 are the only rights granted under
-this License. No additional rights or licenses will be implied from the
-distribution or licensing of Covered Software under this License.
-Notwithstanding Section 2.1(b) above, no patent license is granted by a
-Contributor:
-
-(a) for any code that a Contributor has removed from Covered Software;
- or
-
-(b) for infringements caused by: (i) Your and any other third party's
- modifications of Covered Software, or (ii) the combination of its
- Contributions with other software (except as part of its Contributor
- Version); or
-
-(c) under Patent Claims infringed by Covered Software in the absence of
- its Contributions.
-
-This License does not grant any rights in the trademarks, service marks,
-or logos of any Contributor (except as may be necessary to comply with
-the notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
-No Contributor makes additional grants as a result of Your choice to
-distribute the Covered Software under a subsequent version of this
-License (see Section 10.2) or under the terms of a Secondary License (if
-permitted under the terms of Section 3.3).
-
-2.5. Representation
-
-Each Contributor represents that the Contributor believes its
-Contributions are its original creation(s) or it has sufficient rights
-to grant the rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
-This License is not intended to limit any rights You have under
-applicable copyright doctrines of fair use, fair dealing, or other
-equivalents.
-
-2.7. Conditions
-
-Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
-in Section 2.1.
-
-3. Responsibilities
--------------------
-
-3.1. Distribution of Source Form
-
-All distribution of Covered Software in Source Code Form, including any
-Modifications that You create or to which You contribute, must be under
-the terms of this License. You must inform recipients that the Source
-Code Form of the Covered Software is governed by the terms of this
-License, and how they can obtain a copy of this License. You may not
-attempt to alter or restrict the recipients' rights in the Source Code
-Form.
-
-3.2. Distribution of Executable Form
-
-If You distribute Covered Software in Executable Form then:
-
-(a) such Covered Software must also be made available in Source Code
- Form, as described in Section 3.1, and You must inform recipients of
- the Executable Form how they can obtain a copy of such Source Code
- Form by reasonable means in a timely manner, at a charge no more
- than the cost of distribution to the recipient; and
-
-(b) You may distribute such Executable Form under the terms of this
- License, or sublicense it under different terms, provided that the
- license for the Executable Form does not attempt to limit or alter
- the recipients' rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
-You may create and distribute a Larger Work under terms of Your choice,
-provided that You also comply with the requirements of this License for
-the Covered Software. If the Larger Work is a combination of Covered
-Software with a work governed by one or more Secondary Licenses, and the
-Covered Software is not Incompatible With Secondary Licenses, this
-License permits You to additionally distribute such Covered Software
-under the terms of such Secondary License(s), so that the recipient of
-the Larger Work may, at their option, further distribute the Covered
-Software under the terms of either this License or such Secondary
-License(s).
-
-3.4. Notices
-
-You may not remove or alter the substance of any license notices
-(including copyright notices, patent notices, disclaimers of warranty,
-or limitations of liability) contained within the Source Code Form of
-the Covered Software, except that You may alter any license notices to
-the extent required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
-You may choose to offer, and to charge a fee for, warranty, support,
-indemnity or liability obligations to one or more recipients of Covered
-Software. However, You may do so only on Your own behalf, and not on
-behalf of any Contributor. You must make it absolutely clear that any
-such warranty, support, indemnity, or liability obligation is offered by
-You alone, and You hereby agree to indemnify every Contributor for any
-liability incurred by such Contributor as a result of warranty, support,
-indemnity or liability terms You offer. You may include additional
-disclaimers of warranty and limitations of liability specific to any
-jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
----------------------------------------------------
-
-If it is impossible for You to comply with any of the terms of this
-License with respect to some or all of the Covered Software due to
-statute, judicial order, or regulation then You must: (a) comply with
-the terms of this License to the maximum extent possible; and (b)
-describe the limitations and the code they affect. Such description must
-be placed in a text file included with all distributions of the Covered
-Software under this License. Except to the extent prohibited by statute
-or regulation, such description must be sufficiently detailed for a
-recipient of ordinary skill to be able to understand it.
-
-5. Termination
---------------
-
-5.1. The rights granted under this License will terminate automatically
-if You fail to comply with any of its terms. However, if You become
-compliant, then the rights granted under this License from a particular
-Contributor are reinstated (a) provisionally, unless and until such
-Contributor explicitly and finally terminates Your grants, and (b) on an
-ongoing basis, if such Contributor fails to notify You of the
-non-compliance by some reasonable means prior to 60 days after You have
-come back into compliance. Moreover, Your grants from a particular
-Contributor are reinstated on an ongoing basis if such Contributor
-notifies You of the non-compliance by some reasonable means, this is the
-first time You have received notice of non-compliance with this License
-from such Contributor, and You become compliant prior to 30 days after
-Your receipt of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
-infringement claim (excluding declaratory judgment actions,
-counter-claims, and cross-claims) alleging that a Contributor Version
-directly or indirectly infringes any patent, then the rights granted to
-You by any and all Contributors for the Covered Software under Section
-2.1 of this License shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all
-end user license agreements (excluding distributors and resellers) which
-have been validly granted by You or Your distributors under this License
-prior to termination shall survive termination.
-
-************************************************************************
-* *
-* 6. Disclaimer of Warranty *
-* ------------------------- *
-* *
-* Covered Software is provided under this License on an "as is" *
-* basis, without warranty of any kind, either expressed, implied, or *
-* statutory, including, without limitation, warranties that the *
-* Covered Software is free of defects, merchantable, fit for a *
-* particular purpose or non-infringing. The entire risk as to the *
-* quality and performance of the Covered Software is with You. *
-* Should any Covered Software prove defective in any respect, You *
-* (not any Contributor) assume the cost of any necessary servicing, *
-* repair, or correction. This disclaimer of warranty constitutes an *
-* essential part of this License. No use of any Covered Software is *
-* authorized under this License except under this disclaimer. *
-* *
-************************************************************************
-
-************************************************************************
-* *
-* 7. Limitation of Liability *
-* -------------------------- *
-* *
-* Under no circumstances and under no legal theory, whether tort *
-* (including negligence), contract, or otherwise, shall any *
-* Contributor, or anyone who distributes Covered Software as *
-* permitted above, be liable to You for any direct, indirect, *
-* special, incidental, or consequential damages of any character *
-* including, without limitation, damages for lost profits, loss of *
-* goodwill, work stoppage, computer failure or malfunction, or any *
-* and all other commercial damages or losses, even if such party *
-* shall have been informed of the possibility of such damages. This *
-* limitation of liability shall not apply to liability for death or *
-* personal injury resulting from such party's negligence to the *
-* extent applicable law prohibits such limitation. Some *
-* jurisdictions do not allow the exclusion or limitation of *
-* incidental or consequential damages, so this exclusion and *
-* limitation may not apply to You. *
-* *
-************************************************************************
-
-8. Litigation
--------------
-
-Any litigation relating to this License may be brought only in the
-courts of a jurisdiction where the defendant maintains its principal
-place of business and such litigation shall be governed by laws of that
-jurisdiction, without reference to its conflict-of-law provisions.
-Nothing in this Section shall prevent a party's ability to bring
-cross-claims or counter-claims.
-
-9. Miscellaneous
-----------------
-
-This License represents the complete agreement concerning the subject
-matter hereof. If any provision of this License is held to be
-unenforceable, such provision shall be reformed only to the extent
-necessary to make it enforceable. Any law or regulation which provides
-that the language of a contract shall be construed against the drafter
-shall not be used to construe this License against a Contributor.
-
-10. Versions of the License
----------------------------
-
-10.1. New Versions
-
-Mozilla Foundation is the license steward. Except as provided in Section
-10.3, no one other than the license steward has the right to modify or
-publish new versions of this License. Each version will be given a
-distinguishing version number.
-
-10.2. Effect of New Versions
-
-You may distribute the Covered Software under the terms of the version
-of the License under which You originally received the Covered Software,
-or under the terms of any subsequent version published by the license
-steward.
-
-10.3. Modified Versions
-
-If you create software not governed by this License, and you want to
-create a new license for such software, you may create and use a
-modified version of this License if you rename the license and remove
-any references to the name of the license steward (except to note that
-such modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary
-Licenses
-
-If You choose to distribute Source Code Form that is Incompatible With
-Secondary Licenses under the terms of this version of the License, the
-notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
--------------------------------------------
-
- This Source Code Form is subject to the terms of the Mozilla Public
- License, v. 2.0. If a copy of the MPL was not distributed with this
- file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular
-file, then You may include the notice in a location (such as a LICENSE
-file in a relevant directory) where a recipient would be likely to look
-for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - "Incompatible With Secondary Licenses" Notice
----------------------------------------------------------
-
- This Source Code Form is "Incompatible With Secondary Licenses", as
- defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/hc-install/README.md b/vendor/github.com/hashicorp/hc-install/README.md
deleted file mode 100644
index 6e78b5a6..00000000
--- a/vendor/github.com/hashicorp/hc-install/README.md
+++ /dev/null
@@ -1,120 +0,0 @@
-# hc-install
-
-An **experimental** Go module for downloading or locating HashiCorp binaries, verifying signatures and checksums, and asserting version constraints.
-
-This module is a successor to tfinstall, available in pre-1.0 versions of [terraform-exec](https://github.com/hashicorp/terraform-exec). Current users of tfinstall are advised to move to hc-install before upgrading terraform-exec to v1.0.0.
-
-## hc-install is not a package manager
-
-This library is intended for use within Go programs or automated environments (such as CIs)
-which have some business downloading or otherwise locating HashiCorp binaries.
-
-The included command-line utility, `hc-install`, is a convenient way of using
-the library in ad-hoc or CI shell scripting outside of Go.
-
-`hc-install` does **not**:
-
- - Determine suitable installation path based on target system. e.g. in `/usr/bin` or `/usr/local/bin` on Unix based system.
- - Deal with execution of installed binaries (via service files or otherwise).
- - Upgrade existing binaries on your system.
- - Add nor link downloaded binaries to your `$PATH`.
-
-## API
-
-The `Installer` offers a few high-level methods:
-
- - `Ensure(context.Context, []src.Source)` to find, install, or build a product version
- - `Install(context.Context, []src.Installable)` to install a product version
-
-### Sources
-
-The `Installer` methods accept number of different `Source` types.
-Each comes with different trade-offs described below.
-
- - `fs.{AnyVersion,ExactVersion,Version}` - Finds a binary in `$PATH` (or additional paths)
- - **Pros:**
- - This is most convenient when you already have the product installed on your system
- which you already manage.
- - **Cons:**
- - Only relies on a single version, expects _you_ to manage the installation
- - _Not recommended_ for any environment where product installation is not controlled or managed by you (e.g. default GitHub Actions image managed by GitHub)
- - `releases.{LatestVersion,ExactVersion}` - Downloads, verifies & installs any known product from `releases.hashicorp.com`
- - **Pros:**
- - Fast and reliable way of obtaining any pre-built version of any product
- - Allows installation of enterprise versions
- - **Cons:**
- - Installation may consume some bandwidth, disk space and a little time
- - Potentially less stable builds (see `checkpoint` below)
- - `checkpoint.LatestVersion` - Downloads, verifies & installs any known product available in HashiCorp Checkpoint
- - **Pros:**
- - Checkpoint typically contains only product versions considered stable
- - **Cons:**
- - Installation may consume some bandwidth, disk space and a little time
- - Currently doesn't allow installation of old versions or enterprise versions (see `releases` above)
- - `build.GitRevision` - Clones raw source code and builds the product from it
- - **Pros:**
- - Useful for catching bugs and incompatibilities as early as possible (prior to product release).
- - **Cons:**
- - Building from scratch can consume significant amount of time & resources (CPU, memory, bandwith, disk space)
- - There are no guarantees that build instructions will always be up-to-date
- - There's increased likelihood of build containing bugs prior to release
- - Any CI builds relying on this are likely to be fragile
-
-## Example Usage
-
-See examples at https://pkg.go.dev/github.com/hashicorp/hc-install#example-Installer.
-
-## CLI
-
-In addition to the Go library, which is the intended primary use case of `hc-install`, we also distribute CLI.
-
-The CLI comes with some trade-offs:
-
- - more limited interface compared to the flexible Go API (installs specific versions of products via `releases.ExactVersion`)
- - minimal environment pre-requisites (no need to compile Go code)
- - see ["hc-install is not a package manager"](https://github.com/hashicorp/hc-install#hc-install-is-not-a-package-manager)
-
-### Installation
-
-Given that one of the key roles of the CLI/library is integrity checking, you should choose the installation method which involves the same level of integrity checks, and/or perform these checks yourself. `go install` provides only minimal to no integrity checks, depending on exact use. We recommend any of the installation methods documented below.
-
-#### Homebrew (macOS / Linux)
-
-[Homebrew](https://brew.sh)
-
-```
-brew install hashicorp/tap/hc-install
-```
-
-#### Linux
-
-We support Debian & Ubuntu via apt and RHEL, CentOS, Fedora and Amazon Linux via RPM.
-
-You can follow the instructions in the [Official Packaging Guide](https://www.hashicorp.com/official-packaging-guide) to install the package from the official HashiCorp-maintained repositories. The package name is `hc-install` in all repositories.
-
-#### Other platforms
-
-1. [Download for the latest version](https://releases.hashicorp.com/hc-install/) relevant for your operating system and architecture.
-2. Verify integrity by comparing the SHA256 checksums which are part of the release (called `hc-install__SHA256SUMS`).
-3. Install it by unzipping it and moving it to a directory included in your system's `PATH`.
-4. Check that you have installed it correctly via `hc-install --version`.
- You should see the latest version printed to your terminal.
-
-### Usage
-
-```
-Usage: hc-install install [options] -version
-
- This command installs a HashiCorp product.
- Options:
- -version [REQUIRED] Version of product to install.
- -path Path to directory where the product will be installed. Defaults
- to current working directory.
-```
-```sh
-hc-install install -version 1.3.7 terraform
-```
-```
-hc-install: will install terraform@1.3.7
-installed terraform@1.3.7 to /current/working/dir/terraform
-```
diff --git a/vendor/github.com/hashicorp/hc-install/checkpoint/latest_version.go b/vendor/github.com/hashicorp/hc-install/checkpoint/latest_version.go
deleted file mode 100644
index 2cd5379f..00000000
--- a/vendor/github.com/hashicorp/hc-install/checkpoint/latest_version.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package checkpoint
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "log"
- "os"
- "path/filepath"
- "runtime"
- "time"
-
- checkpoint "github.com/hashicorp/go-checkpoint"
- "github.com/hashicorp/go-version"
- "github.com/hashicorp/hc-install/internal/pubkey"
- rjson "github.com/hashicorp/hc-install/internal/releasesjson"
- isrc "github.com/hashicorp/hc-install/internal/src"
- "github.com/hashicorp/hc-install/internal/validators"
- "github.com/hashicorp/hc-install/product"
-)
-
-var (
- defaultTimeout = 30 * time.Second
- discardLogger = log.New(ioutil.Discard, "", 0)
-)
-
-// LatestVersion installs the latest version known to Checkpoint
-// to OS temp directory, or to InstallDir (if not empty)
-type LatestVersion struct {
- Product product.Product
- Timeout time.Duration
- SkipChecksumVerification bool
- InstallDir string
-
- // ArmoredPublicKey is a public PGP key in ASCII/armor format to use
- // instead of built-in pubkey to verify signature of downloaded checksums
- ArmoredPublicKey string
-
- logger *log.Logger
- pathsToRemove []string
-}
-
-func (*LatestVersion) IsSourceImpl() isrc.InstallSrcSigil {
- return isrc.InstallSrcSigil{}
-}
-
-func (lv *LatestVersion) SetLogger(logger *log.Logger) {
- lv.logger = logger
-}
-
-func (lv *LatestVersion) log() *log.Logger {
- if lv.logger == nil {
- return discardLogger
- }
- return lv.logger
-}
-
-func (lv *LatestVersion) Validate() error {
- if !validators.IsProductNameValid(lv.Product.Name) {
- return fmt.Errorf("invalid product name: %q", lv.Product.Name)
- }
- if !validators.IsBinaryNameValid(lv.Product.BinaryName()) {
- return fmt.Errorf("invalid binary name: %q", lv.Product.BinaryName())
- }
-
- return nil
-}
-
-func (lv *LatestVersion) Install(ctx context.Context) (string, error) {
- timeout := defaultTimeout
- if lv.Timeout > 0 {
- timeout = lv.Timeout
- }
- ctx, cancelFunc := context.WithTimeout(ctx, timeout)
- defer cancelFunc()
-
- // TODO: Introduce CheckWithContext to allow for cancellation
- resp, err := checkpoint.Check(&checkpoint.CheckParams{
- Product: lv.Product.Name,
- OS: runtime.GOOS,
- Arch: runtime.GOARCH,
- Force: true,
- })
- if err != nil {
- return "", err
- }
-
- latestVersion, err := version.NewVersion(resp.CurrentVersion)
- if err != nil {
- return "", err
- }
-
- if lv.pathsToRemove == nil {
- lv.pathsToRemove = make([]string, 0)
- }
-
- dstDir := lv.InstallDir
- if dstDir == "" {
- var err error
- dirName := fmt.Sprintf("%s_*", lv.Product.Name)
- dstDir, err = ioutil.TempDir("", dirName)
- if err != nil {
- return "", err
- }
- lv.pathsToRemove = append(lv.pathsToRemove, dstDir)
- lv.log().Printf("created new temp dir at %s", dstDir)
- }
- lv.log().Printf("will install into dir at %s", dstDir)
-
- rels := rjson.NewReleases()
- rels.SetLogger(lv.log())
- pv, err := rels.GetProductVersion(ctx, lv.Product.Name, latestVersion)
- if err != nil {
- return "", err
- }
-
- d := &rjson.Downloader{
- Logger: lv.log(),
- VerifyChecksum: !lv.SkipChecksumVerification,
- ArmoredPublicKey: pubkey.DefaultPublicKey,
- BaseURL: rels.BaseURL,
- }
- if lv.ArmoredPublicKey != "" {
- d.ArmoredPublicKey = lv.ArmoredPublicKey
- }
- zipFilePath, err := d.DownloadAndUnpack(ctx, pv, dstDir, "")
- if zipFilePath != "" {
- lv.pathsToRemove = append(lv.pathsToRemove, zipFilePath)
- }
- if err != nil {
- return "", err
- }
-
- execPath := filepath.Join(dstDir, lv.Product.BinaryName())
-
- lv.pathsToRemove = append(lv.pathsToRemove, execPath)
-
- lv.log().Printf("changing perms of %s", execPath)
- err = os.Chmod(execPath, 0o700)
- if err != nil {
- return "", err
- }
-
- return execPath, nil
-}
-
-func (lv *LatestVersion) Remove(ctx context.Context) error {
- if lv.pathsToRemove != nil {
- for _, path := range lv.pathsToRemove {
- err := os.RemoveAll(path)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
diff --git a/vendor/github.com/hashicorp/hc-install/errors/errors.go b/vendor/github.com/hashicorp/hc-install/errors/errors.go
deleted file mode 100644
index 15d51b60..00000000
--- a/vendor/github.com/hashicorp/hc-install/errors/errors.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package errors
-
-type skippableErr struct {
- Err error
-}
-
-func (e skippableErr) Error() string {
- return e.Err.Error()
-}
-
-func SkippableErr(err error) skippableErr {
- return skippableErr{Err: err}
-}
-
-func IsErrorSkippable(err error) bool {
- _, ok := err.(skippableErr)
- return ok
-}
diff --git a/vendor/github.com/hashicorp/hc-install/fs/any_version.go b/vendor/github.com/hashicorp/hc-install/fs/any_version.go
deleted file mode 100644
index 8071dfcf..00000000
--- a/vendor/github.com/hashicorp/hc-install/fs/any_version.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package fs
-
-import (
- "context"
- "fmt"
- "log"
- "path/filepath"
-
- "github.com/hashicorp/hc-install/errors"
- "github.com/hashicorp/hc-install/internal/src"
- "github.com/hashicorp/hc-install/internal/validators"
- "github.com/hashicorp/hc-install/product"
-)
-
-// AnyVersion finds an executable binary of any version
-// either defined by ExactBinPath, or as part of Product.
-//
-// When ExactBinPath is used, the source is skipped when
-// the binary is not found or accessible/executable.
-//
-// When Product is used, binary name is looked up within system $PATH
-// and any declared ExtraPaths (which are *appended* to
-// any directories in $PATH). Source is skipped if no binary
-// is found or accessible/executable.
-type AnyVersion struct {
- // Product represents the product (its binary name to look up),
- // conflicts with ExactBinPath
- Product *product.Product
-
- // ExtraPaths represents additional dir paths to be appended to
- // the default system $PATH, conflicts with ExactBinPath
- ExtraPaths []string
-
- // ExactBinPath represents exact path to the binary,
- // conflicts with Product and ExtraPaths
- ExactBinPath string
-
- logger *log.Logger
-}
-
-func (*AnyVersion) IsSourceImpl() src.InstallSrcSigil {
- return src.InstallSrcSigil{}
-}
-
-func (av *AnyVersion) Validate() error {
- if av.ExactBinPath == "" && av.Product == nil {
- return fmt.Errorf("must use either ExactBinPath or Product + ExtraPaths")
- }
- if av.ExactBinPath != "" && (av.Product != nil || len(av.ExtraPaths) > 0) {
- return fmt.Errorf("use either ExactBinPath or Product + ExtraPaths, not both")
- }
- if av.ExactBinPath != "" && !filepath.IsAbs(av.ExactBinPath) {
- return fmt.Errorf("expected ExactBinPath (%q) to be an absolute path", av.ExactBinPath)
- }
- if av.Product != nil && !validators.IsBinaryNameValid(av.Product.BinaryName()) {
- return fmt.Errorf("invalid binary name: %q", av.Product.BinaryName())
- }
- return nil
-}
-
-func (av *AnyVersion) SetLogger(logger *log.Logger) {
- av.logger = logger
-}
-
-func (av *AnyVersion) log() *log.Logger {
- if av.logger == nil {
- return discardLogger
- }
- return av.logger
-}
-
-func (av *AnyVersion) Find(ctx context.Context) (string, error) {
- if av.ExactBinPath != "" {
- err := checkExecutable(av.ExactBinPath)
- if err != nil {
- return "", errors.SkippableErr(err)
- }
-
- return av.ExactBinPath, nil
- }
-
- execPath, err := findFile(lookupDirs(av.ExtraPaths), av.Product.BinaryName(), checkExecutable)
- if err != nil {
- return "", errors.SkippableErr(err)
- }
-
- if !filepath.IsAbs(execPath) {
- var err error
- execPath, err = filepath.Abs(execPath)
- if err != nil {
- return "", errors.SkippableErr(err)
- }
- }
- return execPath, nil
-}
diff --git a/vendor/github.com/hashicorp/hc-install/fs/exact_version.go b/vendor/github.com/hashicorp/hc-install/fs/exact_version.go
deleted file mode 100644
index c3cc49bf..00000000
--- a/vendor/github.com/hashicorp/hc-install/fs/exact_version.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package fs
-
-import (
- "context"
- "fmt"
- "log"
- "path/filepath"
- "time"
-
- "github.com/hashicorp/go-version"
- "github.com/hashicorp/hc-install/errors"
- "github.com/hashicorp/hc-install/internal/src"
- "github.com/hashicorp/hc-install/internal/validators"
- "github.com/hashicorp/hc-install/product"
-)
-
-// ExactVersion finds the first executable binary of the product name
-// which matches the Version within system $PATH and any declared ExtraPaths
-// (which are *appended* to any directories in $PATH)
-type ExactVersion struct {
- Product product.Product
- Version *version.Version
- ExtraPaths []string
- Timeout time.Duration
-
- logger *log.Logger
-}
-
-func (*ExactVersion) IsSourceImpl() src.InstallSrcSigil {
- return src.InstallSrcSigil{}
-}
-
-func (ev *ExactVersion) SetLogger(logger *log.Logger) {
- ev.logger = logger
-}
-
-func (ev *ExactVersion) log() *log.Logger {
- if ev.logger == nil {
- return discardLogger
- }
- return ev.logger
-}
-
-func (ev *ExactVersion) Validate() error {
- if !validators.IsBinaryNameValid(ev.Product.BinaryName()) {
- return fmt.Errorf("invalid binary name: %q", ev.Product.BinaryName())
- }
- if ev.Version == nil {
- return fmt.Errorf("undeclared version")
- }
- if ev.Product.GetVersion == nil {
- return fmt.Errorf("undeclared version getter")
- }
- return nil
-}
-
-func (ev *ExactVersion) Find(ctx context.Context) (string, error) {
- timeout := defaultTimeout
- if ev.Timeout > 0 {
- timeout = ev.Timeout
- }
- ctx, cancelFunc := context.WithTimeout(ctx, timeout)
- defer cancelFunc()
-
- execPath, err := findFile(lookupDirs(ev.ExtraPaths), ev.Product.BinaryName(), func(file string) error {
- err := checkExecutable(file)
- if err != nil {
- return err
- }
-
- v, err := ev.Product.GetVersion(ctx, file)
- if err != nil {
- return err
- }
-
- if !ev.Version.Equal(v) {
- return fmt.Errorf("version (%s) doesn't match %s", v, ev.Version)
- }
-
- return nil
- })
- if err != nil {
- return "", errors.SkippableErr(err)
- }
-
- if !filepath.IsAbs(execPath) {
- var err error
- execPath, err = filepath.Abs(execPath)
- if err != nil {
- return "", errors.SkippableErr(err)
- }
- }
-
- return execPath, nil
-}
diff --git a/vendor/github.com/hashicorp/hc-install/fs/fs.go b/vendor/github.com/hashicorp/hc-install/fs/fs.go
deleted file mode 100644
index 216df2c2..00000000
--- a/vendor/github.com/hashicorp/hc-install/fs/fs.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package fs
-
-import (
- "io/ioutil"
- "log"
- "time"
-)
-
-var (
- defaultTimeout = 10 * time.Second
- discardLogger = log.New(ioutil.Discard, "", 0)
-)
-
-type fileCheckFunc func(path string) error
diff --git a/vendor/github.com/hashicorp/hc-install/fs/fs_unix.go b/vendor/github.com/hashicorp/hc-install/fs/fs_unix.go
deleted file mode 100644
index eebd98b8..00000000
--- a/vendor/github.com/hashicorp/hc-install/fs/fs_unix.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-//go:build !windows
-// +build !windows
-
-package fs
-
-import (
- "fmt"
- "os"
- "os/exec"
- "path/filepath"
-)
-
-func lookupDirs(extraDirs []string) []string {
- pathVar := os.Getenv("PATH")
- dirs := filepath.SplitList(pathVar)
- for _, ep := range extraDirs {
- dirs = append(dirs, ep)
- }
- return dirs
-}
-
-func findFile(dirs []string, file string, f fileCheckFunc) (string, error) {
- for _, dir := range dirs {
- if dir == "" {
- // Unix shell semantics: path element "" means "."
- dir = "."
- }
- path := filepath.Join(dir, file)
- if err := f(path); err == nil {
- return path, nil
- }
- }
- return "", fmt.Errorf("%s: %w", file, exec.ErrNotFound)
-}
-
-func checkExecutable(file string) error {
- d, err := os.Stat(file)
- if err != nil {
- return err
- }
- if m := d.Mode(); !m.IsDir() && m&0111 != 0 {
- return nil
- }
- return os.ErrPermission
-}
diff --git a/vendor/github.com/hashicorp/hc-install/fs/fs_windows.go b/vendor/github.com/hashicorp/hc-install/fs/fs_windows.go
deleted file mode 100644
index e2e4e73f..00000000
--- a/vendor/github.com/hashicorp/hc-install/fs/fs_windows.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package fs
-
-import (
- "fmt"
- "io/fs"
- "os"
- "os/exec"
- "path/filepath"
- "strings"
-)
-
-func lookupDirs(extraDirs []string) []string {
- pathVar := os.Getenv("path")
- dirs := filepath.SplitList(pathVar)
- for _, ep := range extraDirs {
- dirs = append(dirs, ep)
- }
- return dirs
-}
-
-func findFile(dirs []string, file string, f fileCheckFunc) (string, error) {
- for _, dir := range dirs {
- path := filepath.Join(dir, file)
- if err := f(path); err == nil {
- return path, nil
- }
- }
- return "", fmt.Errorf("%s: %w", file, exec.ErrNotFound)
-}
-
-func checkExecutable(file string) error {
- var exts []string
- x := os.Getenv(`PATHEXT`)
- if x != "" {
- for _, e := range strings.Split(strings.ToLower(x), `;`) {
- if e == "" {
- continue
- }
- if e[0] != '.' {
- e = "." + e
- }
- exts = append(exts, e)
- }
- } else {
- exts = []string{".com", ".exe", ".bat", ".cmd"}
- }
-
- if len(exts) == 0 {
- return chkStat(file)
- }
- if hasExt(file) {
- if chkStat(file) == nil {
- return nil
- }
- }
- for _, e := range exts {
- if f := file + e; chkStat(f) == nil {
- return nil
- }
- }
- return fs.ErrNotExist
-}
-
-func chkStat(file string) error {
- d, err := os.Stat(file)
- if err != nil {
- return err
- }
- if d.IsDir() {
- return fs.ErrPermission
- }
- return nil
-}
-
-func hasExt(file string) bool {
- i := strings.LastIndex(file, ".")
- if i < 0 {
- return false
- }
- return strings.LastIndexAny(file, `:\/`) < i
-}
diff --git a/vendor/github.com/hashicorp/hc-install/fs/version.go b/vendor/github.com/hashicorp/hc-install/fs/version.go
deleted file mode 100644
index 39efb52d..00000000
--- a/vendor/github.com/hashicorp/hc-install/fs/version.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package fs
-
-import (
- "context"
- "fmt"
- "log"
- "path/filepath"
- "time"
-
- "github.com/hashicorp/go-version"
- "github.com/hashicorp/hc-install/errors"
- "github.com/hashicorp/hc-install/internal/src"
- "github.com/hashicorp/hc-install/internal/validators"
- "github.com/hashicorp/hc-install/product"
-)
-
-// Version finds the first executable binary of the product name
-// which matches the version constraint within system $PATH and any declared ExtraPaths
-// (which are *appended* to any directories in $PATH)
-type Version struct {
- Product product.Product
- Constraints version.Constraints
- ExtraPaths []string
- Timeout time.Duration
-
- logger *log.Logger
-}
-
-func (*Version) IsSourceImpl() src.InstallSrcSigil {
- return src.InstallSrcSigil{}
-}
-
-func (v *Version) SetLogger(logger *log.Logger) {
- v.logger = logger
-}
-
-func (v *Version) log() *log.Logger {
- if v.logger == nil {
- return discardLogger
- }
- return v.logger
-}
-
-func (v *Version) Validate() error {
- if !validators.IsBinaryNameValid(v.Product.BinaryName()) {
- return fmt.Errorf("invalid binary name: %q", v.Product.BinaryName())
- }
- if len(v.Constraints) == 0 {
- return fmt.Errorf("undeclared version constraints")
- }
- if v.Product.GetVersion == nil {
- return fmt.Errorf("undeclared version getter")
- }
- return nil
-}
-
-func (v *Version) Find(ctx context.Context) (string, error) {
- timeout := defaultTimeout
- if v.Timeout > 0 {
- timeout = v.Timeout
- }
- ctx, cancelFunc := context.WithTimeout(ctx, timeout)
- defer cancelFunc()
-
- execPath, err := findFile(lookupDirs(v.ExtraPaths), v.Product.BinaryName(), func(file string) error {
- err := checkExecutable(file)
- if err != nil {
- return err
- }
-
- ver, err := v.Product.GetVersion(ctx, file)
- if err != nil {
- return err
- }
-
- for _, vc := range v.Constraints {
- if !vc.Check(ver) {
- return fmt.Errorf("version (%s) doesn't meet constraints %s", ver, vc.String())
- }
- }
-
- return nil
- })
- if err != nil {
- return "", errors.SkippableErr(err)
- }
-
- if !filepath.IsAbs(execPath) {
- var err error
- execPath, err = filepath.Abs(execPath)
- if err != nil {
- return "", errors.SkippableErr(err)
- }
- }
-
- return execPath, nil
-}
diff --git a/vendor/github.com/hashicorp/hc-install/installer.go b/vendor/github.com/hashicorp/hc-install/installer.go
deleted file mode 100644
index 6c704eed..00000000
--- a/vendor/github.com/hashicorp/hc-install/installer.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package install
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "log"
-
- "github.com/hashicorp/go-multierror"
- "github.com/hashicorp/hc-install/errors"
- "github.com/hashicorp/hc-install/src"
-)
-
-type Installer struct {
- logger *log.Logger
-
- removableSources []src.Removable
-}
-
-type RemoveFunc func(ctx context.Context) error
-
-func NewInstaller() *Installer {
- discardLogger := log.New(ioutil.Discard, "", 0)
- return &Installer{
- logger: discardLogger,
- }
-}
-
-func (i *Installer) SetLogger(logger *log.Logger) {
- i.logger = logger
-}
-
-func (i *Installer) Ensure(ctx context.Context, sources []src.Source) (string, error) {
- var errs *multierror.Error
-
- for _, source := range sources {
- if srcWithLogger, ok := source.(src.LoggerSettable); ok {
- srcWithLogger.SetLogger(i.logger)
- }
-
- if srcValidatable, ok := source.(src.Validatable); ok {
- err := srcValidatable.Validate()
- if err != nil {
- errs = multierror.Append(errs, err)
- }
- }
- }
-
- if errs.ErrorOrNil() != nil {
- return "", errs
- }
-
- i.removableSources = make([]src.Removable, 0)
-
- for _, source := range sources {
- if s, ok := source.(src.Removable); ok {
- i.removableSources = append(i.removableSources, s)
- }
-
- switch s := source.(type) {
- case src.Findable:
- execPath, err := s.Find(ctx)
- if err != nil {
- if errors.IsErrorSkippable(err) {
- errs = multierror.Append(errs, err)
- continue
- }
- return "", err
- }
-
- return execPath, nil
- case src.Installable:
- execPath, err := s.Install(ctx)
- if err != nil {
- if errors.IsErrorSkippable(err) {
- errs = multierror.Append(errs, err)
- continue
- }
- return "", err
- }
-
- return execPath, nil
- case src.Buildable:
- execPath, err := s.Build(ctx)
- if err != nil {
- if errors.IsErrorSkippable(err) {
- errs = multierror.Append(errs, err)
- continue
- }
- return "", err
- }
-
- return execPath, nil
- default:
- return "", fmt.Errorf("unknown source: %T", s)
- }
- }
-
- return "", fmt.Errorf("unable to find, install, or build from %d sources: %s",
- len(sources), errs.ErrorOrNil())
-}
-
-func (i *Installer) Install(ctx context.Context, sources []src.Installable) (string, error) {
- var errs *multierror.Error
-
- i.removableSources = make([]src.Removable, 0)
-
- for _, source := range sources {
- if srcWithLogger, ok := source.(src.LoggerSettable); ok {
- srcWithLogger.SetLogger(i.logger)
- }
-
- if srcValidatable, ok := source.(src.Validatable); ok {
- err := srcValidatable.Validate()
- if err != nil {
- errs = multierror.Append(errs, err)
- continue
- }
- }
-
- if s, ok := source.(src.Removable); ok {
- i.removableSources = append(i.removableSources, s)
- }
-
- execPath, err := source.Install(ctx)
- if err != nil {
- if errors.IsErrorSkippable(err) {
- errs = multierror.Append(errs, err)
- continue
- }
- return "", err
- }
-
- return execPath, nil
- }
-
- return "", fmt.Errorf("unable install from %d sources: %s",
- len(sources), errs.ErrorOrNil())
-}
-
-func (i *Installer) Remove(ctx context.Context) error {
- var errs *multierror.Error
-
- if i.removableSources != nil {
- for _, rs := range i.removableSources {
- err := rs.Remove(ctx)
- if err != nil {
- errs = multierror.Append(errs, err)
- }
- }
- }
-
- return errs.ErrorOrNil()
-}
diff --git a/vendor/github.com/hashicorp/hc-install/internal/build/get_go_version.go b/vendor/github.com/hashicorp/hc-install/internal/build/get_go_version.go
deleted file mode 100644
index 858f8ab2..00000000
--- a/vendor/github.com/hashicorp/hc-install/internal/build/get_go_version.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package build
-
-import (
- "context"
- "fmt"
- "os/exec"
- "regexp"
- "strings"
-
- "github.com/hashicorp/go-version"
-)
-
-// GetGoVersion obtains version of locally installed Go via "go version"
-func GetGoVersion(ctx context.Context) (*version.Version, error) {
- cmd := exec.CommandContext(ctx, "go", "version")
- out, err := cmd.CombinedOutput()
- if err != nil {
- return nil, fmt.Errorf("unable to build: %w\n%s", err, out)
- }
-
- output := strings.TrimSpace(string(out))
-
- // e.g. "go version go1.15"
- re := regexp.MustCompile(`^go version go([0-9.]+)\s+`)
- matches := re.FindStringSubmatch(output)
- if len(matches) != 2 {
- return nil, fmt.Errorf("unexpected go version output: %q", output)
- }
-
- rawGoVersion := matches[1]
- v, err := version.NewVersion(rawGoVersion)
- if err != nil {
- return nil, fmt.Errorf("unexpected go version output: %w", err)
- }
-
- return v, nil
-}
diff --git a/vendor/github.com/hashicorp/hc-install/internal/build/go_build.go b/vendor/github.com/hashicorp/hc-install/internal/build/go_build.go
deleted file mode 100644
index 504bf45a..00000000
--- a/vendor/github.com/hashicorp/hc-install/internal/build/go_build.go
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package build
-
-import (
- "bytes"
- "context"
- "fmt"
- "io/ioutil"
- "log"
- "os"
- "os/exec"
- "path/filepath"
-
- "github.com/hashicorp/go-version"
- "golang.org/x/mod/modfile"
-)
-
-var discardLogger = log.New(ioutil.Discard, "", 0)
-
-// GoBuild represents a Go builder (to run "go build")
-type GoBuild struct {
- Version *version.Version
- DetectVendoring bool
-
- pathToRemove string
- logger *log.Logger
-}
-
-func (gb *GoBuild) SetLogger(logger *log.Logger) {
- gb.logger = logger
-}
-
-func (gb *GoBuild) log() *log.Logger {
- if gb.logger == nil {
- return discardLogger
- }
- return gb.logger
-}
-
-// Build runs "go build" within a given repo to produce binaryName in targetDir
-func (gb *GoBuild) Build(ctx context.Context, repoDir, targetDir, binaryName string) (string, error) {
- reqGo, err := gb.ensureRequiredGoVersion(ctx, repoDir)
- if err != nil {
- return "", err
- }
- defer reqGo.CleanupFunc(ctx)
-
- if reqGo.Version == nil {
- gb.logger.Println("building using default available Go")
- } else {
- gb.logger.Printf("building using Go %s", reqGo.Version)
- }
-
- // `go build` would download dependencies as a side effect, but we attempt
- // to do it early in a separate step, such that we can easily distinguish
- // network failures from build failures.
- //
- // Note, that `go mod download` was introduced in Go 1.11
- // See https://github.com/golang/go/commit/9f4ea6c2
- minGoVersion := version.Must(version.NewVersion("1.11"))
- if reqGo.Version.GreaterThanOrEqual(minGoVersion) {
- downloadArgs := []string{"mod", "download"}
- gb.log().Printf("executing %s %q in %q", reqGo.Cmd, downloadArgs, repoDir)
- cmd := exec.CommandContext(ctx, reqGo.Cmd, downloadArgs...)
- cmd.Dir = repoDir
- out, err := cmd.CombinedOutput()
- if err != nil {
- return "", fmt.Errorf("unable to download dependencies: %w\n%s", err, out)
- }
- }
-
- buildArgs := []string{"build", "-o", filepath.Join(targetDir, binaryName)}
-
- if gb.DetectVendoring {
- vendorDir := filepath.Join(repoDir, "vendor")
- if fi, err := os.Stat(vendorDir); err == nil && fi.IsDir() {
- buildArgs = append(buildArgs, "-mod", "vendor")
- }
- }
-
- gb.log().Printf("executing %s %q in %q", reqGo.Cmd, buildArgs, repoDir)
- cmd := exec.CommandContext(ctx, reqGo.Cmd, buildArgs...)
- cmd.Dir = repoDir
- out, err := cmd.CombinedOutput()
- if err != nil {
- return "", fmt.Errorf("unable to build: %w\n%s", err, out)
- }
-
- binPath := filepath.Join(targetDir, binaryName)
-
- gb.pathToRemove = binPath
-
- return binPath, nil
-}
-
-func (gb *GoBuild) Remove(ctx context.Context) error {
- return os.RemoveAll(gb.pathToRemove)
-}
-
-type Go struct {
- Cmd string
- CleanupFunc CleanupFunc
- Version *version.Version
-}
-
-func (gb *GoBuild) ensureRequiredGoVersion(ctx context.Context, repoDir string) (Go, error) {
- cmdName := "go"
- noopCleanupFunc := func(context.Context) {}
-
- var installedVersion *version.Version
-
- if gb.Version != nil {
- gb.logger.Printf("attempting to satisfy explicit requirement for Go %s", gb.Version)
- goVersion, err := GetGoVersion(ctx)
- if err != nil {
- return Go{
- Cmd: cmdName,
- CleanupFunc: noopCleanupFunc,
- }, err
- }
-
- if !goVersion.GreaterThanOrEqual(gb.Version) {
- // found incompatible version, try downloading the desired one
- return gb.installGoVersion(ctx, gb.Version)
- }
- installedVersion = goVersion
- }
-
- if requiredVersion, ok := guessRequiredGoVersion(repoDir); ok {
- gb.logger.Printf("attempting to satisfy guessed Go requirement %s", requiredVersion)
- goVersion, err := GetGoVersion(ctx)
- if err != nil {
- return Go{
- Cmd: cmdName,
- CleanupFunc: noopCleanupFunc,
- }, err
- }
-
- if !goVersion.GreaterThanOrEqual(requiredVersion) {
- // found incompatible version, try downloading the desired one
- return gb.installGoVersion(ctx, requiredVersion)
- }
- installedVersion = goVersion
- } else {
- gb.logger.Println("unable to guess Go requirement")
- }
-
- return Go{
- Cmd: cmdName,
- CleanupFunc: noopCleanupFunc,
- Version: installedVersion,
- }, nil
-}
-
-// CleanupFunc represents a function to be called once Go is no longer needed
-// e.g. to remove any version installed temporarily per requirements
-type CleanupFunc func(context.Context)
-
-func guessRequiredGoVersion(repoDir string) (*version.Version, bool) {
- goEnvFile := filepath.Join(repoDir, ".go-version")
- if fi, err := os.Stat(goEnvFile); err == nil && !fi.IsDir() {
- b, err := ioutil.ReadFile(goEnvFile)
- if err != nil {
- return nil, false
- }
- requiredVersion, err := version.NewVersion(string(bytes.TrimSpace(b)))
- if err != nil {
- return nil, false
- }
- return requiredVersion, true
- }
-
- goModFile := filepath.Join(repoDir, "go.mod")
- if fi, err := os.Stat(goModFile); err == nil && !fi.IsDir() {
- b, err := ioutil.ReadFile(goModFile)
- if err != nil {
- return nil, false
- }
- f, err := modfile.ParseLax(fi.Name(), b, nil)
- if err != nil {
- return nil, false
- }
- if f.Go == nil {
- return nil, false
- }
- requiredVersion, err := version.NewVersion(f.Go.Version)
- if err != nil {
- return nil, false
- }
- return requiredVersion, true
- }
-
- return nil, false
-}
diff --git a/vendor/github.com/hashicorp/hc-install/internal/build/go_is_installed.go b/vendor/github.com/hashicorp/hc-install/internal/build/go_is_installed.go
deleted file mode 100644
index 00165fff..00000000
--- a/vendor/github.com/hashicorp/hc-install/internal/build/go_is_installed.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package build
-
-import (
- "context"
- "fmt"
-
- "github.com/hashicorp/go-version"
-)
-
-// GoIsInstalled represents a checker of whether Go is installed locally
-type GoIsInstalled struct {
- RequiredVersion version.Constraints
-}
-
-// Check checks whether any Go version is installed locally
-func (gii *GoIsInstalled) Check(ctx context.Context) error {
- goVersion, err := GetGoVersion(ctx)
- if err != nil {
- return err
- }
-
- if gii.RequiredVersion != nil && !gii.RequiredVersion.Check(goVersion) {
- return fmt.Errorf("go %s required (%s available)",
- gii.RequiredVersion, goVersion)
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/hc-install/internal/build/install_go_version.go b/vendor/github.com/hashicorp/hc-install/internal/build/install_go_version.go
deleted file mode 100644
index 9dc070d7..00000000
--- a/vendor/github.com/hashicorp/hc-install/internal/build/install_go_version.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package build
-
-import (
- "context"
- "fmt"
- "os"
- "os/exec"
- "strings"
-
- "github.com/hashicorp/go-version"
-)
-
-var v1_21 = version.Must(version.NewVersion("1.21"))
-
-// installGoVersion installs given version of Go using Go
-// according to https://golang.org/doc/manage-install
-func (gb *GoBuild) installGoVersion(ctx context.Context, v *version.Version) (Go, error) {
- goVersion := v.String()
-
- // trim 0 patch versions as that's how Go does it
- // for versions prior to 1.21
- // See https://github.com/golang/go/issues/62136
- if v.LessThan(v1_21) {
- versionString := v.Core().String()
- goVersion = strings.TrimSuffix(versionString, ".0")
- }
- pkgURL := fmt.Sprintf("golang.org/dl/go%s", goVersion)
-
- gb.log().Printf("go getting %q", pkgURL)
- cmd := exec.CommandContext(ctx, "go", "get", pkgURL)
- out, err := cmd.CombinedOutput()
- if err != nil {
- return Go{}, fmt.Errorf("unable to get Go %s: %w\n%s", v, err, out)
- }
-
- gb.log().Printf("go installing %q", pkgURL)
- cmd = exec.CommandContext(ctx, "go", "install", pkgURL)
- out, err = cmd.CombinedOutput()
- if err != nil {
- return Go{}, fmt.Errorf("unable to install Go %s: %w\n%s", v, err, out)
- }
-
- cmdName := fmt.Sprintf("go%s", goVersion)
-
- gb.log().Printf("downloading go %q", v)
- cmd = exec.CommandContext(ctx, cmdName, "download")
- out, err = cmd.CombinedOutput()
- if err != nil {
- return Go{}, fmt.Errorf("unable to download Go %s: %w\n%s", v, err, out)
- }
- gb.log().Printf("download of go %q finished", v)
-
- cleanupFunc := func(ctx context.Context) {
- cmd = exec.CommandContext(ctx, cmdName, "env", "GOROOT")
- out, err = cmd.CombinedOutput()
- if err != nil {
- return
- }
- rootPath := strings.TrimSpace(string(out))
-
- // run some extra checks before deleting, just to be sure
- if rootPath != "" && strings.HasSuffix(rootPath, v.String()) {
- os.RemoveAll(rootPath)
- }
- }
-
- return Go{
- Cmd: cmdName,
- CleanupFunc: cleanupFunc,
- Version: v,
- }, nil
-}
diff --git a/vendor/github.com/hashicorp/hc-install/internal/httpclient/httpclient.go b/vendor/github.com/hashicorp/hc-install/internal/httpclient/httpclient.go
deleted file mode 100644
index a9503dfd..00000000
--- a/vendor/github.com/hashicorp/hc-install/internal/httpclient/httpclient.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package httpclient
-
-import (
- "fmt"
- "net/http"
-
- "github.com/hashicorp/go-cleanhttp"
- "github.com/hashicorp/hc-install/version"
-)
-
-// NewHTTPClient provides a pre-configured http.Client
-// e.g. with relevant User-Agent header
-func NewHTTPClient() *http.Client {
- client := cleanhttp.DefaultClient()
-
- userAgent := fmt.Sprintf("hc-install/%s", version.Version())
-
- cli := cleanhttp.DefaultPooledClient()
- cli.Transport = &userAgentRoundTripper{
- userAgent: userAgent,
- inner: cli.Transport,
- }
-
- return client
-}
-
-type userAgentRoundTripper struct {
- inner http.RoundTripper
- userAgent string
-}
-
-func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
- if _, ok := req.Header["User-Agent"]; !ok {
- req.Header.Set("User-Agent", rt.userAgent)
- }
- return rt.inner.RoundTrip(req)
-}
diff --git a/vendor/github.com/hashicorp/hc-install/internal/pubkey/pubkey.go b/vendor/github.com/hashicorp/hc-install/internal/pubkey/pubkey.go
deleted file mode 100644
index d06f1045..00000000
--- a/vendor/github.com/hashicorp/hc-install/internal/pubkey/pubkey.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package pubkey
-
-const (
- // See https://www.hashicorp.com/security
- DefaultPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
-
-mQINBGB9+xkBEACabYZOWKmgZsHTdRDiyPJxhbuUiKX65GUWkyRMJKi/1dviVxOX
-PG6hBPtF48IFnVgxKpIb7G6NjBousAV+CuLlv5yqFKpOZEGC6sBV+Gx8Vu1CICpl
-Zm+HpQPcIzwBpN+Ar4l/exCG/f/MZq/oxGgH+TyRF3XcYDjG8dbJCpHO5nQ5Cy9h
-QIp3/Bh09kET6lk+4QlofNgHKVT2epV8iK1cXlbQe2tZtfCUtxk+pxvU0UHXp+AB
-0xc3/gIhjZp/dePmCOyQyGPJbp5bpO4UeAJ6frqhexmNlaw9Z897ltZmRLGq1p4a
-RnWL8FPkBz9SCSKXS8uNyV5oMNVn4G1obCkc106iWuKBTibffYQzq5TG8FYVJKrh
-RwWB6piacEB8hl20IIWSxIM3J9tT7CPSnk5RYYCTRHgA5OOrqZhC7JefudrP8n+M
-pxkDgNORDu7GCfAuisrf7dXYjLsxG4tu22DBJJC0c/IpRpXDnOuJN1Q5e/3VUKKW
-mypNumuQpP5lc1ZFG64TRzb1HR6oIdHfbrVQfdiQXpvdcFx+Fl57WuUraXRV6qfb
-4ZmKHX1JEwM/7tu21QE4F1dz0jroLSricZxfaCTHHWNfvGJoZ30/MZUrpSC0IfB3
-iQutxbZrwIlTBt+fGLtm3vDtwMFNWM+Rb1lrOxEQd2eijdxhvBOHtlIcswARAQAB
-tERIYXNoaUNvcnAgU2VjdXJpdHkgKGhhc2hpY29ycC5jb20vc2VjdXJpdHkpIDxz
-ZWN1cml0eUBoYXNoaWNvcnAuY29tPokCVAQTAQoAPhYhBMh0AR8KtAURDQIQVTQ2
-XZRy10aPBQJgffsZAhsDBQkJZgGABQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJ
-EDQ2XZRy10aPtpcP/0PhJKiHtC1zREpRTrjGizoyk4Sl2SXpBZYhkdrG++abo6zs
-buaAG7kgWWChVXBo5E20L7dbstFK7OjVs7vAg/OLgO9dPD8n2M19rpqSbbvKYWvp
-0NSgvFTT7lbyDhtPj0/bzpkZEhmvQaDWGBsbDdb2dBHGitCXhGMpdP0BuuPWEix+
-QnUMaPwU51q9GM2guL45Tgks9EKNnpDR6ZdCeWcqo1IDmklloidxT8aKL21UOb8t
-cD+Bg8iPaAr73bW7Jh8TdcV6s6DBFub+xPJEB/0bVPmq3ZHs5B4NItroZ3r+h3ke
-VDoSOSIZLl6JtVooOJ2la9ZuMqxchO3mrXLlXxVCo6cGcSuOmOdQSz4OhQE5zBxx
-LuzA5ASIjASSeNZaRnffLIHmht17BPslgNPtm6ufyOk02P5XXwa69UCjA3RYrA2P
-QNNC+OWZ8qQLnzGldqE4MnRNAxRxV6cFNzv14ooKf7+k686LdZrP/3fQu2p3k5rY
-0xQUXKh1uwMUMtGR867ZBYaxYvwqDrg9XB7xi3N6aNyNQ+r7zI2lt65lzwG1v9hg
-FG2AHrDlBkQi/t3wiTS3JOo/GCT8BjN0nJh0lGaRFtQv2cXOQGVRW8+V/9IpqEJ1
-qQreftdBFWxvH7VJq2mSOXUJyRsoUrjkUuIivaA9Ocdipk2CkP8bpuGz7ZF4uQIN
-BGB9+xkBEACoklYsfvWRCjOwS8TOKBTfl8myuP9V9uBNbyHufzNETbhYeT33Cj0M
-GCNd9GdoaknzBQLbQVSQogA+spqVvQPz1MND18GIdtmr0BXENiZE7SRvu76jNqLp
-KxYALoK2Pc3yK0JGD30HcIIgx+lOofrVPA2dfVPTj1wXvm0rbSGA4Wd4Ng3d2AoR
-G/wZDAQ7sdZi1A9hhfugTFZwfqR3XAYCk+PUeoFrkJ0O7wngaon+6x2GJVedVPOs
-2x/XOR4l9ytFP3o+5ILhVnsK+ESVD9AQz2fhDEU6RhvzaqtHe+sQccR3oVLoGcat
-ma5rbfzH0Fhj0JtkbP7WreQf9udYgXxVJKXLQFQgel34egEGG+NlbGSPG+qHOZtY
-4uWdlDSvmo+1P95P4VG/EBteqyBbDDGDGiMs6lAMg2cULrwOsbxWjsWka8y2IN3z
-1stlIJFvW2kggU+bKnQ+sNQnclq3wzCJjeDBfucR3a5WRojDtGoJP6Fc3luUtS7V
-5TAdOx4dhaMFU9+01OoH8ZdTRiHZ1K7RFeAIslSyd4iA/xkhOhHq89F4ECQf3Bt4
-ZhGsXDTaA/VgHmf3AULbrC94O7HNqOvTWzwGiWHLfcxXQsr+ijIEQvh6rHKmJK8R
-9NMHqc3L18eMO6bqrzEHW0Xoiu9W8Yj+WuB3IKdhclT3w0pO4Pj8gQARAQABiQI8
-BBgBCgAmFiEEyHQBHwq0BRENAhBVNDZdlHLXRo8FAmB9+xkCGwwFCQlmAYAACgkQ
-NDZdlHLXRo9ZnA/7BmdpQLeTjEiXEJyW46efxlV1f6THn9U50GWcE9tebxCXgmQf
-u+Uju4hreltx6GDi/zbVVV3HCa0yaJ4JVvA4LBULJVe3ym6tXXSYaOfMdkiK6P1v
-JgfpBQ/b/mWB0yuWTUtWx18BQQwlNEQWcGe8n1lBbYsH9g7QkacRNb8tKUrUbWlQ
-QsU8wuFgly22m+Va1nO2N5C/eE/ZEHyN15jEQ+QwgQgPrK2wThcOMyNMQX/VNEr1
-Y3bI2wHfZFjotmek3d7ZfP2VjyDudnmCPQ5xjezWpKbN1kvjO3as2yhcVKfnvQI5
-P5Frj19NgMIGAp7X6pF5Csr4FX/Vw316+AFJd9Ibhfud79HAylvFydpcYbvZpScl
-7zgtgaXMCVtthe3GsG4gO7IdxxEBZ/Fm4NLnmbzCIWOsPMx/FxH06a539xFq/1E2
-1nYFjiKg8a5JFmYU/4mV9MQs4bP/3ip9byi10V+fEIfp5cEEmfNeVeW5E7J8PqG9
-t4rLJ8FR4yJgQUa2gs2SNYsjWQuwS/MJvAv4fDKlkQjQmYRAOp1SszAnyaplvri4
-ncmfDsf0r65/sd6S40g5lHH8LIbGxcOIN6kwthSTPWX89r42CbY8GzjTkaeejNKx
-v1aCrO58wAtursO1DiXCvBY7+NdafMRnoHwBk50iPqrVkNA8fv+auRyB2/G5Ag0E
-YH3+JQEQALivllTjMolxUW2OxrXb+a2Pt6vjCBsiJzrUj0Pa63U+lT9jldbCCfgP
-wDpcDuO1O05Q8k1MoYZ6HddjWnqKG7S3eqkV5c3ct3amAXp513QDKZUfIDylOmhU
-qvxjEgvGjdRjz6kECFGYr6Vnj/p6AwWv4/FBRFlrq7cnQgPynbIH4hrWvewp3Tqw
-GVgqm5RRofuAugi8iZQVlAiQZJo88yaztAQ/7VsXBiHTn61ugQ8bKdAsr8w/ZZU5
-HScHLqRolcYg0cKN91c0EbJq9k1LUC//CakPB9mhi5+aUVUGusIM8ECShUEgSTCi
-KQiJUPZ2CFbbPE9L5o9xoPCxjXoX+r7L/WyoCPTeoS3YRUMEnWKvc42Yxz3meRb+
-BmaqgbheNmzOah5nMwPupJYmHrjWPkX7oyyHxLSFw4dtoP2j6Z7GdRXKa2dUYdk2
-x3JYKocrDoPHh3Q0TAZujtpdjFi1BS8pbxYFb3hHmGSdvz7T7KcqP7ChC7k2RAKO
-GiG7QQe4NX3sSMgweYpl4OwvQOn73t5CVWYp/gIBNZGsU3Pto8g27vHeWyH9mKr4
-cSepDhw+/X8FGRNdxNfpLKm7Vc0Sm9Sof8TRFrBTqX+vIQupYHRi5QQCuYaV6OVr
-ITeegNK3So4m39d6ajCR9QxRbmjnx9UcnSYYDmIB6fpBuwT0ogNtABEBAAGJBHIE
-GAEKACYCGwIWIQTIdAEfCrQFEQ0CEFU0Nl2UctdGjwUCYH4bgAUJAeFQ2wJAwXQg
-BBkBCgAdFiEEs2y6kaLAcwxDX8KAsLRBCXaFtnYFAmB9/iUACgkQsLRBCXaFtnYX
-BhAAlxejyFXoQwyGo9U+2g9N6LUb/tNtH29RHYxy4A3/ZUY7d/FMkArmh4+dfjf0
-p9MJz98Zkps20kaYP+2YzYmaizO6OA6RIddcEXQDRCPHmLts3097mJ/skx9qLAf6
-rh9J7jWeSqWO6VW6Mlx8j9m7sm3Ae1OsjOx/m7lGZOhY4UYfY627+Jf7WQ5103Qs
-lgQ09es/vhTCx0g34SYEmMW15Tc3eCjQ21b1MeJD/V26npeakV8iCZ1kHZHawPq/
-aCCuYEcCeQOOteTWvl7HXaHMhHIx7jjOd8XX9V+UxsGz2WCIxX/j7EEEc7CAxwAN
-nWp9jXeLfxYfjrUB7XQZsGCd4EHHzUyCf7iRJL7OJ3tz5Z+rOlNjSgci+ycHEccL
-YeFAEV+Fz+sj7q4cFAferkr7imY1XEI0Ji5P8p/uRYw/n8uUf7LrLw5TzHmZsTSC
-UaiL4llRzkDC6cVhYfqQWUXDd/r385OkE4oalNNE+n+txNRx92rpvXWZ5qFYfv7E
-95fltvpXc0iOugPMzyof3lwo3Xi4WZKc1CC/jEviKTQhfn3WZukuF5lbz3V1PQfI
-xFsYe9WYQmp25XGgezjXzp89C/OIcYsVB1KJAKihgbYdHyUN4fRCmOszmOUwEAKR
-3k5j4X8V5bk08sA69NVXPn2ofxyk3YYOMYWW8ouObnXoS8QJEDQ2XZRy10aPMpsQ
-AIbwX21erVqUDMPn1uONP6o4NBEq4MwG7d+fT85rc1U0RfeKBwjucAE/iStZDQoM
-ZKWvGhFR+uoyg1LrXNKuSPB82unh2bpvj4zEnJsJadiwtShTKDsikhrfFEK3aCK8
-Zuhpiu3jxMFDhpFzlxsSwaCcGJqcdwGhWUx0ZAVD2X71UCFoOXPjF9fNnpy80YNp
-flPjj2RnOZbJyBIM0sWIVMd8F44qkTASf8K5Qb47WFN5tSpePq7OCm7s8u+lYZGK
-wR18K7VliundR+5a8XAOyUXOL5UsDaQCK4Lj4lRaeFXunXl3DJ4E+7BKzZhReJL6
-EugV5eaGonA52TWtFdB8p+79wPUeI3KcdPmQ9Ll5Zi/jBemY4bzasmgKzNeMtwWP
-fk6WgrvBwptqohw71HDymGxFUnUP7XYYjic2sVKhv9AevMGycVgwWBiWroDCQ9Ja
-btKfxHhI2p+g+rcywmBobWJbZsujTNjhtme+kNn1mhJsD3bKPjKQfAxaTskBLb0V
-wgV21891TS1Dq9kdPLwoS4XNpYg2LLB4p9hmeG3fu9+OmqwY5oKXsHiWc43dei9Y
-yxZ1AAUOIaIdPkq+YG/PhlGE4YcQZ4RPpltAr0HfGgZhmXWigbGS+66pUj+Ojysc
-j0K5tCVxVu0fhhFpOlHv0LWaxCbnkgkQH9jfMEJkAWMOuQINBGCAXCYBEADW6RNr
-ZVGNXvHVBqSiOWaxl1XOiEoiHPt50Aijt25yXbG+0kHIFSoR+1g6Lh20JTCChgfQ
-kGGjzQvEuG1HTw07YhsvLc0pkjNMfu6gJqFox/ogc53mz69OxXauzUQ/TZ27GDVp
-UBu+EhDKt1s3OtA6Bjz/csop/Um7gT0+ivHyvJ/jGdnPEZv8tNuSE/Uo+hn/Q9hg
-8SbveZzo3C+U4KcabCESEFl8Gq6aRi9vAfa65oxD5jKaIz7cy+pwb0lizqlW7H9t
-Qlr3dBfdIcdzgR55hTFC5/XrcwJ6/nHVH/xGskEasnfCQX8RYKMuy0UADJy72TkZ
-bYaCx+XXIcVB8GTOmJVoAhrTSSVLAZspfCnjwnSxisDn3ZzsYrq3cV6sU8b+QlIX
-7VAjurE+5cZiVlaxgCjyhKqlGgmonnReWOBacCgL/UvuwMmMp5TTLmiLXLT7uxeG
-ojEyoCk4sMrqrU1jevHyGlDJH9Taux15GILDwnYFfAvPF9WCid4UZ4Ouwjcaxfys
-3LxNiZIlUsXNKwS3mhiMRL4TRsbs4k4QE+LIMOsauIvcvm8/frydvQ/kUwIhVTH8
-0XGOH909bYtJvY3fudK7ShIwm7ZFTduBJUG473E/Fn3VkhTmBX6+PjOC50HR/Hyb
-waRCzfDruMe3TAcE/tSP5CUOb9C7+P+hPzQcDwARAQABiQRyBBgBCgAmFiEEyHQB
-Hwq0BRENAhBVNDZdlHLXRo8FAmCAXCYCGwIFCQlmAYACQAkQNDZdlHLXRo/BdCAE
-GQEKAB0WIQQ3TsdbSFkTYEqDHMfIIMbVzSerhwUCYIBcJgAKCRDIIMbVzSerh0Xw
-D/9ghnUsoNCu1OulcoJdHboMazJvDt/znttdQSnULBVElgM5zk0Uyv87zFBzuCyQ
-JWL3bWesQ2uFx5fRWEPDEfWVdDrjpQGb1OCCQyz1QlNPV/1M1/xhKGS9EeXrL8Dw
-F6KTGkRwn1yXiP4BGgfeFIQHmJcKXEZ9HkrpNb8mcexkROv4aIPAwn+IaE+NHVtt
-IBnufMXLyfpkWJQtJa9elh9PMLlHHnuvnYLvuAoOkhuvs7fXDMpfFZ01C+QSv1dz
-Hm52GSStERQzZ51w4c0rYDneYDniC/sQT1x3dP5Xf6wzO+EhRMabkvoTbMqPsTEP
-xyWr2pNtTBYp7pfQjsHxhJpQF0xjGN9C39z7f3gJG8IJhnPeulUqEZjhRFyVZQ6/
-siUeq7vu4+dM/JQL+i7KKe7Lp9UMrG6NLMH+ltaoD3+lVm8fdTUxS5MNPoA/I8cK
-1OWTJHkrp7V/XaY7mUtvQn5V1yET5b4bogz4nME6WLiFMd+7x73gB+YJ6MGYNuO8
-e/NFK67MfHbk1/AiPTAJ6s5uHRQIkZcBPG7y5PpfcHpIlwPYCDGYlTajZXblyKrw
-BttVnYKvKsnlysv11glSg0DphGxQJbXzWpvBNyhMNH5dffcfvd3eXJAxnD81GD2z
-ZAriMJ4Av2TfeqQ2nxd2ddn0jX4WVHtAvLXfCgLM2Gveho4jD/9sZ6PZz/rEeTvt
-h88t50qPcBa4bb25X0B5FO3TeK2LL3VKLuEp5lgdcHVonrcdqZFobN1CgGJua8TW
-SprIkh+8ATZ/FXQTi01NzLhHXT1IQzSpFaZw0gb2f5ruXwvTPpfXzQrs2omY+7s7
-fkCwGPesvpSXPKn9v8uhUwD7NGW/Dm+jUM+QtC/FqzX7+/Q+OuEPjClUh1cqopCZ
-EvAI3HjnavGrYuU6DgQdjyGT/UDbuwbCXqHxHojVVkISGzCTGpmBcQYQqhcFRedJ
-yJlu6PSXlA7+8Ajh52oiMJ3ez4xSssFgUQAyOB16432tm4erpGmCyakkoRmMUn3p
-wx+QIppxRlsHznhcCQKR3tcblUqH3vq5i4/ZAihusMCa0YrShtxfdSb13oKX+pFr
-aZXvxyZlCa5qoQQBV1sowmPL1N2j3dR9TVpdTyCFQSv4KeiExmowtLIjeCppRBEK
-eeYHJnlfkyKXPhxTVVO6H+dU4nVu0ASQZ07KiQjbI+zTpPKFLPp3/0sPRJM57r1+
-aTS71iR7nZNZ1f8LZV2OvGE6fJVtgJ1J4Nu02K54uuIhU3tg1+7Xt+IqwRc9rbVr
-pHH/hFCYBPW2D2dxB+k2pQlg5NI+TpsXj5Zun8kRw5RtVb+dLuiH/xmxArIee8Jq
-ZF5q4h4I33PSGDdSvGXn9UMY5Isjpg==
-=7pIB
------END PGP PUBLIC KEY BLOCK-----`
-)
diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go
deleted file mode 100644
index 843de8cd..00000000
--- a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/checksum_downloader.go
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package releasesjson
-
-import (
- "context"
- "crypto/sha256"
- "encoding/hex"
- "fmt"
- "io"
- "log"
- "net/http"
- "net/url"
- "strings"
-
- "github.com/ProtonMail/go-crypto/openpgp"
- "github.com/hashicorp/hc-install/internal/httpclient"
-)
-
-type ChecksumDownloader struct {
- ProductVersion *ProductVersion
- Logger *log.Logger
- ArmoredPublicKey string
-
- BaseURL string
-}
-
-type ChecksumFileMap map[string]HashSum
-
-type HashSum []byte
-
-func (hs HashSum) Size() int {
- return len(hs)
-}
-
-func (hs HashSum) String() string {
- return hex.EncodeToString(hs)
-}
-
-func HashSumFromHexDigest(hexDigest string) (HashSum, error) {
- sumBytes, err := hex.DecodeString(hexDigest)
- if err != nil {
- return nil, err
- }
- return HashSum(sumBytes), nil
-}
-
-func (cd *ChecksumDownloader) DownloadAndVerifyChecksums(ctx context.Context) (ChecksumFileMap, error) {
- sigFilename, err := cd.findSigFilename(cd.ProductVersion)
- if err != nil {
- return nil, err
- }
-
- client := httpclient.NewHTTPClient()
- sigURL := fmt.Sprintf("%s/%s/%s/%s", cd.BaseURL,
- url.PathEscape(cd.ProductVersion.Name),
- url.PathEscape(cd.ProductVersion.RawVersion),
- url.PathEscape(sigFilename))
- cd.Logger.Printf("downloading signature from %s", sigURL)
-
- req, err := http.NewRequestWithContext(ctx, http.MethodGet, sigURL, nil)
- if err != nil {
- return nil, fmt.Errorf("failed to create request for %q: %w", sigURL, err)
- }
- sigResp, err := client.Do(req)
- if err != nil {
- return nil, err
- }
-
- if sigResp.StatusCode != 200 {
- return nil, fmt.Errorf("failed to download signature from %q: %s", sigURL, sigResp.Status)
- }
-
- defer sigResp.Body.Close()
-
- shasumsURL := fmt.Sprintf("%s/%s/%s/%s", cd.BaseURL,
- url.PathEscape(cd.ProductVersion.Name),
- url.PathEscape(cd.ProductVersion.RawVersion),
- url.PathEscape(cd.ProductVersion.SHASUMS))
- cd.Logger.Printf("downloading checksums from %s", shasumsURL)
-
- req, err = http.NewRequestWithContext(ctx, http.MethodGet, shasumsURL, nil)
- if err != nil {
- return nil, fmt.Errorf("failed to create request for %q: %w", shasumsURL, err)
- }
- sumsResp, err := client.Do(req)
- if err != nil {
- return nil, err
- }
-
- if sumsResp.StatusCode != 200 {
- return nil, fmt.Errorf("failed to download checksums from %q: %s", shasumsURL, sumsResp.Status)
- }
-
- defer sumsResp.Body.Close()
-
- var shaSums strings.Builder
- sumsReader := io.TeeReader(sumsResp.Body, &shaSums)
-
- err = cd.verifySumsSignature(sumsReader, sigResp.Body)
- if err != nil {
- return nil, err
- }
-
- return fileMapFromChecksums(shaSums)
-}
-
-func fileMapFromChecksums(checksums strings.Builder) (ChecksumFileMap, error) {
- csMap := make(ChecksumFileMap, 0)
-
- lines := strings.Split(checksums.String(), "\n")
- for _, line := range lines {
- line = strings.TrimSpace(line)
- if line == "" {
- continue
- }
- parts := strings.Fields(line)
- if len(parts) != 2 {
- return nil, fmt.Errorf("unexpected checksum line format: %q", line)
- }
-
- h, err := HashSumFromHexDigest(parts[0])
- if err != nil {
- return nil, err
- }
-
- if h.Size() != sha256.Size {
- return nil, fmt.Errorf("unexpected sha256 format (len: %d, expected: %d)",
- h.Size(), sha256.Size)
- }
-
- csMap[parts[1]] = h
- }
- return csMap, nil
-}
-
-func (cd *ChecksumDownloader) verifySumsSignature(checksums, signature io.Reader) error {
- el, err := cd.keyEntityList()
- if err != nil {
- return err
- }
-
- _, err = openpgp.CheckDetachedSignature(el, checksums, signature, nil)
- if err != nil {
- return fmt.Errorf("unable to verify checksums signature: %w", err)
- }
-
- cd.Logger.Printf("checksum signature is valid")
-
- return nil
-}
-
-func (cd *ChecksumDownloader) findSigFilename(pv *ProductVersion) (string, error) {
- sigFiles := pv.SHASUMSSigs
- if len(sigFiles) == 0 {
- sigFiles = []string{pv.SHASUMSSig}
- }
-
- keyIds, err := cd.pubKeyIds()
- if err != nil {
- return "", err
- }
-
- for _, filename := range sigFiles {
- for _, keyID := range keyIds {
- if strings.HasSuffix(filename, fmt.Sprintf("_SHA256SUMS.%s.sig", keyID)) {
- return filename, nil
- }
- }
- if strings.HasSuffix(filename, "_SHA256SUMS.sig") {
- return filename, nil
- }
- }
-
- return "", fmt.Errorf("no suitable sig file found")
-}
-
-func (cd *ChecksumDownloader) pubKeyIds() ([]string, error) {
- entityList, err := cd.keyEntityList()
- if err != nil {
- return nil, err
- }
-
- fingerprints := make([]string, 0)
- for _, entity := range entityList {
- fingerprints = append(fingerprints, entity.PrimaryKey.KeyIdShortString())
- }
-
- return fingerprints, nil
-}
-
-func (cd *ChecksumDownloader) keyEntityList() (openpgp.EntityList, error) {
- if cd.ArmoredPublicKey == "" {
- return nil, fmt.Errorf("no public key provided")
- }
- return openpgp.ReadArmoredKeyRing(strings.NewReader(cd.ArmoredPublicKey))
-}
diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go
deleted file mode 100644
index 146c1cf0..00000000
--- a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/downloader.go
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package releasesjson
-
-import (
- "archive/zip"
- "bytes"
- "context"
- "crypto/sha256"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "net/http"
- "net/url"
- "os"
- "path/filepath"
- "runtime"
- "strings"
-
- "github.com/hashicorp/hc-install/internal/httpclient"
-)
-
-type Downloader struct {
- Logger *log.Logger
- VerifyChecksum bool
- ArmoredPublicKey string
- BaseURL string
-}
-
-func (d *Downloader) DownloadAndUnpack(ctx context.Context, pv *ProductVersion, binDir string, licenseDir string) (zipFilePath string, err error) {
- if len(pv.Builds) == 0 {
- return "", fmt.Errorf("no builds found for %s %s", pv.Name, pv.Version)
- }
-
- pb, ok := pv.Builds.FilterBuild(runtime.GOOS, runtime.GOARCH, "zip")
- if !ok {
- return "", fmt.Errorf("no ZIP archive found for %s %s %s/%s",
- pv.Name, pv.Version, runtime.GOOS, runtime.GOARCH)
- }
-
- var verifiedChecksum HashSum
- if d.VerifyChecksum {
- v := &ChecksumDownloader{
- BaseURL: d.BaseURL,
- ProductVersion: pv,
- Logger: d.Logger,
- ArmoredPublicKey: d.ArmoredPublicKey,
- }
- verifiedChecksums, err := v.DownloadAndVerifyChecksums(ctx)
- if err != nil {
- return "", err
- }
- var ok bool
- verifiedChecksum, ok = verifiedChecksums[pb.Filename]
- if !ok {
- return "", fmt.Errorf("no checksum found for %q", pb.Filename)
- }
- }
-
- client := httpclient.NewHTTPClient()
-
- archiveURL := pb.URL
- if d.BaseURL != "" {
- // ensure that absolute download links from mocked responses
- // are still pointing to the mock server if one is set
- baseURL, err := url.Parse(d.BaseURL)
- if err != nil {
- return "", err
- }
-
- u, err := url.Parse(archiveURL)
- if err != nil {
- return "", err
- }
- u.Scheme = baseURL.Scheme
- u.Host = baseURL.Host
- archiveURL = u.String()
- }
-
- d.Logger.Printf("downloading archive from %s", archiveURL)
-
- req, err := http.NewRequestWithContext(ctx, http.MethodGet, archiveURL, nil)
- if err != nil {
- return "", fmt.Errorf("failed to create request for %q: %w", archiveURL, err)
- }
- resp, err := client.Do(req)
- if err != nil {
- return "", err
- }
-
- if resp.StatusCode != 200 {
- return "", fmt.Errorf("failed to download ZIP archive from %q: %s", archiveURL, resp.Status)
- }
-
- defer resp.Body.Close()
-
- pkgReader := resp.Body
-
- contentType := resp.Header.Get("content-type")
- if !contentTypeIsZip(contentType) {
- return "", fmt.Errorf("unexpected content-type: %s (expected any of %q)",
- contentType, zipMimeTypes)
- }
-
- expectedSize := resp.ContentLength
-
- pkgFile, err := ioutil.TempFile("", pb.Filename)
- if err != nil {
- return "", err
- }
- defer pkgFile.Close()
- pkgFilePath, err := filepath.Abs(pkgFile.Name())
-
- d.Logger.Printf("copying %q (%d bytes) to %s", pb.Filename, expectedSize, pkgFile.Name())
-
- var bytesCopied int64
- if d.VerifyChecksum {
- d.Logger.Printf("verifying checksum of %q", pb.Filename)
- h := sha256.New()
- r := io.TeeReader(resp.Body, pkgFile)
-
- bytesCopied, err = io.Copy(h, r)
- if err != nil {
- return "", err
- }
-
- calculatedSum := h.Sum(nil)
- if !bytes.Equal(calculatedSum, verifiedChecksum) {
- return pkgFilePath, fmt.Errorf(
- "checksum mismatch (expected: %x, got: %x)",
- verifiedChecksum, calculatedSum,
- )
- }
- } else {
- bytesCopied, err = io.Copy(pkgFile, pkgReader)
- if err != nil {
- return pkgFilePath, err
- }
- }
-
- d.Logger.Printf("copied %d bytes to %s", bytesCopied, pkgFile.Name())
-
- if expectedSize != 0 && bytesCopied != int64(expectedSize) {
- return pkgFilePath, fmt.Errorf(
- "unexpected size (downloaded: %d, expected: %d)",
- bytesCopied, expectedSize,
- )
- }
-
- r, err := zip.OpenReader(pkgFile.Name())
- if err != nil {
- return pkgFilePath, err
- }
- defer r.Close()
-
- for _, f := range r.File {
- if strings.Contains(f.Name, "..") {
- // While we generally trust the source ZIP file
- // we still reject path traversal attempts as a precaution.
- continue
- }
- srcFile, err := f.Open()
- if err != nil {
- return pkgFilePath, err
- }
-
- // Determine the appropriate destination file path
- dstDir := binDir
- if isLicenseFile(f.Name) && licenseDir != "" {
- dstDir = licenseDir
- }
-
- d.Logger.Printf("unpacking %s to %s", f.Name, dstDir)
- dstPath := filepath.Join(dstDir, f.Name)
- dstFile, err := os.Create(dstPath)
- if err != nil {
- return pkgFilePath, err
- }
-
- _, err = io.Copy(dstFile, srcFile)
- if err != nil {
- return pkgFilePath, err
- }
- srcFile.Close()
- dstFile.Close()
- }
-
- return pkgFilePath, nil
-}
-
-// The production release site uses consistent single mime type
-// but mime types are platform-dependent
-// and we may use different OS under test
-var zipMimeTypes = []string{
- "application/x-zip-compressed", // Windows
- "application/zip", // Unix
-}
-
-func contentTypeIsZip(contentType string) bool {
- for _, mt := range zipMimeTypes {
- if mt == contentType {
- return true
- }
- }
- return false
-}
-
-// Enterprise products have a few additional license files
-// that need to be extracted to a separate directory
-var licenseFiles = []string{
- "EULA.txt",
- "TermsOfEvaluation.txt",
-}
-
-func isLicenseFile(filename string) bool {
- for _, lf := range licenseFiles {
- if lf == filename {
- return true
- }
- }
- return false
-}
diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/product_version.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/product_version.go
deleted file mode 100644
index 99b811a6..00000000
--- a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/product_version.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package releasesjson
-
-import "github.com/hashicorp/go-version"
-
-// ProductVersion is a wrapper around a particular product version like
-// "consul 0.5.1". A ProductVersion may have one or more builds.
-type ProductVersion struct {
- Name string `json:"name"`
- RawVersion string `json:"version"`
- Version *version.Version `json:"-"`
- SHASUMS string `json:"shasums,omitempty"`
- SHASUMSSig string `json:"shasums_signature,omitempty"`
- SHASUMSSigs []string `json:"shasums_signatures,omitempty"`
- Builds ProductBuilds `json:"builds"`
-}
-
-type ProductVersionsMap map[string]*ProductVersion
-
-type ProductVersions []*ProductVersion
-
-func (pv ProductVersions) Len() int {
- return len(pv)
-}
-
-func (pv ProductVersions) Less(i, j int) bool {
- return pv[i].Version.LessThan(pv[j].Version)
-}
-
-func (pv ProductVersions) Swap(i, j int) {
- pv[i], pv[j] = pv[j], pv[i]
-}
-
-func (pvm ProductVersionsMap) AsSlice() ProductVersions {
- versions := make(ProductVersions, 0)
-
- for _, pVersion := range pvm {
- versions = append(versions, pVersion)
- }
-
- return versions
-}
diff --git a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go b/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go
deleted file mode 100644
index 755019f2..00000000
--- a/vendor/github.com/hashicorp/hc-install/internal/releasesjson/releases.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package releasesjson
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "io/ioutil"
- "log"
- "net/http"
- "net/url"
- "strings"
-
- "github.com/hashicorp/go-version"
- "github.com/hashicorp/hc-install/internal/httpclient"
-)
-
-const defaultBaseURL = "https://releases.hashicorp.com"
-
-// Product is a top-level product like "Consul" or "Nomad". A Product may have
-// one or more versions.
-type Product struct {
- Name string `json:"name"`
- Versions ProductVersionsMap `json:"versions"`
-}
-
-type ProductBuilds []*ProductBuild
-
-func (pbs ProductBuilds) FilterBuild(os string, arch string, suffix string) (*ProductBuild, bool) {
- for _, pb := range pbs {
- if pb.OS == os && pb.Arch == arch && strings.HasSuffix(pb.Filename, suffix) {
- return pb, true
- }
- }
- return nil, false
-}
-
-// ProductBuild is an OS/arch-specific representation of a product. This is the
-// actual file that a user would download, like "consul_0.5.1_linux_amd64".
-type ProductBuild struct {
- Name string `json:"name"`
- Version string `json:"version"`
- OS string `json:"os"`
- Arch string `json:"arch"`
- Filename string `json:"filename"`
- URL string `json:"url"`
-}
-
-type Releases struct {
- logger *log.Logger
- BaseURL string
-}
-
-func NewReleases() *Releases {
- return &Releases{
- logger: log.New(ioutil.Discard, "", 0),
- BaseURL: defaultBaseURL,
- }
-}
-
-func (r *Releases) SetLogger(logger *log.Logger) {
- r.logger = logger
-}
-
-func (r *Releases) ListProductVersions(ctx context.Context, productName string) (ProductVersionsMap, error) {
- client := httpclient.NewHTTPClient()
-
- productIndexURL := fmt.Sprintf("%s/%s/index.json",
- r.BaseURL,
- url.PathEscape(productName))
- r.logger.Printf("requesting versions from %s", productIndexURL)
-
- req, err := http.NewRequestWithContext(ctx, http.MethodGet, productIndexURL, nil)
- if err != nil {
- return nil, fmt.Errorf("failed to create request for %q: %w", productIndexURL, err)
- }
- resp, err := client.Do(req)
- if err != nil {
- return nil, err
- }
-
- if resp.StatusCode != 200 {
- return nil, fmt.Errorf("failed to obtain product versions from %q: %s ",
- productIndexURL, resp.Status)
- }
-
- contentType := resp.Header.Get("content-type")
- if contentType != "application/json" && contentType != "application/vnd+hashicorp.releases-api.v0+json" {
- return nil, fmt.Errorf("unexpected Content-Type: %q", contentType)
- }
-
- defer resp.Body.Close()
-
- r.logger.Printf("received %s", resp.Status)
-
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return nil, err
- }
-
- p := Product{}
- err = json.Unmarshal(body, &p)
- if err != nil {
- return nil, fmt.Errorf("%w: failed to unmarshal: %q",
- err, string(body))
- }
-
- for rawVersion := range p.Versions {
- v, err := version.NewVersion(rawVersion)
- if err != nil {
- // remove unparseable version
- delete(p.Versions, rawVersion)
- continue
- }
-
- p.Versions[rawVersion].Version = v
- }
-
- return p.Versions, nil
-}
-
-func (r *Releases) GetProductVersion(ctx context.Context, product string, version *version.Version) (*ProductVersion, error) {
- client := httpclient.NewHTTPClient()
-
- indexURL := fmt.Sprintf("%s/%s/%s/index.json",
- r.BaseURL,
- url.PathEscape(product),
- url.PathEscape(version.String()))
- r.logger.Printf("requesting version from %s", indexURL)
-
- req, err := http.NewRequestWithContext(ctx, http.MethodGet, indexURL, nil)
- if err != nil {
- return nil, fmt.Errorf("failed to create request for %q: %w", indexURL, err)
- }
- resp, err := client.Do(req)
- if err != nil {
- return nil, err
- }
-
- if resp.StatusCode != 200 {
- return nil, fmt.Errorf("failed to obtain product version from %q: %s ",
- indexURL, resp.Status)
- }
-
- contentType := resp.Header.Get("content-type")
- if contentType != "application/json" && contentType != "application/vnd+hashicorp.releases-api.v0+json" {
- return nil, fmt.Errorf("unexpected Content-Type: %q", contentType)
- }
-
- defer resp.Body.Close()
-
- r.logger.Printf("received %s", resp.Status)
-
- body, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- return nil, err
- }
-
- pv := &ProductVersion{}
- err = json.Unmarshal(body, pv)
- if err != nil {
- return nil, fmt.Errorf("%w: failed to unmarshal response: %q",
- err, string(body))
- }
-
- return pv, nil
-}
diff --git a/vendor/github.com/hashicorp/hc-install/internal/src/src.go b/vendor/github.com/hashicorp/hc-install/internal/src/src.go
deleted file mode 100644
index 9cac8a64..00000000
--- a/vendor/github.com/hashicorp/hc-install/internal/src/src.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package src
-
-type InstallSrcSigil struct{}
diff --git a/vendor/github.com/hashicorp/hc-install/internal/validators/validators.go b/vendor/github.com/hashicorp/hc-install/internal/validators/validators.go
deleted file mode 100644
index 8a331c4c..00000000
--- a/vendor/github.com/hashicorp/hc-install/internal/validators/validators.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package validators
-
-import "regexp"
-
-var (
- productNameRe = regexp.MustCompile(`^[a-z0-9-]+$`)
- binaryNameRe = regexp.MustCompile(`^[a-zA-Z0-9-_.]+$`)
-)
-
-// IsProductNameValid provides early user-facing validation of a product name
-func IsProductNameValid(productName string) bool {
- return productNameRe.MatchString(productName)
-}
-
-// IsBinaryNameValid provides early user-facing validation of binary name
-func IsBinaryNameValid(binaryName string) bool {
- return binaryNameRe.MatchString(binaryName)
-}
diff --git a/vendor/github.com/hashicorp/hc-install/product/consul.go b/vendor/github.com/hashicorp/hc-install/product/consul.go
deleted file mode 100644
index 9789d7c3..00000000
--- a/vendor/github.com/hashicorp/hc-install/product/consul.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package product
-
-import (
- "context"
- "fmt"
- "os/exec"
- "regexp"
- "runtime"
- "strings"
-
- "github.com/hashicorp/go-version"
- "github.com/hashicorp/hc-install/internal/build"
-)
-
-var consulVersionOutputRe = regexp.MustCompile(`Consul ` + simpleVersionRe)
-
-var (
- v1_18 = version.Must(version.NewVersion("1.18"))
-)
-
-var Consul = Product{
- Name: "consul",
- BinaryName: func() string {
- if runtime.GOOS == "windows" {
- return "consul.exe"
- }
- return "consul"
- },
- GetVersion: func(ctx context.Context, path string) (*version.Version, error) {
- cmd := exec.CommandContext(ctx, path, "version")
-
- out, err := cmd.Output()
- if err != nil {
- return nil, err
- }
-
- stdout := strings.TrimSpace(string(out))
-
- submatches := consulVersionOutputRe.FindStringSubmatch(stdout)
- if len(submatches) != 2 {
- return nil, fmt.Errorf("unexpected number of version matches %d for %s", len(submatches), stdout)
- }
- v, err := version.NewVersion(submatches[1])
- if err != nil {
- return nil, fmt.Errorf("unable to parse version %q: %w", submatches[1], err)
- }
-
- return v, err
- },
- BuildInstructions: &BuildInstructions{
- GitRepoURL: "https://github.com/hashicorp/consul.git",
- PreCloneCheck: &build.GoIsInstalled{},
- Build: &build.GoBuild{},
- },
-}
diff --git a/vendor/github.com/hashicorp/hc-install/product/nomad.go b/vendor/github.com/hashicorp/hc-install/product/nomad.go
deleted file mode 100644
index b675d9a1..00000000
--- a/vendor/github.com/hashicorp/hc-install/product/nomad.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package product
-
-import (
- "context"
- "fmt"
- "os/exec"
- "regexp"
- "runtime"
- "strings"
-
- "github.com/hashicorp/go-version"
- "github.com/hashicorp/hc-install/internal/build"
-)
-
-var nomadVersionOutputRe = regexp.MustCompile(`Nomad ` + simpleVersionRe)
-
-var Nomad = Product{
- Name: "nomad",
- BinaryName: func() string {
- if runtime.GOOS == "windows" {
- return "nomad.exe"
- }
- return "nomad"
- },
- GetVersion: func(ctx context.Context, path string) (*version.Version, error) {
- cmd := exec.CommandContext(ctx, path, "version")
-
- out, err := cmd.Output()
- if err != nil {
- return nil, err
- }
-
- stdout := strings.TrimSpace(string(out))
-
- submatches := nomadVersionOutputRe.FindStringSubmatch(stdout)
- if len(submatches) != 2 {
- return nil, fmt.Errorf("unexpected number of version matches %d for %s", len(submatches), stdout)
- }
- v, err := version.NewVersion(submatches[1])
- if err != nil {
- return nil, fmt.Errorf("unable to parse version %q: %w", submatches[1], err)
- }
-
- return v, err
- },
- BuildInstructions: &BuildInstructions{
- GitRepoURL: "https://github.com/hashicorp/nomad.git",
- PreCloneCheck: &build.GoIsInstalled{},
- Build: &build.GoBuild{DetectVendoring: true},
- },
-}
diff --git a/vendor/github.com/hashicorp/hc-install/product/product.go b/vendor/github.com/hashicorp/hc-install/product/product.go
deleted file mode 100644
index 85f2e11b..00000000
--- a/vendor/github.com/hashicorp/hc-install/product/product.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package product
-
-import (
- "context"
- "time"
-
- "github.com/hashicorp/go-version"
-)
-
-type Product struct {
- // Name which identifies the product
- // on releases.hashicorp.com and in Checkpoint
- Name string
-
- // BinaryName represents name of the unpacked binary to be executed or built
- BinaryName BinaryNameFunc
-
- // GetVersion represents how to obtain the version of the product
- // reflecting any output or CLI flag differences
- GetVersion func(ctx context.Context, execPath string) (*version.Version, error)
-
- // BuildInstructions represents how to build the product "from scratch"
- BuildInstructions *BuildInstructions
-}
-
-type BinaryNameFunc func() string
-
-type BuildInstructions struct {
- GitRepoURL string
-
- // CloneTimeout overrides default timeout
- // for cloning the repository
- CloneTimeout time.Duration
-
- // PreCloneCheck represents any checks to run
- // prior to building, such as verifying build
- // dependencies (e.g. whether Go is installed)
- PreCloneCheck Checker
-
- // PreCloneCheckTimeout overrides default timeout
- // for the PreCloneCheck
- PreCloneCheckTimeout time.Duration
-
- // Build represents how to build the product
- // after checking out the source code
- Build Builder
-
- // BuildTimeout overrides default timeout
- // for the Builder
- BuildTimeout time.Duration
-}
-
-type Checker interface {
- Check(ctx context.Context) error
-}
-
-type Builder interface {
- Build(ctx context.Context, repoDir, targetDir, binaryName string) (string, error)
- Remove(ctx context.Context) error
-}
diff --git a/vendor/github.com/hashicorp/hc-install/product/terraform.go b/vendor/github.com/hashicorp/hc-install/product/terraform.go
deleted file mode 100644
index afb6b35f..00000000
--- a/vendor/github.com/hashicorp/hc-install/product/terraform.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package product
-
-import (
- "context"
- "fmt"
- "os/exec"
- "regexp"
- "runtime"
- "strings"
-
- "github.com/hashicorp/go-version"
- "github.com/hashicorp/hc-install/internal/build"
-)
-
-var (
- simpleVersionRe = `v?(?P[0-9]+(?:\.[0-9]+)*(?:-[A-Za-z0-9\.]+)?)`
-
- terraformVersionOutputRe = regexp.MustCompile(`Terraform ` + simpleVersionRe)
-)
-
-var Terraform = Product{
- Name: "terraform",
- BinaryName: func() string {
- if runtime.GOOS == "windows" {
- return "terraform.exe"
- }
- return "terraform"
- },
- GetVersion: func(ctx context.Context, path string) (*version.Version, error) {
- cmd := exec.CommandContext(ctx, path, "version")
-
- out, err := cmd.Output()
- if err != nil {
- return nil, err
- }
-
- stdout := strings.TrimSpace(string(out))
-
- submatches := terraformVersionOutputRe.FindStringSubmatch(stdout)
- if len(submatches) != 2 {
- return nil, fmt.Errorf("unexpected number of version matches %d for %s", len(submatches), stdout)
- }
- v, err := version.NewVersion(submatches[1])
- if err != nil {
- return nil, fmt.Errorf("unable to parse version %q: %w", submatches[1], err)
- }
-
- return v, err
- },
- BuildInstructions: &BuildInstructions{
- GitRepoURL: "https://github.com/hashicorp/terraform.git",
- PreCloneCheck: &build.GoIsInstalled{},
- Build: &build.GoBuild{DetectVendoring: true},
- },
-}
diff --git a/vendor/github.com/hashicorp/hc-install/product/vault.go b/vendor/github.com/hashicorp/hc-install/product/vault.go
deleted file mode 100644
index 0b259659..00000000
--- a/vendor/github.com/hashicorp/hc-install/product/vault.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package product
-
-import (
- "context"
- "fmt"
- "os/exec"
- "regexp"
- "runtime"
- "strings"
-
- "github.com/hashicorp/go-version"
- "github.com/hashicorp/hc-install/internal/build"
-)
-
-var (
- vaultVersionOutputRe = regexp.MustCompile(`Vault ` + simpleVersionRe)
-)
-
-var Vault = Product{
- Name: "vault",
- BinaryName: func() string {
- if runtime.GOOS == "windows" {
- return "vault.exe"
- }
- return "vault"
- },
- GetVersion: func(ctx context.Context, path string) (*version.Version, error) {
- cmd := exec.CommandContext(ctx, path, "version")
-
- out, err := cmd.Output()
- if err != nil {
- return nil, err
- }
-
- stdout := strings.TrimSpace(string(out))
-
- submatches := vaultVersionOutputRe.FindStringSubmatch(stdout)
- if len(submatches) != 2 {
- return nil, fmt.Errorf("unexpected number of version matches %d for %s", len(submatches), stdout)
- }
- v, err := version.NewVersion(submatches[1])
- if err != nil {
- return nil, fmt.Errorf("unable to parse version %q: %w", submatches[1], err)
- }
-
- return v, err
- },
- BuildInstructions: &BuildInstructions{
- GitRepoURL: "https://github.com/hashicorp/vault.git",
- PreCloneCheck: &build.GoIsInstalled{},
- Build: &build.GoBuild{},
- },
-}
diff --git a/vendor/github.com/hashicorp/hc-install/releases/enterprise.go b/vendor/github.com/hashicorp/hc-install/releases/enterprise.go
deleted file mode 100644
index 179d40d1..00000000
--- a/vendor/github.com/hashicorp/hc-install/releases/enterprise.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package releases
-
-import "fmt"
-
-type EnterpriseOptions struct {
- // LicenseDir represents directory path where to install license files (required)
- LicenseDir string
-
- // Meta represents optional version metadata (e.g. hsm, fips1402)
- Meta string
-}
-
-func enterpriseVersionMetadata(eo *EnterpriseOptions) string {
- if eo == nil {
- return ""
- }
-
- metadata := "ent"
- if eo.Meta != "" {
- metadata += "." + eo.Meta
- }
- return metadata
-}
-
-func validateEnterpriseOptions(eo *EnterpriseOptions) error {
- if eo == nil {
- return nil
- }
-
- if eo.LicenseDir == "" {
- return fmt.Errorf("LicenseDir must be provided when requesting enterprise versions")
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/hc-install/releases/exact_version.go b/vendor/github.com/hashicorp/hc-install/releases/exact_version.go
deleted file mode 100644
index e42f4d23..00000000
--- a/vendor/github.com/hashicorp/hc-install/releases/exact_version.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package releases
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "log"
- "os"
- "path/filepath"
- "time"
-
- "github.com/hashicorp/go-version"
- "github.com/hashicorp/hc-install/internal/pubkey"
- rjson "github.com/hashicorp/hc-install/internal/releasesjson"
- isrc "github.com/hashicorp/hc-install/internal/src"
- "github.com/hashicorp/hc-install/internal/validators"
- "github.com/hashicorp/hc-install/product"
-)
-
-// ExactVersion installs the given Version of product
-// to OS temp directory, or to InstallDir (if not empty)
-type ExactVersion struct {
- Product product.Product
- Version *version.Version
- InstallDir string
- Timeout time.Duration
-
- // Enterprise indicates installation of enterprise version (leave nil for Community editions)
- Enterprise *EnterpriseOptions
-
- SkipChecksumVerification bool
-
- // ArmoredPublicKey is a public PGP key in ASCII/armor format to use
- // instead of built-in pubkey to verify signature of downloaded checksums
- ArmoredPublicKey string
-
- apiBaseURL string
- logger *log.Logger
- pathsToRemove []string
-}
-
-func (*ExactVersion) IsSourceImpl() isrc.InstallSrcSigil {
- return isrc.InstallSrcSigil{}
-}
-
-func (ev *ExactVersion) SetLogger(logger *log.Logger) {
- ev.logger = logger
-}
-
-func (ev *ExactVersion) log() *log.Logger {
- if ev.logger == nil {
- return discardLogger
- }
- return ev.logger
-}
-
-func (ev *ExactVersion) Validate() error {
- if !validators.IsProductNameValid(ev.Product.Name) {
- return fmt.Errorf("invalid product name: %q", ev.Product.Name)
- }
-
- if !validators.IsBinaryNameValid(ev.Product.BinaryName()) {
- return fmt.Errorf("invalid binary name: %q", ev.Product.BinaryName())
- }
-
- if ev.Version == nil {
- return fmt.Errorf("unknown version")
- }
-
- if err := validateEnterpriseOptions(ev.Enterprise); err != nil {
- return err
- }
-
- return nil
-}
-
-func (ev *ExactVersion) Install(ctx context.Context) (string, error) {
- timeout := defaultInstallTimeout
- if ev.Timeout > 0 {
- timeout = ev.Timeout
- }
- ctx, cancelFunc := context.WithTimeout(ctx, timeout)
- defer cancelFunc()
-
- if ev.pathsToRemove == nil {
- ev.pathsToRemove = make([]string, 0)
- }
-
- dstDir := ev.InstallDir
- if dstDir == "" {
- var err error
- dirName := fmt.Sprintf("%s_*", ev.Product.Name)
- dstDir, err = ioutil.TempDir("", dirName)
- if err != nil {
- return "", err
- }
- ev.pathsToRemove = append(ev.pathsToRemove, dstDir)
- ev.log().Printf("created new temp dir at %s", dstDir)
- }
- ev.log().Printf("will install into dir at %s", dstDir)
-
- rels := rjson.NewReleases()
- if ev.apiBaseURL != "" {
- rels.BaseURL = ev.apiBaseURL
- }
- rels.SetLogger(ev.log())
- installVersion := ev.Version
- if ev.Enterprise != nil {
- installVersion = versionWithMetadata(installVersion, enterpriseVersionMetadata(ev.Enterprise))
- }
- pv, err := rels.GetProductVersion(ctx, ev.Product.Name, installVersion)
- if err != nil {
- return "", err
- }
-
- d := &rjson.Downloader{
- Logger: ev.log(),
- VerifyChecksum: !ev.SkipChecksumVerification,
- ArmoredPublicKey: pubkey.DefaultPublicKey,
- BaseURL: rels.BaseURL,
- }
- if ev.ArmoredPublicKey != "" {
- d.ArmoredPublicKey = ev.ArmoredPublicKey
- }
- if ev.apiBaseURL != "" {
- d.BaseURL = ev.apiBaseURL
- }
-
- licenseDir := ""
- if ev.Enterprise != nil {
- licenseDir = ev.Enterprise.LicenseDir
- }
- zipFilePath, err := d.DownloadAndUnpack(ctx, pv, dstDir, licenseDir)
- if zipFilePath != "" {
- ev.pathsToRemove = append(ev.pathsToRemove, zipFilePath)
- }
- if err != nil {
- return "", err
- }
-
- execPath := filepath.Join(dstDir, ev.Product.BinaryName())
-
- ev.pathsToRemove = append(ev.pathsToRemove, execPath)
-
- ev.log().Printf("changing perms of %s", execPath)
- err = os.Chmod(execPath, 0o700)
- if err != nil {
- return "", err
- }
-
- return execPath, nil
-}
-
-func (ev *ExactVersion) Remove(ctx context.Context) error {
- if ev.pathsToRemove != nil {
- for _, path := range ev.pathsToRemove {
- err := os.RemoveAll(path)
- if err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-// versionWithMetadata returns a new version by combining the given version with the given metadata
-func versionWithMetadata(v *version.Version, metadata string) *version.Version {
- if v == nil {
- return nil
- }
-
- if metadata == "" {
- return v
- }
-
- v2, err := version.NewVersion(fmt.Sprintf("%s+%s", v.Core(), metadata))
- if err != nil {
- return nil
- }
-
- return v2
-}
diff --git a/vendor/github.com/hashicorp/hc-install/releases/latest_version.go b/vendor/github.com/hashicorp/hc-install/releases/latest_version.go
deleted file mode 100644
index 9893b223..00000000
--- a/vendor/github.com/hashicorp/hc-install/releases/latest_version.go
+++ /dev/null
@@ -1,195 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package releases
-
-import (
- "context"
- "fmt"
- "io/ioutil"
- "log"
- "os"
- "path/filepath"
- "sort"
- "time"
-
- "github.com/hashicorp/go-version"
- "github.com/hashicorp/hc-install/internal/pubkey"
- rjson "github.com/hashicorp/hc-install/internal/releasesjson"
- isrc "github.com/hashicorp/hc-install/internal/src"
- "github.com/hashicorp/hc-install/internal/validators"
- "github.com/hashicorp/hc-install/product"
-)
-
-type LatestVersion struct {
- Product product.Product
- Constraints version.Constraints
- InstallDir string
- Timeout time.Duration
- IncludePrereleases bool
-
- // Enterprise indicates installation of enterprise version (leave nil for Community editions)
- Enterprise *EnterpriseOptions
-
- SkipChecksumVerification bool
-
- // ArmoredPublicKey is a public PGP key in ASCII/armor format to use
- // instead of built-in pubkey to verify signature of downloaded checksums
- ArmoredPublicKey string
-
- apiBaseURL string
- logger *log.Logger
- pathsToRemove []string
-}
-
-func (*LatestVersion) IsSourceImpl() isrc.InstallSrcSigil {
- return isrc.InstallSrcSigil{}
-}
-
-func (lv *LatestVersion) SetLogger(logger *log.Logger) {
- lv.logger = logger
-}
-
-func (lv *LatestVersion) log() *log.Logger {
- if lv.logger == nil {
- return discardLogger
- }
- return lv.logger
-}
-
-func (lv *LatestVersion) Validate() error {
- if !validators.IsProductNameValid(lv.Product.Name) {
- return fmt.Errorf("invalid product name: %q", lv.Product.Name)
- }
-
- if !validators.IsBinaryNameValid(lv.Product.BinaryName()) {
- return fmt.Errorf("invalid binary name: %q", lv.Product.BinaryName())
- }
-
- if err := validateEnterpriseOptions(lv.Enterprise); err != nil {
- return err
- }
-
- return nil
-}
-
-func (lv *LatestVersion) Install(ctx context.Context) (string, error) {
- timeout := defaultInstallTimeout
- if lv.Timeout > 0 {
- timeout = lv.Timeout
- }
- ctx, cancelFunc := context.WithTimeout(ctx, timeout)
- defer cancelFunc()
-
- if lv.pathsToRemove == nil {
- lv.pathsToRemove = make([]string, 0)
- }
-
- dstDir := lv.InstallDir
- if dstDir == "" {
- var err error
- dirName := fmt.Sprintf("%s_*", lv.Product.Name)
- dstDir, err = ioutil.TempDir("", dirName)
- if err != nil {
- return "", err
- }
- lv.pathsToRemove = append(lv.pathsToRemove, dstDir)
- lv.log().Printf("created new temp dir at %s", dstDir)
- }
- lv.log().Printf("will install into dir at %s", dstDir)
-
- rels := rjson.NewReleases()
- if lv.apiBaseURL != "" {
- rels.BaseURL = lv.apiBaseURL
- }
- rels.SetLogger(lv.log())
- versions, err := rels.ListProductVersions(ctx, lv.Product.Name)
- if err != nil {
- return "", err
- }
-
- if len(versions) == 0 {
- return "", fmt.Errorf("no versions found for %q", lv.Product.Name)
- }
-
- versionToInstall, ok := lv.findLatestMatchingVersion(versions, lv.Constraints)
- if !ok {
- return "", fmt.Errorf("no matching version found for %q", lv.Constraints)
- }
-
- d := &rjson.Downloader{
- Logger: lv.log(),
- VerifyChecksum: !lv.SkipChecksumVerification,
- ArmoredPublicKey: pubkey.DefaultPublicKey,
- BaseURL: rels.BaseURL,
- }
- if lv.ArmoredPublicKey != "" {
- d.ArmoredPublicKey = lv.ArmoredPublicKey
- }
- if lv.apiBaseURL != "" {
- d.BaseURL = lv.apiBaseURL
- }
- licenseDir := ""
- if lv.Enterprise != nil {
- licenseDir = lv.Enterprise.LicenseDir
- }
- zipFilePath, err := d.DownloadAndUnpack(ctx, versionToInstall, dstDir, licenseDir)
- if zipFilePath != "" {
- lv.pathsToRemove = append(lv.pathsToRemove, zipFilePath)
- }
- if err != nil {
- return "", err
- }
-
- execPath := filepath.Join(dstDir, lv.Product.BinaryName())
-
- lv.pathsToRemove = append(lv.pathsToRemove, execPath)
-
- lv.log().Printf("changing perms of %s", execPath)
- err = os.Chmod(execPath, 0o700)
- if err != nil {
- return "", err
- }
-
- return execPath, nil
-}
-
-func (lv *LatestVersion) Remove(ctx context.Context) error {
- if lv.pathsToRemove != nil {
- for _, path := range lv.pathsToRemove {
- err := os.RemoveAll(path)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func (lv *LatestVersion) findLatestMatchingVersion(pvs rjson.ProductVersionsMap, vc version.Constraints) (*rjson.ProductVersion, bool) {
- expectedMetadata := enterpriseVersionMetadata(lv.Enterprise)
- versions := make(version.Collection, 0)
- for _, pv := range pvs.AsSlice() {
- if !lv.IncludePrereleases && pv.Version.Prerelease() != "" {
- // skip prereleases if desired
- continue
- }
-
- if pv.Version.Metadata() != expectedMetadata {
- continue
- }
-
- if vc.Check(pv.Version) {
- versions = append(versions, pv.Version)
- }
- }
-
- if len(versions) == 0 {
- return nil, false
- }
-
- sort.Stable(versions)
- latestVersion := versions[len(versions)-1]
-
- return pvs[latestVersion.Original()], true
-}
diff --git a/vendor/github.com/hashicorp/hc-install/releases/releases.go b/vendor/github.com/hashicorp/hc-install/releases/releases.go
deleted file mode 100644
index 7bef49ba..00000000
--- a/vendor/github.com/hashicorp/hc-install/releases/releases.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package releases
-
-import (
- "io/ioutil"
- "log"
- "time"
-)
-
-var (
- defaultInstallTimeout = 30 * time.Second
- defaultListTimeout = 10 * time.Second
- discardLogger = log.New(ioutil.Discard, "", 0)
-)
diff --git a/vendor/github.com/hashicorp/hc-install/releases/versions.go b/vendor/github.com/hashicorp/hc-install/releases/versions.go
deleted file mode 100644
index 49b1af78..00000000
--- a/vendor/github.com/hashicorp/hc-install/releases/versions.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package releases
-
-import (
- "context"
- "fmt"
- "sort"
- "time"
-
- "github.com/hashicorp/go-version"
- rjson "github.com/hashicorp/hc-install/internal/releasesjson"
- "github.com/hashicorp/hc-install/internal/validators"
- "github.com/hashicorp/hc-install/product"
- "github.com/hashicorp/hc-install/src"
-)
-
-// Versions allows listing all versions of a product
-// which match Constraints
-type Versions struct {
- Product product.Product
- Constraints version.Constraints
- Enterprise *EnterpriseOptions // require enterprise version if set (leave nil for OSS)
-
- ListTimeout time.Duration
-
- // Install represents configuration for installation of any listed version
- Install InstallationOptions
-}
-
-type InstallationOptions struct {
- Timeout time.Duration
- Dir string
-
- SkipChecksumVerification bool
-
- // ArmoredPublicKey is a public PGP key in ASCII/armor format to use
- // instead of built-in pubkey to verify signature of downloaded checksums
- // during installation
- ArmoredPublicKey string
-}
-
-func (v *Versions) List(ctx context.Context) ([]src.Source, error) {
- if !validators.IsProductNameValid(v.Product.Name) {
- return nil, fmt.Errorf("invalid product name: %q", v.Product.Name)
- }
-
- if err := validateEnterpriseOptions(v.Enterprise); err != nil {
- return nil, err
- }
-
- timeout := defaultListTimeout
- if v.ListTimeout > 0 {
- timeout = v.ListTimeout
- }
- ctx, cancelFunc := context.WithTimeout(ctx, timeout)
- defer cancelFunc()
-
- r := rjson.NewReleases()
- pvs, err := r.ListProductVersions(ctx, v.Product.Name)
- if err != nil {
- return nil, err
- }
-
- versions := pvs.AsSlice()
- sort.Stable(versions)
-
- expectedMetadata := enterpriseVersionMetadata(v.Enterprise)
-
- installables := make([]src.Source, 0)
- for _, pv := range versions {
- if !v.Constraints.Check(pv.Version) {
- // skip version which doesn't match constraint
- continue
- }
-
- if pv.Version.Metadata() != expectedMetadata {
- // skip version which doesn't match required metadata for enterprise or OSS versions
- continue
- }
-
- ev := &ExactVersion{
- Product: v.Product,
- Version: pv.Version,
- InstallDir: v.Install.Dir,
- Timeout: v.Install.Timeout,
-
- ArmoredPublicKey: v.Install.ArmoredPublicKey,
- SkipChecksumVerification: v.Install.SkipChecksumVerification,
- }
-
- if v.Enterprise != nil {
- ev.Enterprise = &EnterpriseOptions{
- Meta: v.Enterprise.Meta,
- LicenseDir: v.Enterprise.LicenseDir,
- }
- }
-
- installables = append(installables, ev)
- }
-
- return installables, nil
-}
diff --git a/vendor/github.com/hashicorp/hc-install/src/src.go b/vendor/github.com/hashicorp/hc-install/src/src.go
deleted file mode 100644
index f7b8265e..00000000
--- a/vendor/github.com/hashicorp/hc-install/src/src.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package src
-
-import (
- "context"
- "log"
-
- isrc "github.com/hashicorp/hc-install/internal/src"
-)
-
-// Source represents an installer, finder, or builder
-type Source interface {
- IsSourceImpl() isrc.InstallSrcSigil
-}
-
-type Installable interface {
- Source
- Install(ctx context.Context) (string, error)
-}
-
-type Findable interface {
- Source
- Find(ctx context.Context) (string, error)
-}
-
-type Buildable interface {
- Source
- Build(ctx context.Context) (string, error)
-}
-
-type Validatable interface {
- Source
- Validate() error
-}
-
-type Removable interface {
- Source
- Remove(ctx context.Context) error
-}
-
-type LoggerSettable interface {
- SetLogger(logger *log.Logger)
-}
diff --git a/vendor/github.com/hashicorp/hc-install/version/VERSION b/vendor/github.com/hashicorp/hc-install/version/VERSION
deleted file mode 100644
index d2b13eb6..00000000
--- a/vendor/github.com/hashicorp/hc-install/version/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-0.6.4
diff --git a/vendor/github.com/hashicorp/hc-install/version/version.go b/vendor/github.com/hashicorp/hc-install/version/version.go
deleted file mode 100644
index facd4294..00000000
--- a/vendor/github.com/hashicorp/hc-install/version/version.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package version
-
-import (
- _ "embed"
- "strings"
-
- "github.com/hashicorp/go-version"
-)
-
-//go:embed VERSION
-var rawVersion string
-
-// parsedVersion declared here ensures that invalid versions panic early, on import
-var parsedVersion = version.Must(version.NewVersion(strings.TrimSpace(rawVersion)))
-
-// Version returns the version of the library
-//
-// Note: This is only exposed as public function/package
-// due to hard-coded constraints in the release tooling.
-// In general downstream should not implement version-specific
-// logic and rely on this function to be present in future releases.
-func Version() *version.Version {
- return parsedVersion
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/.copywrite.hcl b/vendor/github.com/hashicorp/hcl/v2/.copywrite.hcl
deleted file mode 100644
index 35eae082..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/.copywrite.hcl
+++ /dev/null
@@ -1,16 +0,0 @@
-schema_version = 1
-
-project {
- license = "MPL-2.0"
- copyright_year = 2014
-
- # (OPTIONAL) A list of globs that should not have copyright/license headers.
- # Supports doublestar glob patterns for more flexibility in defining which
- # files or folders should be ignored
- header_ignore = [
- "hclsyntax/fuzz/testdata/**",
- "hclwrite/fuzz/testdata/**",
- "json/fuzz/testdata/**",
- "specsuite/tests/**",
- ]
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md b/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md
deleted file mode 100644
index f81110dc..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/CHANGELOG.md
+++ /dev/null
@@ -1,340 +0,0 @@
-# HCL Changelog
-
-## v2.21.0 (June 19, 2024)
-
-### Enhancements
-
-* Introduce `ParseTraversalPartial`, which allows traversals that include the splat (`[*]`) index operator. ([#673](https://github.com/hashicorp/hcl/pull/673))
-* ext/dynblock: Now accepts marked values in `for_each`, and will transfer those marks (as much as technically possible) to values in the generated blocks. ([#679](https://github.com/hashicorp/hcl/pull/679))
-
-### Bugs Fixed
-
-* Expression evaluation will no longer panic if the splat operator is applied to an unknown value that has cty marks. ([#678](https://github.com/hashicorp/hcl/pull/678))
-
-## v2.20.1 (March 26, 2024)
-
-### Bugs Fixed
-
-* Return `ExprSyntaxError` when an invalid namespaced function is encountered during parsing ([#668](https://github.com/hashicorp/hcl/pull/668))
-
-### Internal
-
-* Standardize on only two value dumping/diffing libraries ([#669](https://github.com/hashicorp/hcl/pull/669))
-
-## v2.20.0 (February 29, 2024)
-
-### Enhancements
-
-* Support for namespaced functions ([#639](https://github.com/hashicorp/hcl/pull/639))
-
-### Bugs Fixed
-
-* ext/dynblock: if `iterator` is invalid return this error instead of consequential errors ([#656](https://github.com/hashicorp/hcl/pull/656))
-
-## v2.19.0 (October 16, 2023)
-
-### Enhancements
-
-* ext/dynblock: `dynblock.Expand` now supports an optional hook for calling applications to check and potentially veto (by returning error diagnostics) particular `for_each` values. The behavior is unchanged for callers that don't set the new option. ([#634](https://github.com/hashicorp/hcl/pull/634))
-
-### Bugs Fixed
-
-* hclsyntax: Further fixes for treatment of "marked" values in the conditional expression, and better tracking of refined values into the conditional expression results, building on the fixes from v2.18.1. ([#633](https://github.com/hashicorp/hcl/pull/633))
-
-## v2.18.1 (October 5, 2023)
-
-### Bugs Fixed
-
-* hclsyntax: Conditional expressions will no longer panic when one or both of their results are "marked", as is the case for situations like how HashiCorp Terraform tracks its concept of "sensitive values". ([#630](https://github.com/hashicorp/hcl/pull/630))
-
-## v2.18.0 (August 30, 2023)
-
-### Enhancements
-
-* HCL now uses the tables from Unicode 15 when performing string normalization and character segmentation. HCL was previously using the Unicode 13 tables.
-
- For calling applications where consistent Unicode support is important, consider also upgrading to Go 1.21 at the same time as adopting HCL v2.18.0 so that the standard library unicode tables (used for case folding, etc) will also be from Unicode 15.
-
-## v2.17.1 (August 30, 2023)
-
-### Enhancements
-
-* hclsyntax: When evaluating string templates that have a long known constant prefix, HCL will truncate the known prefix to avoid creating excessively-large refinements. String prefix refinements are intended primarily for relatively-short fixed prefixes, such as `https://` at the start of a URL known to use that scheme. ([#617](https://github.com/hashicorp/hcl/pull/617))
-* ext/tryfunc: The "try" and "can" functions now handle unknown values slightly more precisely, and so can return known values in more situations when given expressions referring to unknown symbols. ([#622](https://github.com/hashicorp/hcl/pull/622))
-
-### Bugs Fixed
-
-* ext/typeexpr: Will no longer try to refine unknown values of unknown type when dealing with a user-specified type constraint containing the `any` keyword, avoiding an incorrect panic at runtime. ([#625](https://github.com/hashicorp/hcl/pull/625))
-* ext/typeexpr: Now correctly handles attempts to declare the same object type attribute multiple times by returning an error. Previously this could potentially panic by creating an incoherent internal state. ([#624](https://github.com/hashicorp/hcl/pull/624))
-
-## v2.17.0 (May 31, 2023)
-
-### Enhancements
-
-* HCL now uses a newer version of the upstream `cty` library which has improved treatment of unknown values: it can now track additional optional information that reduces the range of an unknown value, which allows some operations against unknown values to return known or partially-known results. ([#590](https://github.com/hashicorp/hcl/pull/590))
-
- **Note:** This change effectively passes on [`cty`'s notion of backward compatibility](https://github.com/zclconf/go-cty/blob/main/COMPATIBILITY.md) whereby unknown values can become "more known" in later releases. In particular, if your caller is using `cty.Value.RawEquals` in its tests against the results of operations with unknown values then you may see those tests begin failing after upgrading, due to the values now being more "refined".
-
- If so, you should review the refinements with consideration to [the `cty` refinements docs](https://github.com/zclconf/go-cty/blob/7dcbae46a6f247e983efb1fa774d2bb68781a333/docs/refinements.md) and update your expected results to match only if the reported refinements seem correct for the given situation. The `RawEquals` method is intended only for making exact value comparisons in test cases, so main application code should not use it; use `Equals` instead for real logic, which will take refinements into account automatically.
-
-## v2.16.2 (March 9, 2023)
-
-### Bugs Fixed
-
-* ext/typeexpr: Verify type assumptions when applying default values, and ignore input values that do not match type assumptions. ([#594](https://github.com/hashicorp/hcl/pull/594))
-
-## v2.16.1 (February 13, 2023)
-
-### Bugs Fixed
-
-* hclsyntax: Report correct `Range.End` for `FunctionCall` with incomplete argument ([#588](https://github.com/hashicorp/hcl/pull/588))
-
-## v2.16.0 (January 30, 2023)
-
-### Enhancements
-
-* ext/typeexpr: Modify the `Defaults` functionality to implement additional flexibility. HCL will now upcast lists and sets into tuples, and maps into objects, when applying default values if the applied defaults cause the elements within a target collection to have differing types. Previously, this would have resulted in a panic, now HCL will return a modified overall type. ([#574](https://github.com/hashicorp/hcl/pull/574))
-
- Users should return to the advice provided by v2.14.0, and apply the go-cty convert functionality *after* setting defaults on a given `cty.Value`, rather than before.
-* hclfmt: Avoid rewriting unchanged files. ([#576](https://github.com/hashicorp/hcl/pull/576))
-* hclsyntax: Simplify the AST for certain string expressions. ([#584](https://github.com/hashicorp/hcl/pull/584))
-
-### Bugs Fixed
-
-* hclwrite: Fix data race in `formatSpaces`. ([#511](https://github.com/hashicorp/hcl/pull/511))
-
-## v2.15.0 (November 10, 2022)
-
-### Bugs Fixed
-
-* ext/typeexpr: Skip null objects when applying defaults. This prevents crashes when null objects are creating inside collections, and stops incomplete objects being created with only optional attributes set. ([#567](https://github.com/hashicorp/hcl/pull/567))
-* ext/typeexpr: Ensure default values do not have optional metadata attached. This prevents crashes when default values are inserted into concrete go-cty values that have also been stripped of their optional metadata. ([#568](https://github.com/hashicorp/hcl/pull/568))
-
-### Enhancements
-
-* ext/typeexpr: With the [go-cty](https://github.com/zclconf/go-cty) upstream depenendency updated to v1.12.0, the `Defaults` struct and associated functions can apply additional and more flexible 'unsafe' conversions (examples include tuples into collections such as lists and sets, and additional safety around null and dynamic values). ([#564](https://github.com/hashicorp/hcl/pull/564))
-* ext/typeexpr: With the [go-cty](https://github.com/zclconf/go-cty) upstream depenendency updated to v1.12.0, users should now apply the go-cty convert functionality *before* setting defaults on a given `cty.Value`, rather than after, if they require a specific `cty.Type`. ([#564](https://github.com/hashicorp/hcl/pull/564))
-
-## v2.14.1 (September 23, 2022)
-
-### Bugs Fixed
-
-* ext/typeexpr: Type convert defaults for optional object attributes when applying them. This prevents crashes in certain cases when the objects in question are part of a collection. ([#555](https://github.com/hashicorp/hcl/pull/555))
-
-## v2.14.0 (September 1, 2022)
-
-### Enhancements
-
-* ext/typeexpr: Added support for optional object attributes to `TypeConstraint`. Attributes can be wrapped in the special `optional(…)` modifier, allowing the attribute to be omitted while still meeting the type constraint. For more information, [cty's documentation on conversion between object types](https://github.com/zclconf/go-cty/blob/main/docs/convert.md#conversion-between-object-types). ([#549](https://github.com/hashicorp/hcl/pull/549))
-* ext/typeexpr: New function: `TypeConstraintWithDefaults`. In this mode, the `optional(…)` modifier accepts a second argument which can be used as the default value for omitted object attributes. The function returns both a `cty.Type` and associated `Defaults`, the latter of which has an `Apply` method to apply defaults to a given value. ([#549](https://github.com/hashicorp/hcl/pull/549))
-
-## v2.13.0 (June 22, 2022)
-
-### Enhancements
-
-* hcl: `hcl.Diagnostic` now has an additional field `Extra` which is intended for carrying arbitrary supporting data ("extra information") related to the diagnostic message, intended to allow diagnostic renderers to optionally tailor the presentation of messages for particular situations. ([#539](https://github.com/hashicorp/hcl/pull/539))
-* hclsyntax: When an error occurs during a function call, the returned diagnostics will include _extra information_ (as described in the previous point) about which function was being called and, if the message is about an error returned by the function itself, that raw `error` value without any post-processing. ([#539](https://github.com/hashicorp/hcl/pull/539))
-
-### Bugs Fixed
-
-* hclwrite: Fixed a potential data race for any situation where `hclwrite.Format` runs concurrently with itself. ([#534](https://github.com/hashicorp/hcl/pull/534))
-
-## v2.12.0 (April 22, 2022)
-
-### Enhancements
-
-* hclsyntax: Evaluation of conditional expressions will now produce more precise error messages about inconsistencies between the types of the true and false result expressions, particularly in cases where both are of the same structural type kind but differ in their nested elements. ([#530](https://github.com/hashicorp/hcl/pull/530))
-* hclsyntax: The lexer will no longer allocate a small object on the heap for each token. Instead, in that situation it will allocate only when needed to return a diagnostic message with source location information. ([#490](https://github.com/hashicorp/hcl/pull/490))
-* hclwrite: New functions `TokensForTuple`, `TokensForObject`, and `TokensForFunctionCall` allow for more easily constructing the three constructs which are supported for static analysis and which HCL-based languages typically use in contexts where an expression is used only for its syntax, and not evaluated to produce a real value. For example, these new functions together are sufficient to construct all valid type constraint expressions from [the Type Expressions Extension](./ext/typeexpr/), which is the basis of variable type constraints in the Terraform language at the time of writing. ([#502](https://github.com/hashicorp/hcl/pull/502))
-* json: New functions `IsJSONExpression` and `IsJSONBody` to determine if a given expression or body was created by the JSON syntax parser. In normal situations it's better not to worry about what syntax a particular expression/body originated in, but this can be useful in some trickier cases where an application needs to shim for backwards-compatibility or for static analysis that needs to have special handling of the JSON syntax's embedded expression/template conventions. ([#524](https://github.com/hashicorp/hcl/pull/524))
-
-### Bugs Fixed
-
-* gohcl: Fix docs about supported types for blocks. ([#507](https://github.com/hashicorp/hcl/pull/507))
-
-## v2.11.1 (December 1, 2021)
-
-### Bugs Fixed
-
-* hclsyntax: The type for an upgraded unknown value with a splat expression cannot be known ([#495](https://github.com/hashicorp/hcl/pull/495))
-
-## v2.11.0 (December 1, 2021)
-
-### Enhancements
-
-* hclsyntax: Various error messages related to unexpectedly reaching end of file while parsing a delimited subtree will now return specialized messages describing the opening tokens as "unclosed", instead of returning a generic diagnostic that just happens to refer to the empty source range at the end of the file. This gives better feedback when error messages are being presented alongside a source code snippet, as is common in HCL-based applications, because it shows which innermost container the parser was working on when it encountered the error. ([#492](https://github.com/hashicorp/hcl/pull/492))
-
-### Bugs Fixed
-
-* hclsyntax: Upgrading an unknown single value to a list using a splat expression must return unknown ([#493](https://github.com/hashicorp/hcl/pull/493))
-
-## v2.10.1 (July 21, 2021)
-
-* dynblock: Decode unknown dynamic blocks in order to obtain any diagnostics even though the decoded value is not used ([#476](https://github.com/hashicorp/hcl/pull/476))
-* hclsyntax: Calling functions is now more robust in the face of an incorrectly-implemented function which returns a `function.ArgError` whose argument index is out of range for the length of the arguments. Previously this would often lead to a panic, but now it'll return a less-precice error message instead. Functions that return out-of-bounds argument indices still ought to be fixed so that the resulting error diagnostics can be as precise as possible. ([#472](https://github.com/hashicorp/hcl/pull/472))
-* hclsyntax: Ensure marks on unknown values are maintained when processing string templates. ([#478](https://github.com/hashicorp/hcl/pull/478))
-* hcl: Improved error messages for various common error situtions in `hcl.Index` and `hcl.GetAttr`. These are part of the implementation of indexing and attribute lookup in the native syntax expression language too, so the new error messages will apply to problems using those operators. ([#474](https://github.com/hashicorp/hcl/pull/474))
-
-## v2.10.0 (April 20, 2021)
-
-### Enhancements
-
-* dynblock,hcldec: Using dynblock in conjunction with hcldec can now decode blocks with unknown dynamic for_each arguments as entirely unknown values ([#461](https://github.com/hashicorp/hcl/pull/461))
-* hclsyntax: Some syntax errors during parsing of the inside of `${` ... `}` template interpolation sequences will now produce an extra hint message about the need to escape as `$${` when trying to include interpolation syntax for other languages like shell scripting, AWS IAM policies, etc. ([#462](https://github.com/hashicorp/hcl/pull/462))
-
-## v2.9.1 (March 10, 2021)
-
-### Bugs Fixed
-
-* hclsyntax: Fix panic for marked index value. ([#451](https://github.com/hashicorp/hcl/pull/451))
-
-## v2.9.0 (February 23, 2021)
-
-### Enhancements
-
-* HCL's native syntax and JSON scanners -- and thus all of the other parsing components that build on top of them -- are now using Unicode 13 rules for text segmentation when counting text characters for the purpose of reporting source location columns. Previously HCL was using Unicode 12. Unicode 13 still uses the same algorithm but includes some additions to the character tables the algorithm is defined in terms of, to properly categorize new characters defined in Unicode 13.
-
-## v2.8.2 (January 6, 2021)
-
-### Bugs Fixed
-
-* hclsyntax: Fix panic for marked collection splat. ([#436](https://github.com/hashicorp/hcl/pull/436))
-* hclsyntax: Fix panic for marked template loops. ([#437](https://github.com/hashicorp/hcl/pull/437))
-* hclsyntax: Fix `for` expression marked conditional. ([#438](https://github.com/hashicorp/hcl/pull/438))
-* hclsyntax: Mark objects with keys that are sensitive. ([#440](https://github.com/hashicorp/hcl/pull/440))
-
-## v2.8.1 (December 17, 2020)
-
-### Bugs Fixed
-
-* hclsyntax: Fix panic when expanding marked function arguments. ([#429](https://github.com/hashicorp/hcl/pull/429))
-* hclsyntax: Error when attempting to use a marked value as an object key. ([#434](https://github.com/hashicorp/hcl/pull/434))
-* hclsyntax: Error when attempting to use a marked value as an object key in expressions. ([#433](https://github.com/hashicorp/hcl/pull/433))
-
-## v2.8.0 (December 7, 2020)
-
-### Enhancements
-
-* hclsyntax: Expression grouping parentheses will now be reflected by an explicit node in the AST, whereas before they were only considered during parsing. ([#426](https://github.com/hashicorp/hcl/pull/426))
-
-### Bugs Fixed
-
-* hclwrite: The parser will now correctly include the `(` and `)` tokens when an expression is surrounded by parentheses. Previously it would incorrectly recognize those tokens as being extraneous tokens outside of the expression. ([#426](https://github.com/hashicorp/hcl/pull/426))
-* hclwrite: The formatter will now remove (rather than insert) spaces between the `!` (unary boolean "not") operator and its subsequent operand. ([#403](https://github.com/hashicorp/hcl/pull/403))
-* hclsyntax: Unmark conditional values in expressions before checking their truthfulness ([#427](https://github.com/hashicorp/hcl/pull/427))
-
-## v2.7.2 (November 30, 2020)
-
-### Bugs Fixed
-
-* gohcl: Fix panic when decoding into type containing value slices. ([#335](https://github.com/hashicorp/hcl/pull/335))
-* hclsyntax: The unusual expression `null[*]` was previously always returning an unknown value, even though the rules for `[*]` normally call for it to return an empty tuple when applied to a null. As well as being a surprising result, it was particularly problematic because it violated the rule that a calling application may assume that an expression result will always be known unless the application itself introduces unknown values via the evaluation context. `null[*]` will now produce an empty tuple. ([#416](https://github.com/hashicorp/hcl/pull/416))
-* hclsyntax: Fix panic when traversing a list, tuple, or map with cty "marks" ([#424](https://github.com/hashicorp/hcl/pull/424))
-
-## v2.7.1 (November 18, 2020)
-
-### Bugs Fixed
-
-* hclwrite: Correctly handle blank quoted string block labels, instead of dropping them ([#422](https://github.com/hashicorp/hcl/pull/422))
-
-## v2.7.0 (October 14, 2020)
-
-### Enhancements
-
-* json: There is a new function `ParseWithStartPos`, which allows overriding the starting position for parsing in case the given JSON bytes are a fragment of a larger document, such as might happen when decoding with `encoding/json` into a `json.RawMessage`. ([#389](https://github.com/hashicorp/hcl/pull/389))
-* json: There is a new function `ParseExpression`, which allows parsing a JSON string directly in expression mode, whereas previously it was only possible to parse a JSON string in body mode. ([#381](https://github.com/hashicorp/hcl/pull/381))
-* hclwrite: `Block` type now supports `SetType` and `SetLabels`, allowing surgical changes to the type and labels of an existing block without having to reconstruct the entire block. ([#340](https://github.com/hashicorp/hcl/pull/340))
-
-### Bugs Fixed
-
-* hclsyntax: Fix confusing error message for bitwise OR operator ([#380](https://github.com/hashicorp/hcl/pull/380))
-* hclsyntax: Several bug fixes for using HCL with values containing cty "marks" ([#404](https://github.com/hashicorp/hcl/pull/404), [#406](https://github.com/hashicorp/hcl/pull/404), [#407](https://github.com/hashicorp/hcl/pull/404))
-
-## v2.6.0 (June 4, 2020)
-
-### Enhancements
-
-* hcldec: Add a new `Spec`, `ValidateSpec`, which allows custom validation of values at decode-time. ([#387](https://github.com/hashicorp/hcl/pull/387))
-
-### Bugs Fixed
-
-* hclsyntax: Fix panic with combination of sequences and null arguments ([#386](https://github.com/hashicorp/hcl/pull/386))
-* hclsyntax: Fix handling of unknown values and sequences ([#386](https://github.com/hashicorp/hcl/pull/386))
-
-## v2.5.1 (May 14, 2020)
-
-### Bugs Fixed
-
-* hclwrite: handle legacy dot access of numeric indexes. ([#369](https://github.com/hashicorp/hcl/pull/369))
-* hclwrite: Fix panic for dotted full splat (`foo.*`) ([#374](https://github.com/hashicorp/hcl/pull/374))
-
-## v2.5.0 (May 6, 2020)
-
-### Enhancements
-
-* hclwrite: Generate multi-line objects and maps. ([#372](https://github.com/hashicorp/hcl/pull/372))
-
-## v2.4.0 (Apr 13, 2020)
-
-### Enhancements
-
-* The Unicode data tables that HCL uses to produce user-perceived "column" positions in diagnostics and other source ranges are now updated to Unicode 12.0.0, which will cause HCL to produce more accurate column numbers for combining characters introduced to Unicode since Unicode 9.0.0.
-
-### Bugs Fixed
-
-* json: Fix panic when parsing malformed JSON. ([#358](https://github.com/hashicorp/hcl/pull/358))
-
-## v2.3.0 (Jan 3, 2020)
-
-### Enhancements
-
-* ext/tryfunc: Optional functions `try` and `can` to include in your `hcl.EvalContext` when evaluating expressions, which allow users to make decisions based on the success of expressions. ([#330](https://github.com/hashicorp/hcl/pull/330))
-* ext/typeexpr: Now has an optional function `convert` which you can include in your `hcl.EvalContext` when evaluating expressions, allowing users to convert values to specific type constraints using the type constraint expression syntax. ([#330](https://github.com/hashicorp/hcl/pull/330))
-* ext/typeexpr: A new `cty` capsule type `typeexpr.TypeConstraintType` which, when used as either a type constraint for a function parameter or as a type constraint for a `hcldec` attribute specification will cause the given expression to be interpreted as a type constraint expression rather than a value expression. ([#330](https://github.com/hashicorp/hcl/pull/330))
-* ext/customdecode: An optional extension that allows overriding the static decoding behavior for expressions either in function arguments or `hcldec` attribute specifications. ([#330](https://github.com/hashicorp/hcl/pull/330))
-* ext/customdecode: New `cty` capsuletypes `customdecode.ExpressionType` and `customdecode.ExpressionClosureType` which, when used as either a type constraint for a function parameter or as a type constraint for a `hcldec` attribute specification will cause the given expression (and, for the closure type, also the `hcl.EvalContext` it was evaluated in) to be captured for later analysis, rather than immediately evaluated. ([#330](https://github.com/hashicorp/hcl/pull/330))
-
-## v2.2.0 (Dec 11, 2019)
-
-### Enhancements
-
-* hcldec: Attribute evaluation (as part of `AttrSpec` or `BlockAttrsSpec`) now captures expression evaluation metadata in any errors it produces during type conversions, allowing for better feedback in calling applications that are able to make use of this metadata when printing diagnostic messages. ([#329](https://github.com/hashicorp/hcl/pull/329))
-
-### Bugs Fixed
-
-* hclsyntax: `IndexExpr`, `SplatExpr`, and `RelativeTraversalExpr` will now report a source range that covers all of their child expression nodes. Previously they would report only the operator part, such as `["foo"]`, `[*]`, or `.foo`, which was problematic for callers using source ranges for code analysis. ([#328](https://github.com/hashicorp/hcl/pull/328))
-* hclwrite: Parser will no longer panic when the input includes index, splat, or relative traversal syntax. ([#328](https://github.com/hashicorp/hcl/pull/328))
-
-## v2.1.0 (Nov 19, 2019)
-
-### Enhancements
-
-* gohcl: When decoding into a struct value with some fields already populated, those values will be retained if not explicitly overwritten in the given HCL body, with similar overriding/merging behavior as `json.Unmarshal` in the Go standard library.
-* hclwrite: New interface to set the expression for an attribute to be a raw token sequence, with no special processing. This has some caveats, so if you intend to use it please refer to the godoc comments. ([#320](https://github.com/hashicorp/hcl/pull/320))
-
-### Bugs Fixed
-
-* hclwrite: The `Body.Blocks` method was returing the blocks in an indefined order, rather than preserving the order of declaration in the source input. ([#313](https://github.com/hashicorp/hcl/pull/313))
-* hclwrite: The `TokensForTraversal` function (and thus in turn the `Body.SetAttributeTraversal` method) was not correctly handling index steps in traversals, and thus producing invalid results. ([#319](https://github.com/hashicorp/hcl/pull/319))
-
-## v2.0.0 (Oct 2, 2019)
-
-Initial release of HCL 2, which is a new implementating combining the HCL 1
-language with the HIL expression language to produce a single language
-supporting both nested configuration structures and arbitrary expressions.
-
-HCL 2 has an entirely new Go library API and so is _not_ a drop-in upgrade
-relative to HCL 1. It's possible to import both versions of HCL into a single
-program using Go's _semantic import versioning_ mechanism:
-
-```
-import (
- hcl1 "github.com/hashicorp/hcl"
- hcl2 "github.com/hashicorp/hcl/v2"
-)
-```
-
----
-
-Prior to v2.0.0 there was not a curated changelog. Consult the git history
-from the latest v1.x.x tag for information on the changes to HCL 1.
diff --git a/vendor/github.com/hashicorp/hcl/v2/LICENSE b/vendor/github.com/hashicorp/hcl/v2/LICENSE
deleted file mode 100644
index e25da5fa..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/LICENSE
+++ /dev/null
@@ -1,355 +0,0 @@
-Copyright (c) 2014 HashiCorp, Inc.
-
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. “Contributor”
-
- means each individual or legal entity that creates, contributes to the
- creation of, or owns Covered Software.
-
-1.2. “Contributor Version”
-
- means the combination of the Contributions of others (if any) used by a
- Contributor and that particular Contributor’s Contribution.
-
-1.3. “Contribution”
-
- means Covered Software of a particular Contributor.
-
-1.4. “Covered Software”
-
- means Source Code Form to which the initial Contributor has attached the
- notice in Exhibit A, the Executable Form of such Source Code Form, and
- Modifications of such Source Code Form, in each case including portions
- thereof.
-
-1.5. “Incompatible With Secondary Licenses”
- means
-
- a. that the initial Contributor has attached the notice described in
- Exhibit B to the Covered Software; or
-
- b. that the Covered Software was made available under the terms of version
- 1.1 or earlier of the License, but not also under the terms of a
- Secondary License.
-
-1.6. “Executable Form”
-
- means any form of the work other than Source Code Form.
-
-1.7. “Larger Work”
-
- means a work that combines Covered Software with other material, in a separate
- file or files, that is not Covered Software.
-
-1.8. “License”
-
- means this document.
-
-1.9. “Licensable”
-
- means having the right to grant, to the maximum extent possible, whether at the
- time of the initial grant or subsequently, any and all of the rights conveyed by
- this License.
-
-1.10. “Modifications”
-
- means any of the following:
-
- a. any file in Source Code Form that results from an addition to, deletion
- from, or modification of the contents of Covered Software; or
-
- b. any new file in Source Code Form that contains any Covered Software.
-
-1.11. “Patent Claims” of a Contributor
-
- means any patent claim(s), including without limitation, method, process,
- and apparatus claims, in any patent Licensable by such Contributor that
- would be infringed, but for the grant of the License, by the making,
- using, selling, offering for sale, having made, import, or transfer of
- either its Contributions or its Contributor Version.
-
-1.12. “Secondary License”
-
- means either the GNU General Public License, Version 2.0, the GNU Lesser
- General Public License, Version 2.1, the GNU Affero General Public
- License, Version 3.0, or any later versions of those licenses.
-
-1.13. “Source Code Form”
-
- means the form of the work preferred for making modifications.
-
-1.14. “You” (or “Your”)
-
- means an individual or a legal entity exercising rights under this
- License. For legal entities, “You” includes any entity that controls, is
- controlled by, or is under common control with You. For purposes of this
- definition, “control” means (a) the power, direct or indirect, to cause
- the direction or management of such entity, whether by contract or
- otherwise, or (b) ownership of more than fifty percent (50%) of the
- outstanding shares or beneficial ownership of such entity.
-
-
-2. License Grants and Conditions
-
-2.1. Grants
-
- Each Contributor hereby grants You a world-wide, royalty-free,
- non-exclusive license:
-
- a. under intellectual property rights (other than patent or trademark)
- Licensable by such Contributor to use, reproduce, make available,
- modify, display, perform, distribute, and otherwise exploit its
- Contributions, either on an unmodified basis, with Modifications, or as
- part of a Larger Work; and
-
- b. under Patent Claims of such Contributor to make, use, sell, offer for
- sale, have made, import, and otherwise transfer either its Contributions
- or its Contributor Version.
-
-2.2. Effective Date
-
- The licenses granted in Section 2.1 with respect to any Contribution become
- effective for each Contribution on the date the Contributor first distributes
- such Contribution.
-
-2.3. Limitations on Grant Scope
-
- The licenses granted in this Section 2 are the only rights granted under this
- License. No additional rights or licenses will be implied from the distribution
- or licensing of Covered Software under this License. Notwithstanding Section
- 2.1(b) above, no patent license is granted by a Contributor:
-
- a. for any code that a Contributor has removed from Covered Software; or
-
- b. for infringements caused by: (i) Your and any other third party’s
- modifications of Covered Software, or (ii) the combination of its
- Contributions with other software (except as part of its Contributor
- Version); or
-
- c. under Patent Claims infringed by Covered Software in the absence of its
- Contributions.
-
- This License does not grant any rights in the trademarks, service marks, or
- logos of any Contributor (except as may be necessary to comply with the
- notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
- No Contributor makes additional grants as a result of Your choice to
- distribute the Covered Software under a subsequent version of this License
- (see Section 10.2) or under the terms of a Secondary License (if permitted
- under the terms of Section 3.3).
-
-2.5. Representation
-
- Each Contributor represents that the Contributor believes its Contributions
- are its original creation(s) or it has sufficient rights to grant the
- rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
- This License is not intended to limit any rights You have under applicable
- copyright doctrines of fair use, fair dealing, or other equivalents.
-
-2.7. Conditions
-
- Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
- Section 2.1.
-
-
-3. Responsibilities
-
-3.1. Distribution of Source Form
-
- All distribution of Covered Software in Source Code Form, including any
- Modifications that You create or to which You contribute, must be under the
- terms of this License. You must inform recipients that the Source Code Form
- of the Covered Software is governed by the terms of this License, and how
- they can obtain a copy of this License. You may not attempt to alter or
- restrict the recipients’ rights in the Source Code Form.
-
-3.2. Distribution of Executable Form
-
- If You distribute Covered Software in Executable Form then:
-
- a. such Covered Software must also be made available in Source Code Form,
- as described in Section 3.1, and You must inform recipients of the
- Executable Form how they can obtain a copy of such Source Code Form by
- reasonable means in a timely manner, at a charge no more than the cost
- of distribution to the recipient; and
-
- b. You may distribute such Executable Form under the terms of this License,
- or sublicense it under different terms, provided that the license for
- the Executable Form does not attempt to limit or alter the recipients’
- rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
- You may create and distribute a Larger Work under terms of Your choice,
- provided that You also comply with the requirements of this License for the
- Covered Software. If the Larger Work is a combination of Covered Software
- with a work governed by one or more Secondary Licenses, and the Covered
- Software is not Incompatible With Secondary Licenses, this License permits
- You to additionally distribute such Covered Software under the terms of
- such Secondary License(s), so that the recipient of the Larger Work may, at
- their option, further distribute the Covered Software under the terms of
- either this License or such Secondary License(s).
-
-3.4. Notices
-
- You may not remove or alter the substance of any license notices (including
- copyright notices, patent notices, disclaimers of warranty, or limitations
- of liability) contained within the Source Code Form of the Covered
- Software, except that You may alter any license notices to the extent
- required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
- You may choose to offer, and to charge a fee for, warranty, support,
- indemnity or liability obligations to one or more recipients of Covered
- Software. However, You may do so only on Your own behalf, and not on behalf
- of any Contributor. You must make it absolutely clear that any such
- warranty, support, indemnity, or liability obligation is offered by You
- alone, and You hereby agree to indemnify every Contributor for any
- liability incurred by such Contributor as a result of warranty, support,
- indemnity or liability terms You offer. You may include additional
- disclaimers of warranty and limitations of liability specific to any
- jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
-
- If it is impossible for You to comply with any of the terms of this License
- with respect to some or all of the Covered Software due to statute, judicial
- order, or regulation then You must: (a) comply with the terms of this License
- to the maximum extent possible; and (b) describe the limitations and the code
- they affect. Such description must be placed in a text file included with all
- distributions of the Covered Software under this License. Except to the
- extent prohibited by statute or regulation, such description must be
- sufficiently detailed for a recipient of ordinary skill to be able to
- understand it.
-
-5. Termination
-
-5.1. The rights granted under this License will terminate automatically if You
- fail to comply with any of its terms. However, if You become compliant,
- then the rights granted under this License from a particular Contributor
- are reinstated (a) provisionally, unless and until such Contributor
- explicitly and finally terminates Your grants, and (b) on an ongoing basis,
- if such Contributor fails to notify You of the non-compliance by some
- reasonable means prior to 60 days after You have come back into compliance.
- Moreover, Your grants from a particular Contributor are reinstated on an
- ongoing basis if such Contributor notifies You of the non-compliance by
- some reasonable means, this is the first time You have received notice of
- non-compliance with this License from such Contributor, and You become
- compliant prior to 30 days after Your receipt of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
- infringement claim (excluding declaratory judgment actions, counter-claims,
- and cross-claims) alleging that a Contributor Version directly or
- indirectly infringes any patent, then the rights granted to You by any and
- all Contributors for the Covered Software under Section 2.1 of this License
- shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
- license agreements (excluding distributors and resellers) which have been
- validly granted by You or Your distributors under this License prior to
- termination shall survive termination.
-
-6. Disclaimer of Warranty
-
- Covered Software is provided under this License on an “as is” basis, without
- warranty of any kind, either expressed, implied, or statutory, including,
- without limitation, warranties that the Covered Software is free of defects,
- merchantable, fit for a particular purpose or non-infringing. The entire
- risk as to the quality and performance of the Covered Software is with You.
- Should any Covered Software prove defective in any respect, You (not any
- Contributor) assume the cost of any necessary servicing, repair, or
- correction. This disclaimer of warranty constitutes an essential part of this
- License. No use of any Covered Software is authorized under this License
- except under this disclaimer.
-
-7. Limitation of Liability
-
- Under no circumstances and under no legal theory, whether tort (including
- negligence), contract, or otherwise, shall any Contributor, or anyone who
- distributes Covered Software as permitted above, be liable to You for any
- direct, indirect, special, incidental, or consequential damages of any
- character including, without limitation, damages for lost profits, loss of
- goodwill, work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses, even if such party shall have been
- informed of the possibility of such damages. This limitation of liability
- shall not apply to liability for death or personal injury resulting from such
- party’s negligence to the extent applicable law prohibits such limitation.
- Some jurisdictions do not allow the exclusion or limitation of incidental or
- consequential damages, so this exclusion and limitation may not apply to You.
-
-8. Litigation
-
- Any litigation relating to this License may be brought only in the courts of
- a jurisdiction where the defendant maintains its principal place of business
- and such litigation shall be governed by laws of that jurisdiction, without
- reference to its conflict-of-law provisions. Nothing in this Section shall
- prevent a party’s ability to bring cross-claims or counter-claims.
-
-9. Miscellaneous
-
- This License represents the complete agreement concerning the subject matter
- hereof. If any provision of this License is held to be unenforceable, such
- provision shall be reformed only to the extent necessary to make it
- enforceable. Any law or regulation which provides that the language of a
- contract shall be construed against the drafter shall not be used to construe
- this License against a Contributor.
-
-
-10. Versions of the License
-
-10.1. New Versions
-
- Mozilla Foundation is the license steward. Except as provided in Section
- 10.3, no one other than the license steward has the right to modify or
- publish new versions of this License. Each version will be given a
- distinguishing version number.
-
-10.2. Effect of New Versions
-
- You may distribute the Covered Software under the terms of the version of
- the License under which You originally received the Covered Software, or
- under the terms of any subsequent version published by the license
- steward.
-
-10.3. Modified Versions
-
- If you create software not governed by this License, and you want to
- create a new license for such software, you may create and use a modified
- version of this License if you rename the license and remove any
- references to the name of the license steward (except to note that such
- modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
- If You choose to distribute Source Code Form that is Incompatible With
- Secondary Licenses under the terms of this version of the License, the
- notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
-
- This Source Code Form is subject to the
- terms of the Mozilla Public License, v.
- 2.0. If a copy of the MPL was not
- distributed with this file, You can
- obtain one at
- http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file, then
-You may include the notice in a location (such as a LICENSE file in a relevant
-directory) where a recipient would be likely to look for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - “Incompatible With Secondary Licenses” Notice
-
- This Source Code Form is “Incompatible
- With Secondary Licenses”, as defined by
- the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/hcl/v2/Makefile b/vendor/github.com/hashicorp/hcl/v2/Makefile
deleted file mode 100644
index 675178e7..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/Makefile
+++ /dev/null
@@ -1,18 +0,0 @@
-fmtcheck:
- "$(CURDIR)/scripts/gofmtcheck.sh"
-
-fmtfix:
- gofmt -w ./
-
-vetcheck:
- go vet ./...
-
-copyrightcheck:
- go run github.com/hashicorp/copywrite@latest headers --plan
-
-copyrightfix:
- go run github.com/hashicorp/copywrite@latest headers
-
-check: copyrightcheck vetcheck fmtcheck
-
-fix: copyrightfix fmtfix
diff --git a/vendor/github.com/hashicorp/hcl/v2/README.md b/vendor/github.com/hashicorp/hcl/v2/README.md
deleted file mode 100644
index 9af736c9..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/README.md
+++ /dev/null
@@ -1,219 +0,0 @@
-# HCL
-
-HCL is a toolkit for creating structured configuration languages that are
-both human- and machine-friendly, for use with command-line tools.
-Although intended to be generally useful, it is primarily targeted
-towards devops tools, servers, etc.
-
-> **NOTE:** This is major version 2 of HCL, whose Go API is incompatible with
-> major version 1. Both versions are available for selection in Go Modules
-> projects. HCL 2 _cannot_ be imported from Go projects that are not using Go Modules. For more information, see
-> [our version selection guide](https://github.com/hashicorp/hcl/wiki/Version-Selection).
-
-HCL has both a _native syntax_, intended to be pleasant to read and write for
-humans, and a JSON-based variant that is easier for machines to generate
-and parse.
-
-The HCL native syntax is inspired by [libucl](https://github.com/vstakhov/libucl),
-[nginx configuration](http://nginx.org/en/docs/beginners_guide.html#conf_structure),
-and others.
-
-It includes an expression syntax that allows basic inline computation and,
-with support from the calling application, use of variables and functions
-for more dynamic configuration languages.
-
-HCL provides a set of constructs that can be used by a calling application to
-construct a configuration language. The application defines which attribute
-names and nested block types are expected, and HCL parses the configuration
-file, verifies that it conforms to the expected structure, and returns
-high-level objects that the application can use for further processing.
-
-```go
-package main
-
-import (
- "log"
-
- "github.com/hashicorp/hcl/v2/hclsimple"
-)
-
-type Config struct {
- IOMode string `hcl:"io_mode"`
- Service ServiceConfig `hcl:"service,block"`
-}
-
-type ServiceConfig struct {
- Protocol string `hcl:"protocol,label"`
- Type string `hcl:"type,label"`
- ListenAddr string `hcl:"listen_addr"`
- Processes []ProcessConfig `hcl:"process,block"`
-}
-
-type ProcessConfig struct {
- Type string `hcl:"type,label"`
- Command []string `hcl:"command"`
-}
-
-func main() {
- var config Config
- err := hclsimple.DecodeFile("config.hcl", nil, &config)
- if err != nil {
- log.Fatalf("Failed to load configuration: %s", err)
- }
- log.Printf("Configuration is %#v", config)
-}
-```
-
-A lower-level API is available for applications that need more control over
-the parsing, decoding, and evaluation of configuration. For more information,
-see [the package documentation](https://pkg.go.dev/github.com/hashicorp/hcl/v2).
-
-## Why?
-
-Newcomers to HCL often ask: why not JSON, YAML, etc?
-
-Whereas JSON and YAML are formats for serializing data structures, HCL is
-a syntax and API specifically designed for building structured configuration
-formats.
-
-HCL attempts to strike a compromise between generic serialization formats
-such as JSON and configuration formats built around full programming languages
-such as Ruby. HCL syntax is designed to be easily read and written by humans,
-and allows _declarative_ logic to permit its use in more complex applications.
-
-HCL is intended as a base syntax for configuration formats built
-around key-value pairs and hierarchical blocks whose structure is well-defined
-by the calling application, and this definition of the configuration structure
-allows for better error messages and more convenient definition within the
-calling application.
-
-It can't be denied that JSON is very convenient as a _lingua franca_
-for interoperability between different pieces of software. Because of this,
-HCL defines a common configuration model that can be parsed from either its
-native syntax or from a well-defined equivalent JSON structure. This allows
-configuration to be provided as a mixture of human-authored configuration
-files in the native syntax and machine-generated files in JSON.
-
-## Information Model and Syntax
-
-HCL is built around two primary concepts: _attributes_ and _blocks_. In
-native syntax, a configuration file for a hypothetical application might look
-something like this:
-
-```hcl
-io_mode = "async"
-
-service "http" "web_proxy" {
- listen_addr = "127.0.0.1:8080"
-
- process "main" {
- command = ["/usr/local/bin/awesome-app", "server"]
- }
-
- process "mgmt" {
- command = ["/usr/local/bin/awesome-app", "mgmt"]
- }
-}
-```
-
-The JSON equivalent of this configuration is the following:
-
-```json
-{
- "io_mode": "async",
- "service": {
- "http": {
- "web_proxy": {
- "listen_addr": "127.0.0.1:8080",
- "process": {
- "main": {
- "command": ["/usr/local/bin/awesome-app", "server"]
- },
- "mgmt": {
- "command": ["/usr/local/bin/awesome-app", "mgmt"]
- },
- }
- }
- }
- }
-}
-```
-
-Regardless of which syntax is used, the API within the calling application
-is the same. It can either work directly with the low-level attributes and
-blocks, for more advanced use-cases, or it can use one of the _decoder_
-packages to declaratively extract into either Go structs or dynamic value
-structures.
-
-Attribute values can be expressions as well as just literal values:
-
-```hcl
-# Arithmetic with literals and application-provided variables
-sum = 1 + addend
-
-# String interpolation and templates
-message = "Hello, ${name}!"
-
-# Application-provided functions
-shouty_message = upper(message)
-```
-
-Although JSON syntax doesn't permit direct use of expressions, the interpolation
-syntax allows use of arbitrary expressions within JSON strings:
-
-```json
-{
- "sum": "${1 + addend}",
- "message": "Hello, ${name}!",
- "shouty_message": "${upper(message)}"
-}
-```
-
-For more information, see the detailed specifications:
-
-* [Syntax-agnostic Information Model](spec.md)
-* [HCL Native Syntax](hclsyntax/spec.md)
-* [JSON Representation](json/spec.md)
-
-## Changes in 2.0
-
-Version 2.0 of HCL combines the features of HCL 1.0 with those of the
-interpolation language HIL to produce a single configuration language that
-supports arbitrary expressions.
-
-This new version has a completely new parser and Go API, with no direct
-migration path. Although the syntax is similar, the implementation takes some
-very different approaches to improve on some "rough edges" that existed with
-the original implementation and to allow for more robust error handling.
-
-It's possible to import both HCL 1 and HCL 2 into the same program using Go's
-_semantic import versioning_ mechanism:
-
-```go
-import (
- hcl1 "github.com/hashicorp/hcl"
- hcl2 "github.com/hashicorp/hcl/v2"
-)
-```
-
-## Acknowledgements
-
-HCL was heavily inspired by [libucl](https://github.com/vstakhov/libucl),
-by [Vsevolod Stakhov](https://github.com/vstakhov).
-
-HCL and HIL originate in [HashiCorp Terraform](https://terraform.io/),
-with the original parsers for each written by
-[Mitchell Hashimoto](https://github.com/mitchellh).
-
-The original HCL parser was ported to pure Go (from yacc) by
-[Fatih Arslan](https://github.com/fatih). The structure-related portions of
-the new native syntax parser build on that work.
-
-The original HIL parser was ported to pure Go (from yacc) by
-[Martin Atkins](https://github.com/apparentlymart). The expression-related
-portions of the new native syntax parser build on that work.
-
-HCL 2, which merged the original HCL and HIL languages into this single new
-language, builds on design and prototyping work by
-[Martin Atkins](https://github.com/apparentlymart) in
-[zcl](https://github.com/zclconf/go-zcl).
diff --git a/vendor/github.com/hashicorp/hcl/v2/diagnostic.go b/vendor/github.com/hashicorp/hcl/v2/diagnostic.go
deleted file mode 100644
index 578f81a2..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/diagnostic.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hcl
-
-import (
- "fmt"
-)
-
-// DiagnosticSeverity represents the severity of a diagnostic.
-type DiagnosticSeverity int
-
-const (
- // DiagInvalid is the invalid zero value of DiagnosticSeverity
- DiagInvalid DiagnosticSeverity = iota
-
- // DiagError indicates that the problem reported by a diagnostic prevents
- // further progress in parsing and/or evaluating the subject.
- DiagError
-
- // DiagWarning indicates that the problem reported by a diagnostic warrants
- // user attention but does not prevent further progress. It is most
- // commonly used for showing deprecation notices.
- DiagWarning
-)
-
-// Diagnostic represents information to be presented to a user about an
-// error or anomaly in parsing or evaluating configuration.
-type Diagnostic struct {
- Severity DiagnosticSeverity
-
- // Summary and Detail contain the English-language description of the
- // problem. Summary is a terse description of the general problem and
- // detail is a more elaborate, often-multi-sentence description of
- // the problem and what might be done to solve it.
- Summary string
- Detail string
-
- // Subject and Context are both source ranges relating to the diagnostic.
- //
- // Subject is a tight range referring to exactly the construct that
- // is problematic, while Context is an optional broader range (which should
- // fully contain Subject) that ought to be shown around Subject when
- // generating isolated source-code snippets in diagnostic messages.
- // If Context is nil, the Subject is also the Context.
- //
- // Some diagnostics have no source ranges at all. If Context is set then
- // Subject should always also be set.
- Subject *Range
- Context *Range
-
- // For diagnostics that occur when evaluating an expression, Expression
- // may refer to that expression and EvalContext may point to the
- // EvalContext that was active when evaluating it. This may allow for the
- // inclusion of additional useful information when rendering a diagnostic
- // message to the user.
- //
- // It is not always possible to select a single EvalContext for a
- // diagnostic, and so in some cases this field may be nil even when an
- // expression causes a problem.
- //
- // EvalContexts form a tree, so the given EvalContext may refer to a parent
- // which in turn refers to another parent, etc. For a full picture of all
- // of the active variables and functions the caller must walk up this
- // chain, preferring definitions that are "closer" to the expression in
- // case of colliding names.
- Expression Expression
- EvalContext *EvalContext
-
- // Extra is an extension point for additional machine-readable information
- // about this problem.
- //
- // Recipients of diagnostic objects may type-assert this value with
- // specific interface types they know about to discover if any additional
- // information is available that is interesting for their use-case.
- //
- // Extra is always considered to be optional extra information and so a
- // diagnostic message should still always be fully described (from the
- // perspective of a human who understands the language the messages are
- // written in) by the other fields in case a particular recipient.
- //
- // Functions that return diagnostics with Extra populated should typically
- // document that they place values implementing a particular interface,
- // rather than a concrete type, and define that interface such that its
- // methods can dynamically indicate a lack of support at runtime even
- // if the interface happens to be statically available. An Extra
- // type that wraps other Extra values should additionally implement
- // interface DiagnosticExtraUnwrapper to return the value they are wrapping
- // so that callers can access inner values to type-assert against.
- Extra interface{}
-}
-
-// Diagnostics is a list of Diagnostic instances.
-type Diagnostics []*Diagnostic
-
-// error implementation, so that diagnostics can be returned via APIs
-// that normally deal in vanilla Go errors.
-//
-// This presents only minimal context about the error, for compatibility
-// with usual expectations about how errors will present as strings.
-func (d *Diagnostic) Error() string {
- return fmt.Sprintf("%s: %s; %s", d.Subject, d.Summary, d.Detail)
-}
-
-// error implementation, so that sets of diagnostics can be returned via
-// APIs that normally deal in vanilla Go errors.
-func (d Diagnostics) Error() string {
- count := len(d)
- switch {
- case count == 0:
- return "no diagnostics"
- case count == 1:
- return d[0].Error()
- default:
- return fmt.Sprintf("%s, and %d other diagnostic(s)", d[0].Error(), count-1)
- }
-}
-
-// Append appends a new error to a Diagnostics and return the whole Diagnostics.
-//
-// This is provided as a convenience for returning from a function that
-// collects and then returns a set of diagnostics:
-//
-// return nil, diags.Append(&hcl.Diagnostic{ ... })
-//
-// Note that this modifies the array underlying the diagnostics slice, so
-// must be used carefully within a single codepath. It is incorrect (and rude)
-// to extend a diagnostics created by a different subsystem.
-func (d Diagnostics) Append(diag *Diagnostic) Diagnostics {
- return append(d, diag)
-}
-
-// Extend concatenates the given Diagnostics with the receiver and returns
-// the whole new Diagnostics.
-//
-// This is similar to Append but accepts multiple diagnostics to add. It has
-// all the same caveats and constraints.
-func (d Diagnostics) Extend(diags Diagnostics) Diagnostics {
- return append(d, diags...)
-}
-
-// HasErrors returns true if the receiver contains any diagnostics of
-// severity DiagError.
-func (d Diagnostics) HasErrors() bool {
- for _, diag := range d {
- if diag.Severity == DiagError {
- return true
- }
- }
- return false
-}
-
-func (d Diagnostics) Errs() []error {
- var errs []error
- for _, diag := range d {
- if diag.Severity == DiagError {
- errs = append(errs, diag)
- }
- }
-
- return errs
-}
-
-// A DiagnosticWriter emits diagnostics somehow.
-type DiagnosticWriter interface {
- WriteDiagnostic(*Diagnostic) error
- WriteDiagnostics(Diagnostics) error
-}
-
-// DiagnosticExtraUnwrapper is an interface implemented by values in the
-// Extra field of Diagnostic when they are wrapping another "Extra" value that
-// was generated downstream.
-//
-// Diagnostic recipients which want to examine "Extra" values to sniff for
-// particular types of extra data can either type-assert this interface
-// directly and repeatedly unwrap until they recieve nil, or can use the
-// helper function DiagnosticExtra.
-type DiagnosticExtraUnwrapper interface {
- // If the reciever is wrapping another "diagnostic extra" value, returns
- // that value. Otherwise returns nil to indicate dynamically that nothing
- // is wrapped.
- //
- // The "nothing is wrapped" condition can be signalled either by this
- // method returning nil or by a type not implementing this interface at all.
- //
- // Implementers should never create unwrap "cycles" where a nested extra
- // value returns a value that was also wrapping it.
- UnwrapDiagnosticExtra() interface{}
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/diagnostic_text.go b/vendor/github.com/hashicorp/hcl/v2/diagnostic_text.go
deleted file mode 100644
index bdfad42b..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/diagnostic_text.go
+++ /dev/null
@@ -1,314 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hcl
-
-import (
- "bufio"
- "bytes"
- "errors"
- "fmt"
- "io"
- "sort"
-
- wordwrap "github.com/mitchellh/go-wordwrap"
- "github.com/zclconf/go-cty/cty"
-)
-
-type diagnosticTextWriter struct {
- files map[string]*File
- wr io.Writer
- width uint
- color bool
-}
-
-// NewDiagnosticTextWriter creates a DiagnosticWriter that writes diagnostics
-// to the given writer as formatted text.
-//
-// It is designed to produce text appropriate to print in a monospaced font
-// in a terminal of a particular width, or optionally with no width limit.
-//
-// The given width may be zero to disable word-wrapping of the detail text
-// and truncation of source code snippets.
-//
-// If color is set to true, the output will include VT100 escape sequences to
-// color-code the severity indicators. It is suggested to turn this off if
-// the target writer is not a terminal.
-func NewDiagnosticTextWriter(wr io.Writer, files map[string]*File, width uint, color bool) DiagnosticWriter {
- return &diagnosticTextWriter{
- files: files,
- wr: wr,
- width: width,
- color: color,
- }
-}
-
-func (w *diagnosticTextWriter) WriteDiagnostic(diag *Diagnostic) error {
- if diag == nil {
- return errors.New("nil diagnostic")
- }
-
- var colorCode, highlightCode, resetCode string
- if w.color {
- switch diag.Severity {
- case DiagError:
- colorCode = "\x1b[31m"
- case DiagWarning:
- colorCode = "\x1b[33m"
- }
- resetCode = "\x1b[0m"
- highlightCode = "\x1b[1;4m"
- }
-
- var severityStr string
- switch diag.Severity {
- case DiagError:
- severityStr = "Error"
- case DiagWarning:
- severityStr = "Warning"
- default:
- // should never happen
- severityStr = "???????"
- }
-
- fmt.Fprintf(w.wr, "%s%s%s: %s\n\n", colorCode, severityStr, resetCode, diag.Summary)
-
- if diag.Subject != nil {
- snipRange := *diag.Subject
- highlightRange := snipRange
- if diag.Context != nil {
- // Show enough of the source code to include both the subject
- // and context ranges, which overlap in all reasonable
- // situations.
- snipRange = RangeOver(snipRange, *diag.Context)
- }
- // We can't illustrate an empty range, so we'll turn such ranges into
- // single-character ranges, which might not be totally valid (may point
- // off the end of a line, or off the end of the file) but are good
- // enough for the bounds checks we do below.
- if snipRange.Empty() {
- snipRange.End.Byte++
- snipRange.End.Column++
- }
- if highlightRange.Empty() {
- highlightRange.End.Byte++
- highlightRange.End.Column++
- }
-
- file := w.files[diag.Subject.Filename]
- if file == nil || file.Bytes == nil {
- fmt.Fprintf(w.wr, " on %s line %d:\n (source code not available)\n\n", diag.Subject.Filename, diag.Subject.Start.Line)
- } else {
-
- var contextLine string
- if diag.Subject != nil {
- contextLine = contextString(file, diag.Subject.Start.Byte)
- if contextLine != "" {
- contextLine = ", in " + contextLine
- }
- }
-
- fmt.Fprintf(w.wr, " on %s line %d%s:\n", diag.Subject.Filename, diag.Subject.Start.Line, contextLine)
-
- src := file.Bytes
- sc := NewRangeScanner(src, diag.Subject.Filename, bufio.ScanLines)
-
- for sc.Scan() {
- lineRange := sc.Range()
- if !lineRange.Overlaps(snipRange) {
- continue
- }
-
- beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange)
- if highlightedRange.Empty() {
- fmt.Fprintf(w.wr, "%4d: %s\n", lineRange.Start.Line, sc.Bytes())
- } else {
- before := beforeRange.SliceBytes(src)
- highlighted := highlightedRange.SliceBytes(src)
- after := afterRange.SliceBytes(src)
- fmt.Fprintf(
- w.wr, "%4d: %s%s%s%s%s\n",
- lineRange.Start.Line,
- before,
- highlightCode, highlighted, resetCode,
- after,
- )
- }
-
- }
-
- w.wr.Write([]byte{'\n'})
- }
-
- if diag.Expression != nil && diag.EvalContext != nil {
- // We will attempt to render the values for any variables
- // referenced in the given expression as additional context, for
- // situations where the same expression is evaluated multiple
- // times in different scopes.
- expr := diag.Expression
- ctx := diag.EvalContext
-
- vars := expr.Variables()
- stmts := make([]string, 0, len(vars))
- seen := make(map[string]struct{}, len(vars))
- for _, traversal := range vars {
- val, diags := traversal.TraverseAbs(ctx)
- if diags.HasErrors() {
- // Skip anything that generates errors, since we probably
- // already have the same error in our diagnostics set
- // already.
- continue
- }
-
- traversalStr := w.traversalStr(traversal)
- if _, exists := seen[traversalStr]; exists {
- continue // don't show duplicates when the same variable is referenced multiple times
- }
- switch {
- case !val.IsKnown():
- // Can't say anything about this yet, then.
- continue
- case val.IsNull():
- stmts = append(stmts, fmt.Sprintf("%s set to null", traversalStr))
- default:
- stmts = append(stmts, fmt.Sprintf("%s as %s", traversalStr, w.valueStr(val)))
- }
- seen[traversalStr] = struct{}{}
- }
-
- sort.Strings(stmts) // FIXME: Should maybe use a traversal-aware sort that can sort numeric indexes properly?
- last := len(stmts) - 1
-
- for i, stmt := range stmts {
- switch i {
- case 0:
- w.wr.Write([]byte{'w', 'i', 't', 'h', ' '})
- default:
- w.wr.Write([]byte{' ', ' ', ' ', ' ', ' '})
- }
- w.wr.Write([]byte(stmt))
- switch i {
- case last:
- w.wr.Write([]byte{'.', '\n', '\n'})
- default:
- w.wr.Write([]byte{',', '\n'})
- }
- }
- }
- }
-
- if diag.Detail != "" {
- detail := diag.Detail
- if w.width != 0 {
- detail = wordwrap.WrapString(detail, w.width)
- }
- fmt.Fprintf(w.wr, "%s\n\n", detail)
- }
-
- return nil
-}
-
-func (w *diagnosticTextWriter) WriteDiagnostics(diags Diagnostics) error {
- for _, diag := range diags {
- err := w.WriteDiagnostic(diag)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (w *diagnosticTextWriter) traversalStr(traversal Traversal) string {
- // This is a specialized subset of traversal rendering tailored to
- // producing helpful contextual messages in diagnostics. It is not
- // comprehensive nor intended to be used for other purposes.
-
- var buf bytes.Buffer
- for _, step := range traversal {
- switch tStep := step.(type) {
- case TraverseRoot:
- buf.WriteString(tStep.Name)
- case TraverseAttr:
- buf.WriteByte('.')
- buf.WriteString(tStep.Name)
- case TraverseIndex:
- buf.WriteByte('[')
- if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() {
- buf.WriteString(w.valueStr(tStep.Key))
- } else {
- // We'll just use a placeholder for more complex values,
- // since otherwise our result could grow ridiculously long.
- buf.WriteString("...")
- }
- buf.WriteByte(']')
- }
- }
- return buf.String()
-}
-
-func (w *diagnosticTextWriter) valueStr(val cty.Value) string {
- // This is a specialized subset of value rendering tailored to producing
- // helpful but concise messages in diagnostics. It is not comprehensive
- // nor intended to be used for other purposes.
-
- ty := val.Type()
- switch {
- case val.IsNull():
- return "null"
- case !val.IsKnown():
- // Should never happen here because we should filter before we get
- // in here, but we'll do something reasonable rather than panic.
- return "(not yet known)"
- case ty == cty.Bool:
- if val.True() {
- return "true"
- }
- return "false"
- case ty == cty.Number:
- bf := val.AsBigFloat()
- return bf.Text('g', 10)
- case ty == cty.String:
- // Go string syntax is not exactly the same as HCL native string syntax,
- // but we'll accept the minor edge-cases where this is different here
- // for now, just to get something reasonable here.
- return fmt.Sprintf("%q", val.AsString())
- case ty.IsCollectionType() || ty.IsTupleType():
- l := val.LengthInt()
- switch l {
- case 0:
- return "empty " + ty.FriendlyName()
- case 1:
- return ty.FriendlyName() + " with 1 element"
- default:
- return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l)
- }
- case ty.IsObjectType():
- atys := ty.AttributeTypes()
- l := len(atys)
- switch l {
- case 0:
- return "object with no attributes"
- case 1:
- var name string
- for k := range atys {
- name = k
- }
- return fmt.Sprintf("object with 1 attribute %q", name)
- default:
- return fmt.Sprintf("object with %d attributes", l)
- }
- default:
- return ty.FriendlyName()
- }
-}
-
-func contextString(file *File, offset int) string {
- type contextStringer interface {
- ContextString(offset int) string
- }
-
- if cser, ok := file.Nav.(contextStringer); ok {
- return cser.ContextString(offset)
- }
- return ""
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/diagnostic_typeparams.go b/vendor/github.com/hashicorp/hcl/v2/diagnostic_typeparams.go
deleted file mode 100644
index 92be8f1a..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/diagnostic_typeparams.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-//go:build go1.18
-// +build go1.18
-
-package hcl
-
-// This file contains additional diagnostics-related symbols that use the
-// Go 1.18 type parameters syntax and would therefore be incompatible with
-// Go 1.17 and earlier.
-
-// DiagnosticExtra attempts to retrieve an "extra value" of type T from the
-// given diagnostic, if either the diag.Extra field directly contains a value
-// of that type or the value implements DiagnosticExtraUnwrapper and directly
-// or indirectly returns a value of that type.
-//
-// Type T should typically be an interface type, so that code which generates
-// diagnostics can potentially return different implementations of the same
-// interface dynamically as needed.
-//
-// If a value of type T is found, returns that value and true to indicate
-// success. Otherwise, returns the zero value of T and false to indicate
-// failure.
-func DiagnosticExtra[T any](diag *Diagnostic) (T, bool) {
- extra := diag.Extra
- var zero T
-
- for {
- if ret, ok := extra.(T); ok {
- return ret, true
- }
-
- if unwrap, ok := extra.(DiagnosticExtraUnwrapper); ok {
- // If our "extra" implements DiagnosticExtraUnwrapper then we'll
- // unwrap one level and try this again.
- extra = unwrap.UnwrapDiagnosticExtra()
- } else {
- return zero, false
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/didyoumean.go b/vendor/github.com/hashicorp/hcl/v2/didyoumean.go
deleted file mode 100644
index fd00ca6f..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/didyoumean.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hcl
-
-import (
- "github.com/agext/levenshtein"
-)
-
-// nameSuggestion tries to find a name from the given slice of suggested names
-// that is close to the given name and returns it if found. If no suggestion
-// is close enough, returns the empty string.
-//
-// The suggestions are tried in order, so earlier suggestions take precedence
-// if the given string is similar to two or more suggestions.
-//
-// This function is intended to be used with a relatively-small number of
-// suggestions. It's not optimized for hundreds or thousands of them.
-func nameSuggestion(given string, suggestions []string) string {
- for _, suggestion := range suggestions {
- dist := levenshtein.Distance(given, suggestion, nil)
- if dist < 3 { // threshold determined experimentally
- return suggestion
- }
- }
- return ""
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/doc.go b/vendor/github.com/hashicorp/hcl/v2/doc.go
deleted file mode 100644
index a0e3119f..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/doc.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-// Package hcl contains the main modelling types and general utility functions
-// for HCL.
-//
-// For a simple entry point into HCL, see the package in the subdirectory
-// "hclsimple", which has an opinionated function Decode that can decode HCL
-// configurations in either native HCL syntax or JSON syntax into a Go struct
-// type:
-//
-// package main
-//
-// import (
-// "log"
-// "github.com/hashicorp/hcl/v2/hclsimple"
-// )
-//
-// type Config struct {
-// LogLevel string `hcl:"log_level"`
-// }
-//
-// func main() {
-// var config Config
-// err := hclsimple.DecodeFile("config.hcl", nil, &config)
-// if err != nil {
-// log.Fatalf("Failed to load configuration: %s", err)
-// }
-// log.Printf("Configuration is %#v", config)
-// }
-//
-// If your application needs more control over the evaluation of the
-// configuration, you can use the functions in the subdirectories hclparse,
-// gohcl, hcldec, etc. Splitting the handling of configuration into multiple
-// phases allows for advanced patterns such as allowing expressions in one
-// part of the configuration to refer to data defined in another part.
-package hcl
diff --git a/vendor/github.com/hashicorp/hcl/v2/eval_context.go b/vendor/github.com/hashicorp/hcl/v2/eval_context.go
deleted file mode 100644
index 921cfcb4..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/eval_context.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hcl
-
-import (
- "github.com/zclconf/go-cty/cty"
- "github.com/zclconf/go-cty/cty/function"
-)
-
-// An EvalContext provides the variables and functions that should be used
-// to evaluate an expression.
-type EvalContext struct {
- Variables map[string]cty.Value
- Functions map[string]function.Function
- parent *EvalContext
-}
-
-// NewChild returns a new EvalContext that is a child of the receiver.
-func (ctx *EvalContext) NewChild() *EvalContext {
- return &EvalContext{parent: ctx}
-}
-
-// Parent returns the parent of the receiver, or nil if the receiver has
-// no parent.
-func (ctx *EvalContext) Parent() *EvalContext {
- return ctx.parent
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/expr_call.go b/vendor/github.com/hashicorp/hcl/v2/expr_call.go
deleted file mode 100644
index ca59b90d..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/expr_call.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hcl
-
-// ExprCall tests if the given expression is a function call and,
-// if so, extracts the function name and the expressions that represent
-// the arguments. If the given expression is not statically a function call,
-// error diagnostics are returned.
-//
-// A particular Expression implementation can support this function by
-// offering a method called ExprCall that takes no arguments and returns
-// *StaticCall. This method should return nil if a static call cannot
-// be extracted. Alternatively, an implementation can support
-// UnwrapExpression to delegate handling of this function to a wrapped
-// Expression object.
-func ExprCall(expr Expression) (*StaticCall, Diagnostics) {
- type exprCall interface {
- ExprCall() *StaticCall
- }
-
- physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool {
- _, supported := expr.(exprCall)
- return supported
- })
-
- if exC, supported := physExpr.(exprCall); supported {
- if call := exC.ExprCall(); call != nil {
- return call, nil
- }
- }
- return nil, Diagnostics{
- &Diagnostic{
- Severity: DiagError,
- Summary: "Invalid expression",
- Detail: "A static function call is required.",
- Subject: expr.StartRange().Ptr(),
- },
- }
-}
-
-// StaticCall represents a function call that was extracted statically from
-// an expression using ExprCall.
-type StaticCall struct {
- Name string
- NameRange Range
- Arguments []Expression
- ArgsRange Range
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/expr_list.go b/vendor/github.com/hashicorp/hcl/v2/expr_list.go
deleted file mode 100644
index 8c0cf405..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/expr_list.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hcl
-
-// ExprList tests if the given expression is a static list construct and,
-// if so, extracts the expressions that represent the list elements.
-// If the given expression is not a static list, error diagnostics are
-// returned.
-//
-// A particular Expression implementation can support this function by
-// offering a method called ExprList that takes no arguments and returns
-// []Expression. This method should return nil if a static list cannot
-// be extracted. Alternatively, an implementation can support
-// UnwrapExpression to delegate handling of this function to a wrapped
-// Expression object.
-func ExprList(expr Expression) ([]Expression, Diagnostics) {
- type exprList interface {
- ExprList() []Expression
- }
-
- physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool {
- _, supported := expr.(exprList)
- return supported
- })
-
- if exL, supported := physExpr.(exprList); supported {
- if list := exL.ExprList(); list != nil {
- return list, nil
- }
- }
- return nil, Diagnostics{
- &Diagnostic{
- Severity: DiagError,
- Summary: "Invalid expression",
- Detail: "A static list expression is required.",
- Subject: expr.StartRange().Ptr(),
- },
- }
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/expr_map.go b/vendor/github.com/hashicorp/hcl/v2/expr_map.go
deleted file mode 100644
index 56cf9747..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/expr_map.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hcl
-
-// ExprMap tests if the given expression is a static map construct and,
-// if so, extracts the expressions that represent the map elements.
-// If the given expression is not a static map, error diagnostics are
-// returned.
-//
-// A particular Expression implementation can support this function by
-// offering a method called ExprMap that takes no arguments and returns
-// []KeyValuePair. This method should return nil if a static map cannot
-// be extracted. Alternatively, an implementation can support
-// UnwrapExpression to delegate handling of this function to a wrapped
-// Expression object.
-func ExprMap(expr Expression) ([]KeyValuePair, Diagnostics) {
- type exprMap interface {
- ExprMap() []KeyValuePair
- }
-
- physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool {
- _, supported := expr.(exprMap)
- return supported
- })
-
- if exM, supported := physExpr.(exprMap); supported {
- if pairs := exM.ExprMap(); pairs != nil {
- return pairs, nil
- }
- }
- return nil, Diagnostics{
- &Diagnostic{
- Severity: DiagError,
- Summary: "Invalid expression",
- Detail: "A static map expression is required.",
- Subject: expr.StartRange().Ptr(),
- },
- }
-}
-
-// KeyValuePair represents a pair of expressions that serve as a single item
-// within a map or object definition construct.
-type KeyValuePair struct {
- Key Expression
- Value Expression
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/expr_unwrap.go b/vendor/github.com/hashicorp/hcl/v2/expr_unwrap.go
deleted file mode 100644
index 6683fd54..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/expr_unwrap.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hcl
-
-type unwrapExpression interface {
- UnwrapExpression() Expression
-}
-
-// UnwrapExpression removes any "wrapper" expressions from the given expression,
-// to recover the representation of the physical expression given in source
-// code.
-//
-// Sometimes wrapping expressions are used to modify expression behavior, e.g.
-// in extensions that need to make some local variables available to certain
-// sub-trees of the configuration. This can make it difficult to reliably
-// type-assert on the physical AST types used by the underlying syntax.
-//
-// Unwrapping an expression may modify its behavior by stripping away any
-// additional constraints or capabilities being applied to the Value and
-// Variables methods, so this function should generally only be used prior
-// to operations that concern themselves with the static syntax of the input
-// configuration, and not with the effective value of the expression.
-//
-// Wrapper expression types must support unwrapping by implementing a method
-// called UnwrapExpression that takes no arguments and returns the embedded
-// Expression. Implementations of this method should peel away only one level
-// of wrapping, if multiple are present. This method may return nil to
-// indicate _dynamically_ that no wrapped expression is available, for
-// expression types that might only behave as wrappers in certain cases.
-func UnwrapExpression(expr Expression) Expression {
- for {
- unwrap, wrapped := expr.(unwrapExpression)
- if !wrapped {
- return expr
- }
- innerExpr := unwrap.UnwrapExpression()
- if innerExpr == nil {
- return expr
- }
- expr = innerExpr
- }
-}
-
-// UnwrapExpressionUntil is similar to UnwrapExpression except it gives the
-// caller an opportunity to test each level of unwrapping to see each a
-// particular expression is accepted.
-//
-// This could be used, for example, to unwrap until a particular other
-// interface is satisfied, regardless of wrap wrapping level it is satisfied
-// at.
-//
-// The given callback function must return false to continue wrapping, or
-// true to accept and return the proposed expression given. If the callback
-// function rejects even the final, physical expression then the result of
-// this function is nil.
-func UnwrapExpressionUntil(expr Expression, until func(Expression) bool) Expression {
- for {
- if until(expr) {
- return expr
- }
- unwrap, wrapped := expr.(unwrapExpression)
- if !wrapped {
- return nil
- }
- expr = unwrap.UnwrapExpression()
- if expr == nil {
- return nil
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/README.md b/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/README.md
deleted file mode 100644
index 1636f577..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/README.md
+++ /dev/null
@@ -1,209 +0,0 @@
-# HCL Custom Static Decoding Extension
-
-This HCL extension provides a mechanism for defining arguments in an HCL-based
-language whose values are derived using custom decoding rules against the
-HCL expression syntax, overriding the usual behavior of normal expression
-evaluation.
-
-"Arguments", for the purpose of this extension, currently includes the
-following two contexts:
-
-* For applications using `hcldec` for dynamic decoding, a `hcldec.AttrSpec`
- or `hcldec.BlockAttrsSpec` can be given a special type constraint that
- opts in to custom decoding behavior for the attribute(s) that are selected
- by that specification.
-
-* When working with the HCL native expression syntax, a function given in
- the `hcl.EvalContext` during evaluation can have parameters with special
- type constraints that opt in to custom decoding behavior for the argument
- expression associated with that parameter in any call.
-
-The above use-cases are rather abstract, so we'll consider a motivating
-real-world example: sometimes we (language designers) need to allow users
-to specify type constraints directly in the language itself, such as in
-[Terraform's Input Variables](https://www.terraform.io/docs/configuration/variables.html).
-Terraform's `variable` blocks include an argument called `type` which takes
-a type constraint given using HCL expression building-blocks as defined by
-[the HCL `typeexpr` extension](../typeexpr/README.md).
-
-A "type constraint expression" of that sort is not an expression intended to
-be evaluated in the usual way. Instead, the physical expression is
-deconstructed using [the static analysis operations](../../spec.md#static-analysis)
-to produce a `cty.Type` as the result, rather than a `cty.Value`.
-
-The purpose of this Custom Static Decoding Extension, then, is to provide a
-bridge to allow that sort of custom decoding to be used via mechanisms that
-normally deal in `cty.Value`, such as `hcldec` and native syntax function
-calls as listed above.
-
-(Note: [`gohcl`](https://pkg.go.dev/github.com/hashicorp/hcl/v2/gohcl) has
-its own mechanism to support this use case, exploiting the fact that it is
-working directly with "normal" Go types. Decoding into a struct field of
-type `hcl.Expression` obtains the expression directly without evaluating it
-first. The Custom Static Decoding Extension is not necessary for that `gohcl`
-technique. You can also implement custom decoding by working directly with
-the lowest-level HCL API, which separates extraction of and evaluation of
-expressions into two steps.)
-
-## Custom Decoding Types
-
-This extension relies on a convention implemented in terms of
-[_Capsule Types_ in the underlying `cty` type system](https://github.com/zclconf/go-cty/blob/master/docs/types.md#capsule-types). `cty` allows a capsule type to carry arbitrary
-extension metadata values as an aid to creating higher-level abstractions like
-this extension.
-
-A custom argument decoding mode, then, is implemented by creating a new `cty`
-capsule type that implements the `ExtensionData` custom operation to return
-a decoding function when requested. For example:
-
-```go
-var keywordType cty.Type
-keywordType = cty.CapsuleWithOps("keyword", reflect.TypeOf(""), &cty.CapsuleOps{
- ExtensionData: func(key interface{}) interface{} {
- switch key {
- case customdecode.CustomExpressionDecoder:
- return customdecode.CustomExpressionDecoderFunc(
- func(expr hcl.Expression, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- var diags hcl.Diagnostics
- kw := hcl.ExprAsKeyword(expr)
- if kw == "" {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid keyword",
- Detail: "A keyword is required",
- Subject: expr.Range().Ptr(),
- })
- return cty.UnkownVal(keywordType), diags
- }
- return cty.CapsuleVal(keywordType, &kw)
- },
- )
- default:
- return nil
- }
- },
-})
-```
-
-The boilerplate here is a bit fussy, but the important part for our purposes
-is the `case customdecode.CustomExpressionDecoder:` clause, which uses
-a custom extension key type defined in this package to recognize when a
-component implementing this extension is checking to see if a target type
-has a custom decode implementation.
-
-In the above case we've defined a type that decodes expressions as static
-keywords, so a keyword like `foo` would decode as an encapsulated `"foo"`
-string, while any other sort of expression like `"baz"` or `1 + 1` would
-return an error.
-
-We could then use `keywordType` as a type constraint either for a function
-parameter or a `hcldec` attribute specification, which would require the
-argument for that function parameter or the expression for the matching
-attributes to be a static keyword, rather than an arbitrary expression.
-For example, in a `hcldec.AttrSpec`:
-
-```go
-keywordSpec := &hcldec.AttrSpec{
- Name: "keyword",
- Type: keywordType,
-}
-```
-
-The above would accept input like the following and would set its result to
-a `cty.Value` of `keywordType`, after decoding:
-
-```hcl
-keyword = foo
-```
-
-## The Expression and Expression Closure `cty` types
-
-Building on the above, this package also includes two capsule types that use
-the above mechanism to allow calling applications to capture expressions
-directly and thus defer analysis to a later step, after initial decoding.
-
-The `customdecode.ExpressionType` type encapsulates an `hcl.Expression` alone,
-for situations like our type constraint expression example above where it's
-the static structure of the expression we want to inspect, and thus any
-variables and functions defined in the evaluation context are irrelevant.
-
-The `customdecode.ExpressionClosureType` type encapsulates a
-`*customdecode.ExpressionClosure` value, which binds the given expression to
-the `hcl.EvalContext` it was asked to evaluate against and thus allows the
-receiver of that result to later perform normal evaluation of the expression
-with all the same variables and functions that would've been available to it
-naturally.
-
-Both of these types can be used as type constraints either for `hcldec`
-attribute specifications or for function arguments. Here's an example of
-`ExpressionClosureType` to implement a function that can evaluate
-an expression with some additional variables defined locally, which we'll
-call the `with(...)` function:
-
-```go
-var WithFunc = function.New(&function.Spec{
- Params: []function.Parameter{
- {
- Name: "variables",
- Type: cty.DynamicPseudoType,
- },
- {
- Name: "expression",
- Type: customdecode.ExpressionClosureType,
- },
- },
- Type: func(args []cty.Value) (cty.Type, error) {
- varsVal := args[0]
- exprVal := args[1]
- if !varsVal.Type().IsObjectType() {
- return cty.NilVal, function.NewArgErrorf(0, "must be an object defining local variables")
- }
- if !varsVal.IsKnown() {
- // We can't predict our result type until the variables object
- // is known.
- return cty.DynamicPseudoType, nil
- }
- vars := varsVal.AsValueMap()
- closure := customdecode.ExpressionClosureFromVal(exprVal)
- result, err := evalWithLocals(vars, closure)
- if err != nil {
- return cty.NilVal, err
- }
- return result.Type(), nil
- },
- Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
- varsVal := args[0]
- exprVal := args[1]
- vars := varsVal.AsValueMap()
- closure := customdecode.ExpressionClosureFromVal(exprVal)
- return evalWithLocals(vars, closure)
- },
-})
-
-func evalWithLocals(locals map[string]cty.Value, closure *customdecode.ExpressionClosure) (cty.Value, error) {
- childCtx := closure.EvalContext.NewChild()
- childCtx.Variables = locals
- val, diags := closure.Expression.Value(childCtx)
- if diags.HasErrors() {
- return cty.NilVal, function.NewArgErrorf(1, "couldn't evaluate expression: %s", diags.Error())
- }
- return val, nil
-}
-```
-
-If the above function were placed into an `hcl.EvalContext` as `with`, it
-could be used in a native syntax call to that function as follows:
-
-```hcl
- foo = with({name = "Cory"}, "${greeting}, ${name}!")
-```
-
-The above assumes a variable in the main context called `greeting`, to which
-the `with` function adds `name` before evaluating the expression given in
-its second argument. This makes that second argument context-sensitive -- it
-would behave differently if the user wrote the same thing somewhere else -- so
-this capability should be used with care to make sure it doesn't cause confusion
-for the end-users of your language.
-
-There are some other examples of this capability to evaluate expressions in
-unusual ways in the `tryfunc` directory that is a sibling of this one.
diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/customdecode.go b/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/customdecode.go
deleted file mode 100644
index e0dda0df..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/customdecode.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-// Package customdecode contains a HCL extension that allows, in certain
-// contexts, expression evaluation to be overridden by custom static analysis.
-//
-// This mechanism is only supported in certain specific contexts where
-// expressions are decoded with a specific target type in mind. For more
-// information, see the documentation on CustomExpressionDecoder.
-package customdecode
-
-import (
- "github.com/hashicorp/hcl/v2"
- "github.com/zclconf/go-cty/cty"
-)
-
-type customDecoderImpl int
-
-// CustomExpressionDecoder is a value intended to be used as a cty capsule
-// type ExtensionData key for capsule types whose values are to be obtained
-// by static analysis of an expression rather than normal evaluation of that
-// expression.
-//
-// When a cooperating capsule type is asked for ExtensionData with this key,
-// it must return a non-nil CustomExpressionDecoderFunc value.
-//
-// This mechanism is not universally supported; instead, it's handled in a few
-// specific places where expressions are evaluated with the intent of producing
-// a cty.Value of a type given by the calling application.
-//
-// Specifically, this currently works for type constraints given in
-// hcldec.AttrSpec and hcldec.BlockAttrsSpec, and it works for arguments to
-// function calls in the HCL native syntax. HCL extensions implemented outside
-// of the main HCL module may also implement this; consult their own
-// documentation for details.
-const CustomExpressionDecoder = customDecoderImpl(1)
-
-// CustomExpressionDecoderFunc is the type of value that must be returned by
-// a capsule type handling the key CustomExpressionDecoder in its ExtensionData
-// implementation.
-//
-// If no error diagnostics are returned, the result value MUST be of the
-// capsule type that the decoder function was derived from. If the returned
-// error diagnostics prevent producing a value at all, return cty.NilVal.
-type CustomExpressionDecoderFunc func(expr hcl.Expression, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics)
-
-// CustomExpressionDecoderForType takes any cty type and returns its
-// custom expression decoder implementation if it has one. If it is not a
-// capsule type or it does not implement a custom expression decoder, this
-// function returns nil.
-func CustomExpressionDecoderForType(ty cty.Type) CustomExpressionDecoderFunc {
- if !ty.IsCapsuleType() {
- return nil
- }
- if fn, ok := ty.CapsuleExtensionData(CustomExpressionDecoder).(CustomExpressionDecoderFunc); ok {
- return fn
- }
- return nil
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/expression_type.go b/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/expression_type.go
deleted file mode 100644
index 2477f219..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/ext/customdecode/expression_type.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package customdecode
-
-import (
- "fmt"
- "reflect"
-
- "github.com/hashicorp/hcl/v2"
- "github.com/zclconf/go-cty/cty"
-)
-
-// ExpressionType is a cty capsule type that carries hcl.Expression values.
-//
-// This type implements custom decoding in the most general way possible: it
-// just captures whatever expression is given to it, with no further processing
-// whatsoever. It could therefore be useful in situations where an application
-// must defer processing of the expression content until a later step.
-//
-// ExpressionType only captures the expression, not the evaluation context it
-// was destined to be evaluated in. That means this type can be fine for
-// situations where the recipient of the value only intends to do static
-// analysis, but ExpressionClosureType is more appropriate in situations where
-// the recipient will eventually evaluate the given expression.
-var ExpressionType cty.Type
-
-// ExpressionVal returns a new cty value of type ExpressionType, wrapping the
-// given expression.
-func ExpressionVal(expr hcl.Expression) cty.Value {
- return cty.CapsuleVal(ExpressionType, &expr)
-}
-
-// ExpressionFromVal returns the expression encapsulated in the given value, or
-// panics if the value is not a known value of ExpressionType.
-func ExpressionFromVal(v cty.Value) hcl.Expression {
- if !v.Type().Equals(ExpressionType) {
- panic("value is not of ExpressionType")
- }
- ptr := v.EncapsulatedValue().(*hcl.Expression)
- return *ptr
-}
-
-// ExpressionClosureType is a cty capsule type that carries hcl.Expression
-// values along with their original evaluation contexts.
-//
-// This is similar to ExpressionType except that during custom decoding it
-// also captures the hcl.EvalContext that was provided, allowing callers to
-// evaluate the expression later in the same context where it would originally
-// have been evaluated, or a context derived from that one.
-var ExpressionClosureType cty.Type
-
-// ExpressionClosure is the type encapsulated in ExpressionClosureType
-type ExpressionClosure struct {
- Expression hcl.Expression
- EvalContext *hcl.EvalContext
-}
-
-// ExpressionClosureVal returns a new cty value of type ExpressionClosureType,
-// wrapping the given expression closure.
-func ExpressionClosureVal(closure *ExpressionClosure) cty.Value {
- return cty.CapsuleVal(ExpressionClosureType, closure)
-}
-
-// Value evaluates the closure's expression using the closure's EvalContext,
-// returning the result.
-func (c *ExpressionClosure) Value() (cty.Value, hcl.Diagnostics) {
- return c.Expression.Value(c.EvalContext)
-}
-
-// ExpressionClosureFromVal returns the expression closure encapsulated in the
-// given value, or panics if the value is not a known value of
-// ExpressionClosureType.
-//
-// The caller MUST NOT modify the returned closure or the EvalContext inside
-// it. To derive a new EvalContext, either create a child context or make
-// a copy.
-func ExpressionClosureFromVal(v cty.Value) *ExpressionClosure {
- if !v.Type().Equals(ExpressionClosureType) {
- panic("value is not of ExpressionClosureType")
- }
- return v.EncapsulatedValue().(*ExpressionClosure)
-}
-
-func init() {
- // Getting hold of a reflect.Type for hcl.Expression is a bit tricky because
- // it's an interface type, but we can do it with some indirection.
- goExpressionType := reflect.TypeOf((*hcl.Expression)(nil)).Elem()
-
- ExpressionType = cty.CapsuleWithOps("expression", goExpressionType, &cty.CapsuleOps{
- ExtensionData: func(key interface{}) interface{} {
- switch key {
- case CustomExpressionDecoder:
- return CustomExpressionDecoderFunc(
- func(expr hcl.Expression, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- return ExpressionVal(expr), nil
- },
- )
- default:
- return nil
- }
- },
- TypeGoString: func(_ reflect.Type) string {
- return "customdecode.ExpressionType"
- },
- GoString: func(raw interface{}) string {
- exprPtr := raw.(*hcl.Expression)
- return fmt.Sprintf("customdecode.ExpressionVal(%#v)", *exprPtr)
- },
- RawEquals: func(a, b interface{}) bool {
- aPtr := a.(*hcl.Expression)
- bPtr := b.(*hcl.Expression)
- return reflect.DeepEqual(*aPtr, *bPtr)
- },
- })
- ExpressionClosureType = cty.CapsuleWithOps("expression closure", reflect.TypeOf(ExpressionClosure{}), &cty.CapsuleOps{
- ExtensionData: func(key interface{}) interface{} {
- switch key {
- case CustomExpressionDecoder:
- return CustomExpressionDecoderFunc(
- func(expr hcl.Expression, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- return ExpressionClosureVal(&ExpressionClosure{
- Expression: expr,
- EvalContext: ctx,
- }), nil
- },
- )
- default:
- return nil
- }
- },
- TypeGoString: func(_ reflect.Type) string {
- return "customdecode.ExpressionClosureType"
- },
- GoString: func(raw interface{}) string {
- closure := raw.(*ExpressionClosure)
- return fmt.Sprintf("customdecode.ExpressionClosureVal(%#v)", closure)
- },
- RawEquals: func(a, b interface{}) bool {
- closureA := a.(*ExpressionClosure)
- closureB := b.(*ExpressionClosure)
- // The expression itself compares by deep equality, but EvalContexts
- // conventionally compare by pointer identity, so we'll comply
- // with both conventions here by testing them separately.
- return closureA.EvalContext == closureB.EvalContext &&
- reflect.DeepEqual(closureA.Expression, closureB.Expression)
- },
- })
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/diagnostics.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/diagnostics.go
deleted file mode 100644
index 43689d74..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/diagnostics.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-import (
- "github.com/hashicorp/hcl/v2"
-)
-
-// setDiagEvalContext is an internal helper that will impose a particular
-// EvalContext on a set of diagnostics in-place, for any diagnostic that
-// does not already have an EvalContext set.
-//
-// We generally expect diagnostics to be immutable, but this is safe to use
-// on any Diagnostics where none of the contained Diagnostic objects have yet
-// been seen by a caller. Its purpose is to apply additional context to a
-// set of diagnostics produced by a "deeper" component as the stack unwinds
-// during expression evaluation.
-func setDiagEvalContext(diags hcl.Diagnostics, expr hcl.Expression, ctx *hcl.EvalContext) {
- for _, diag := range diags {
- if diag.Expression == nil {
- diag.Expression = expr
- diag.EvalContext = ctx
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/didyoumean.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/didyoumean.go
deleted file mode 100644
index 5b0e4681..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/didyoumean.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-import (
- "github.com/agext/levenshtein"
-)
-
-// nameSuggestion tries to find a name from the given slice of suggested names
-// that is close to the given name and returns it if found. If no suggestion
-// is close enough, returns the empty string.
-//
-// The suggestions are tried in order, so earlier suggestions take precedence
-// if the given string is similar to two or more suggestions.
-//
-// This function is intended to be used with a relatively-small number of
-// suggestions. It's not optimized for hundreds or thousands of them.
-func nameSuggestion(given string, suggestions []string) string {
- for _, suggestion := range suggestions {
- dist := levenshtein.Distance(given, suggestion, nil)
- if dist < 3 { // threshold determined experimentally
- return suggestion
- }
- }
- return ""
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/doc.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/doc.go
deleted file mode 100644
index defe3dbb..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/doc.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-// Package hclsyntax contains the parser, AST, etc for HCL's native language,
-// as opposed to the JSON variant.
-//
-// In normal use applications should rarely depend on this package directly,
-// instead preferring the higher-level interface of the main hcl package and
-// its companion package hclparse.
-package hclsyntax
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go
deleted file mode 100644
index 577a50fa..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression.go
+++ /dev/null
@@ -1,2040 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-import (
- "fmt"
- "sort"
- "strings"
- "sync"
-
- "github.com/hashicorp/hcl/v2"
- "github.com/hashicorp/hcl/v2/ext/customdecode"
- "github.com/zclconf/go-cty/cty"
- "github.com/zclconf/go-cty/cty/convert"
- "github.com/zclconf/go-cty/cty/function"
-)
-
-// Expression is the abstract type for nodes that behave as HCL expressions.
-type Expression interface {
- Node
-
- // The hcl.Expression methods are duplicated here, rather than simply
- // embedded, because both Node and hcl.Expression have a Range method
- // and so they conflict.
-
- Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics)
- Variables() []hcl.Traversal
- StartRange() hcl.Range
-}
-
-// Assert that Expression implements hcl.Expression
-var _ hcl.Expression = Expression(nil)
-
-// ParenthesesExpr represents an expression written in grouping
-// parentheses.
-//
-// The parser takes care of the precedence effect of the parentheses, so the
-// only purpose of this separate expression node is to capture the source range
-// of the parentheses themselves, rather than the source range of the
-// expression within. All of the other expression operations just pass through
-// to the underlying expression.
-type ParenthesesExpr struct {
- Expression
- SrcRange hcl.Range
-}
-
-var _ hcl.Expression = (*ParenthesesExpr)(nil)
-
-func (e *ParenthesesExpr) Range() hcl.Range {
- return e.SrcRange
-}
-
-func (e *ParenthesesExpr) walkChildNodes(w internalWalkFunc) {
- // We override the walkChildNodes from the embedded Expression to
- // ensure that both the parentheses _and_ the content are visible
- // in a walk.
- w(e.Expression)
-}
-
-// LiteralValueExpr is an expression that just always returns a given value.
-type LiteralValueExpr struct {
- Val cty.Value
- SrcRange hcl.Range
-}
-
-func (e *LiteralValueExpr) walkChildNodes(w internalWalkFunc) {
- // Literal values have no child nodes
-}
-
-func (e *LiteralValueExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- return e.Val, nil
-}
-
-func (e *LiteralValueExpr) Range() hcl.Range {
- return e.SrcRange
-}
-
-func (e *LiteralValueExpr) StartRange() hcl.Range {
- return e.SrcRange
-}
-
-// Implementation for hcl.AbsTraversalForExpr.
-func (e *LiteralValueExpr) AsTraversal() hcl.Traversal {
- // This one's a little weird: the contract for AsTraversal is to interpret
- // an expression as if it were traversal syntax, and traversal syntax
- // doesn't have the special keywords "null", "true", and "false" so these
- // are expected to be treated like variables in that case.
- // Since our parser already turned them into LiteralValueExpr by the time
- // we get here, we need to undo this and infer the name that would've
- // originally led to our value.
- // We don't do anything for any other values, since they don't overlap
- // with traversal roots.
-
- if e.Val.IsNull() {
- // In practice the parser only generates null values of the dynamic
- // pseudo-type for literals, so we can safely assume that any null
- // was orignally the keyword "null".
- return hcl.Traversal{
- hcl.TraverseRoot{
- Name: "null",
- SrcRange: e.SrcRange,
- },
- }
- }
-
- switch e.Val {
- case cty.True:
- return hcl.Traversal{
- hcl.TraverseRoot{
- Name: "true",
- SrcRange: e.SrcRange,
- },
- }
- case cty.False:
- return hcl.Traversal{
- hcl.TraverseRoot{
- Name: "false",
- SrcRange: e.SrcRange,
- },
- }
- default:
- // No traversal is possible for any other value.
- return nil
- }
-}
-
-// ScopeTraversalExpr is an Expression that retrieves a value from the scope
-// using a traversal.
-type ScopeTraversalExpr struct {
- Traversal hcl.Traversal
- SrcRange hcl.Range
-}
-
-func (e *ScopeTraversalExpr) walkChildNodes(w internalWalkFunc) {
- // Scope traversals have no child nodes
-}
-
-func (e *ScopeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- val, diags := e.Traversal.TraverseAbs(ctx)
- setDiagEvalContext(diags, e, ctx)
- return val, diags
-}
-
-func (e *ScopeTraversalExpr) Range() hcl.Range {
- return e.SrcRange
-}
-
-func (e *ScopeTraversalExpr) StartRange() hcl.Range {
- return e.SrcRange
-}
-
-// Implementation for hcl.AbsTraversalForExpr.
-func (e *ScopeTraversalExpr) AsTraversal() hcl.Traversal {
- return e.Traversal
-}
-
-// RelativeTraversalExpr is an Expression that retrieves a value from another
-// value using a _relative_ traversal.
-type RelativeTraversalExpr struct {
- Source Expression
- Traversal hcl.Traversal
- SrcRange hcl.Range
-}
-
-func (e *RelativeTraversalExpr) walkChildNodes(w internalWalkFunc) {
- w(e.Source)
-}
-
-func (e *RelativeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- src, diags := e.Source.Value(ctx)
- ret, travDiags := e.Traversal.TraverseRel(src)
- setDiagEvalContext(travDiags, e, ctx)
- diags = append(diags, travDiags...)
- return ret, diags
-}
-
-func (e *RelativeTraversalExpr) Range() hcl.Range {
- return e.SrcRange
-}
-
-func (e *RelativeTraversalExpr) StartRange() hcl.Range {
- return e.SrcRange
-}
-
-// Implementation for hcl.AbsTraversalForExpr.
-func (e *RelativeTraversalExpr) AsTraversal() hcl.Traversal {
- // We can produce a traversal only if our source can.
- st, diags := hcl.AbsTraversalForExpr(e.Source)
- if diags.HasErrors() {
- return nil
- }
-
- ret := make(hcl.Traversal, len(st)+len(e.Traversal))
- copy(ret, st)
- copy(ret[len(st):], e.Traversal)
- return ret
-}
-
-// FunctionCallExpr is an Expression that calls a function from the EvalContext
-// and returns its result.
-type FunctionCallExpr struct {
- Name string
- Args []Expression
-
- // If true, the final argument should be a tuple, list or set which will
- // expand to be one argument per element.
- ExpandFinal bool
-
- NameRange hcl.Range
- OpenParenRange hcl.Range
- CloseParenRange hcl.Range
-}
-
-func (e *FunctionCallExpr) walkChildNodes(w internalWalkFunc) {
- for _, arg := range e.Args {
- w(arg)
- }
-}
-
-func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- var diags hcl.Diagnostics
-
- var f function.Function
- exists := false
- hasNonNilMap := false
- thisCtx := ctx
- for thisCtx != nil {
- if thisCtx.Functions == nil {
- thisCtx = thisCtx.Parent()
- continue
- }
- hasNonNilMap = true
- f, exists = thisCtx.Functions[e.Name]
- if exists {
- break
- }
- thisCtx = thisCtx.Parent()
- }
-
- if !exists {
- if !hasNonNilMap {
- return cty.DynamicVal, hcl.Diagnostics{
- {
- Severity: hcl.DiagError,
- Summary: "Function calls not allowed",
- Detail: "Functions may not be called here.",
- Subject: e.Range().Ptr(),
- Expression: e,
- EvalContext: ctx,
- },
- }
- }
-
- extraUnknown := &functionCallUnknown{
- name: e.Name,
- }
-
- // For historical reasons, we represent namespaced function names
- // as strings with :: separating the names. If this was an attempt
- // to call a namespaced function then we'll try to distinguish
- // between an invalid namespace or an invalid name within a valid
- // namespace in order to give the user better feedback about what
- // is wrong.
- //
- // The parser guarantees that a function name will always
- // be a series of valid identifiers separated by "::" with no
- // other content, so we can be relatively unforgiving in our processing
- // here.
- if sepIdx := strings.LastIndex(e.Name, "::"); sepIdx != -1 {
- namespace := e.Name[:sepIdx+2]
- name := e.Name[sepIdx+2:]
-
- avail := make([]string, 0, len(ctx.Functions))
- for availName := range ctx.Functions {
- if strings.HasPrefix(availName, namespace) {
- avail = append(avail, availName)
- }
- }
-
- extraUnknown.name = name
- extraUnknown.namespace = namespace
-
- if len(avail) == 0 {
- // TODO: Maybe use nameSuggestion for the other available
- // namespaces? But that'd require us to go scan the function
- // table again, so we'll wait to see if it's really warranted.
- // For now, we're assuming people are more likely to misremember
- // the function names than the namespaces, because in many
- // applications there will be relatively few namespaces compared
- // to the number of distinct functions.
- return cty.DynamicVal, hcl.Diagnostics{
- {
- Severity: hcl.DiagError,
- Summary: "Call to unknown function",
- Detail: fmt.Sprintf("There are no functions in namespace %q.", namespace),
- Subject: &e.NameRange,
- Context: e.Range().Ptr(),
- Expression: e,
- EvalContext: ctx,
- Extra: extraUnknown,
- },
- }
- } else {
- suggestion := nameSuggestion(name, avail)
- if suggestion != "" {
- suggestion = fmt.Sprintf(" Did you mean %s%s?", namespace, suggestion)
- }
-
- return cty.DynamicVal, hcl.Diagnostics{
- {
- Severity: hcl.DiagError,
- Summary: "Call to unknown function",
- Detail: fmt.Sprintf("There is no function named %q in namespace %s.%s", name, namespace, suggestion),
- Subject: &e.NameRange,
- Context: e.Range().Ptr(),
- Expression: e,
- EvalContext: ctx,
- Extra: extraUnknown,
- },
- }
- }
- }
-
- avail := make([]string, 0, len(ctx.Functions))
- for name := range ctx.Functions {
- avail = append(avail, name)
- }
- suggestion := nameSuggestion(e.Name, avail)
- if suggestion != "" {
- suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
- }
-
- return cty.DynamicVal, hcl.Diagnostics{
- {
- Severity: hcl.DiagError,
- Summary: "Call to unknown function",
- Detail: fmt.Sprintf("There is no function named %q.%s", e.Name, suggestion),
- Subject: &e.NameRange,
- Context: e.Range().Ptr(),
- Expression: e,
- EvalContext: ctx,
- Extra: extraUnknown,
- },
- }
- }
-
- diagExtra := functionCallDiagExtra{
- calledFunctionName: e.Name,
- }
-
- params := f.Params()
- varParam := f.VarParam()
-
- args := e.Args
- if e.ExpandFinal {
- if len(args) < 1 {
- // should never happen if the parser is behaving
- panic("ExpandFinal set on function call with no arguments")
- }
- expandExpr := args[len(args)-1]
- expandVal, expandDiags := expandExpr.Value(ctx)
- diags = append(diags, expandDiags...)
- if expandDiags.HasErrors() {
- return cty.DynamicVal, diags
- }
-
- switch {
- case expandVal.Type().Equals(cty.DynamicPseudoType):
- if expandVal.IsNull() {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid expanding argument value",
- Detail: "The expanding argument (indicated by ...) must not be null.",
- Subject: expandExpr.Range().Ptr(),
- Context: e.Range().Ptr(),
- Expression: expandExpr,
- EvalContext: ctx,
- Extra: &diagExtra,
- })
- return cty.DynamicVal, diags
- }
- return cty.DynamicVal, diags
- case expandVal.Type().IsTupleType() || expandVal.Type().IsListType() || expandVal.Type().IsSetType():
- if expandVal.IsNull() {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid expanding argument value",
- Detail: "The expanding argument (indicated by ...) must not be null.",
- Subject: expandExpr.Range().Ptr(),
- Context: e.Range().Ptr(),
- Expression: expandExpr,
- EvalContext: ctx,
- Extra: &diagExtra,
- })
- return cty.DynamicVal, diags
- }
- if !expandVal.IsKnown() {
- return cty.DynamicVal, diags
- }
-
- // When expanding arguments from a collection, we must first unmark
- // the collection itself, and apply any marks directly to the
- // elements. This ensures that marks propagate correctly.
- expandVal, marks := expandVal.Unmark()
- newArgs := make([]Expression, 0, (len(args)-1)+expandVal.LengthInt())
- newArgs = append(newArgs, args[:len(args)-1]...)
- it := expandVal.ElementIterator()
- for it.Next() {
- _, val := it.Element()
- newArgs = append(newArgs, &LiteralValueExpr{
- Val: val.WithMarks(marks),
- SrcRange: expandExpr.Range(),
- })
- }
- args = newArgs
- default:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid expanding argument value",
- Detail: "The expanding argument (indicated by ...) must be of a tuple, list, or set type.",
- Subject: expandExpr.Range().Ptr(),
- Context: e.Range().Ptr(),
- Expression: expandExpr,
- EvalContext: ctx,
- Extra: &diagExtra,
- })
- return cty.DynamicVal, diags
- }
- }
-
- if len(args) < len(params) {
- missing := params[len(args)]
- qual := ""
- if varParam != nil {
- qual = " at least"
- }
- return cty.DynamicVal, hcl.Diagnostics{
- {
- Severity: hcl.DiagError,
- Summary: "Not enough function arguments",
- Detail: fmt.Sprintf(
- "Function %q expects%s %d argument(s). Missing value for %q.",
- e.Name, qual, len(params), missing.Name,
- ),
- Subject: &e.CloseParenRange,
- Context: e.Range().Ptr(),
- Expression: e,
- EvalContext: ctx,
- Extra: &diagExtra,
- },
- }
- }
-
- if varParam == nil && len(args) > len(params) {
- return cty.DynamicVal, hcl.Diagnostics{
- {
- Severity: hcl.DiagError,
- Summary: "Too many function arguments",
- Detail: fmt.Sprintf(
- "Function %q expects only %d argument(s).",
- e.Name, len(params),
- ),
- Subject: args[len(params)].StartRange().Ptr(),
- Context: e.Range().Ptr(),
- Expression: e,
- EvalContext: ctx,
- Extra: &diagExtra,
- },
- }
- }
-
- argVals := make([]cty.Value, len(args))
-
- for i, argExpr := range args {
- var param *function.Parameter
- if i < len(params) {
- param = ¶ms[i]
- } else {
- param = varParam
- }
-
- var val cty.Value
- if decodeFn := customdecode.CustomExpressionDecoderForType(param.Type); decodeFn != nil {
- var argDiags hcl.Diagnostics
- val, argDiags = decodeFn(argExpr, ctx)
- diags = append(diags, argDiags...)
- if val == cty.NilVal {
- val = cty.UnknownVal(param.Type)
- }
- } else {
- var argDiags hcl.Diagnostics
- val, argDiags = argExpr.Value(ctx)
- if len(argDiags) > 0 {
- diags = append(diags, argDiags...)
- }
-
- // Try to convert our value to the parameter type
- var err error
- val, err = convert.Convert(val, param.Type)
- if err != nil {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid function argument",
- Detail: fmt.Sprintf(
- "Invalid value for %q parameter: %s.",
- param.Name, err,
- ),
- Subject: argExpr.StartRange().Ptr(),
- Context: e.Range().Ptr(),
- Expression: argExpr,
- EvalContext: ctx,
- Extra: &diagExtra,
- })
- }
- }
-
- argVals[i] = val
- }
-
- if diags.HasErrors() {
- // Don't try to execute the function if we already have errors with
- // the arguments, because the result will probably be a confusing
- // error message.
- return cty.DynamicVal, diags
- }
-
- resultVal, err := f.Call(argVals)
- if err != nil {
- // For errors in the underlying call itself we also return the raw
- // call error via an extra method on our "diagnostic extra" value.
- diagExtra.functionCallError = err
-
- switch terr := err.(type) {
- case function.ArgError:
- i := terr.Index
- var param *function.Parameter
- if i < len(params) {
- param = ¶ms[i]
- } else {
- param = varParam
- }
-
- if param == nil || i > len(args)-1 {
- // Getting here means that the function we called has a bug:
- // it returned an arg error that refers to an argument index
- // that wasn't present in the call. For that situation
- // we'll degrade to a less specific error just to give
- // some sort of answer, but best to still fix the buggy
- // function so that it only returns argument indices that
- // are in range.
- switch {
- case param != nil:
- // In this case we'll assume that the function was trying
- // to talk about a final variadic parameter but the caller
- // didn't actually provide any arguments for it. That means
- // we can at least still name the parameter in the
- // error message, but our source range will be the call
- // as a whole because we don't have an argument expression
- // to highlight specifically.
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid function argument",
- Detail: fmt.Sprintf(
- "Invalid value for %q parameter: %s.",
- param.Name, err,
- ),
- Subject: e.Range().Ptr(),
- Expression: e,
- EvalContext: ctx,
- Extra: &diagExtra,
- })
- default:
- // This is the most degenerate case of all, where the
- // index is out of range even for the declared parameters,
- // and so we can't tell which parameter the function is
- // trying to report an error for. Just a generic error
- // report in that case.
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Error in function call",
- Detail: fmt.Sprintf(
- "Call to function %q failed: %s.",
- e.Name, err,
- ),
- Subject: e.StartRange().Ptr(),
- Context: e.Range().Ptr(),
- Expression: e,
- EvalContext: ctx,
- Extra: &diagExtra,
- })
- }
- } else {
- argExpr := args[i]
-
- // TODO: we should also unpick a PathError here and show the
- // path to the deep value where the error was detected.
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid function argument",
- Detail: fmt.Sprintf(
- "Invalid value for %q parameter: %s.",
- param.Name, err,
- ),
- Subject: argExpr.StartRange().Ptr(),
- Context: e.Range().Ptr(),
- Expression: argExpr,
- EvalContext: ctx,
- Extra: &diagExtra,
- })
- }
-
- default:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Error in function call",
- Detail: fmt.Sprintf(
- "Call to function %q failed: %s.",
- e.Name, err,
- ),
- Subject: e.StartRange().Ptr(),
- Context: e.Range().Ptr(),
- Expression: e,
- EvalContext: ctx,
- Extra: &diagExtra,
- })
- }
-
- return cty.DynamicVal, diags
- }
-
- return resultVal, diags
-}
-
-func (e *FunctionCallExpr) Range() hcl.Range {
- return hcl.RangeBetween(e.NameRange, e.CloseParenRange)
-}
-
-func (e *FunctionCallExpr) StartRange() hcl.Range {
- return hcl.RangeBetween(e.NameRange, e.OpenParenRange)
-}
-
-// Implementation for hcl.ExprCall.
-func (e *FunctionCallExpr) ExprCall() *hcl.StaticCall {
- ret := &hcl.StaticCall{
- Name: e.Name,
- NameRange: e.NameRange,
- Arguments: make([]hcl.Expression, len(e.Args)),
- ArgsRange: hcl.RangeBetween(e.OpenParenRange, e.CloseParenRange),
- }
- // Need to convert our own Expression objects into hcl.Expression.
- for i, arg := range e.Args {
- ret.Arguments[i] = arg
- }
- return ret
-}
-
-// FunctionCallDiagExtra is an interface implemented by the value in the "Extra"
-// field of some diagnostics returned by FunctionCallExpr.Value, giving
-// cooperating callers access to some machine-readable information about the
-// call that a diagnostic relates to.
-type FunctionCallDiagExtra interface {
- // CalledFunctionName returns the name of the function being called at
- // the time the diagnostic was generated, if any. Returns an empty string
- // if there is no known called function.
- CalledFunctionName() string
-
- // FunctionCallError returns the error value returned by the implementation
- // of the function being called, if any. Returns nil if the diagnostic was
- // not returned in response to a call error.
- //
- // Some errors related to calling functions are generated by HCL itself
- // rather than by the underlying function, in which case this method
- // will return nil.
- FunctionCallError() error
-}
-
-type functionCallDiagExtra struct {
- calledFunctionName string
- functionCallError error
-}
-
-func (e *functionCallDiagExtra) CalledFunctionName() string {
- return e.calledFunctionName
-}
-
-func (e *functionCallDiagExtra) FunctionCallError() error {
- return e.functionCallError
-}
-
-// FunctionCallUnknownDiagExtra is an interface implemented by a value in the Extra
-// field of some diagnostics to indicate when the error was caused by a call to
-// an unknown function.
-type FunctionCallUnknownDiagExtra interface {
- CalledFunctionName() string
- CalledFunctionNamespace() string
-}
-
-type functionCallUnknown struct {
- name string
- namespace string
-}
-
-func (e *functionCallUnknown) CalledFunctionName() string {
- return e.name
-}
-
-func (e *functionCallUnknown) CalledFunctionNamespace() string {
- return e.namespace
-}
-
-type ConditionalExpr struct {
- Condition Expression
- TrueResult Expression
- FalseResult Expression
-
- SrcRange hcl.Range
-}
-
-func (e *ConditionalExpr) walkChildNodes(w internalWalkFunc) {
- w(e.Condition)
- w(e.TrueResult)
- w(e.FalseResult)
-}
-
-func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- trueResult, trueDiags := e.TrueResult.Value(ctx)
- falseResult, falseDiags := e.FalseResult.Value(ctx)
- var diags hcl.Diagnostics
-
- resultType := cty.DynamicPseudoType
- convs := make([]convert.Conversion, 2)
-
- switch {
- // If either case is a dynamic null value (which would result from a
- // literal null in the config), we know that it can convert to the expected
- // type of the opposite case, and we don't need to speculatively reduce the
- // final result type to DynamicPseudoType.
-
- // If we know that either Type is a DynamicPseudoType, we can be certain
- // that the other value can convert since it's a pass-through, and we don't
- // need to unify the types. If the final evaluation results in the dynamic
- // value being returned, there's no conversion we can do, so we return the
- // value directly.
- case trueResult.RawEquals(cty.NullVal(cty.DynamicPseudoType)):
- resultType = falseResult.Type()
- convs[0] = convert.GetConversionUnsafe(cty.DynamicPseudoType, resultType)
- case falseResult.RawEquals(cty.NullVal(cty.DynamicPseudoType)):
- resultType = trueResult.Type()
- convs[1] = convert.GetConversionUnsafe(cty.DynamicPseudoType, resultType)
- case trueResult.Type() == cty.DynamicPseudoType, falseResult.Type() == cty.DynamicPseudoType:
- // the final resultType type is still unknown
- // we don't need to get the conversion, because both are a noop.
-
- default:
- // Try to find a type that both results can be converted to.
- resultType, convs = convert.UnifyUnsafe([]cty.Type{trueResult.Type(), falseResult.Type()})
- }
-
- if resultType == cty.NilType {
- return cty.DynamicVal, hcl.Diagnostics{
- {
- Severity: hcl.DiagError,
- Summary: "Inconsistent conditional result types",
- Detail: fmt.Sprintf(
- "The true and false result expressions must have consistent types. %s.",
- describeConditionalTypeMismatch(trueResult.Type(), falseResult.Type()),
- ),
- Subject: hcl.RangeBetween(e.TrueResult.Range(), e.FalseResult.Range()).Ptr(),
- Context: &e.SrcRange,
- Expression: e,
- EvalContext: ctx,
- },
- }
- }
-
- condResult, condDiags := e.Condition.Value(ctx)
- diags = append(diags, condDiags...)
- if condResult.IsNull() {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Null condition",
- Detail: "The condition value is null. Conditions must either be true or false.",
- Subject: e.Condition.Range().Ptr(),
- Context: &e.SrcRange,
- Expression: e.Condition,
- EvalContext: ctx,
- })
- return cty.UnknownVal(resultType), diags
- }
- if !condResult.IsKnown() {
- // we use the unmarked values throughout the unknown branch
- _, condResultMarks := condResult.Unmark()
- trueResult, trueResultMarks := trueResult.Unmark()
- falseResult, falseResultMarks := falseResult.Unmark()
-
- // use a value to merge marks
- _, resMarks := cty.DynamicVal.WithMarks(condResultMarks, trueResultMarks, falseResultMarks).Unmark()
-
- trueRange := trueResult.Range()
- falseRange := falseResult.Range()
-
- // if both branches are known to be null, then the result must still be null
- if trueResult.IsNull() && falseResult.IsNull() {
- return cty.NullVal(resultType).WithMarks(resMarks), diags
- }
-
- // We might be able to offer a refined range for the result based on
- // the two possible outcomes.
- if trueResult.Type() == cty.Number && falseResult.Type() == cty.Number {
- ref := cty.UnknownVal(cty.Number).Refine()
- if trueRange.DefinitelyNotNull() && falseRange.DefinitelyNotNull() {
- ref = ref.NotNull()
- }
-
- falseLo, falseLoInc := falseRange.NumberLowerBound()
- falseHi, falseHiInc := falseRange.NumberUpperBound()
- trueLo, trueLoInc := trueRange.NumberLowerBound()
- trueHi, trueHiInc := trueRange.NumberUpperBound()
-
- if falseLo.IsKnown() && trueLo.IsKnown() {
- lo, loInc := falseLo, falseLoInc
- switch {
- case trueLo.LessThan(falseLo).True():
- lo, loInc = trueLo, trueLoInc
- case trueLo.Equals(falseLo).True():
- loInc = trueLoInc || falseLoInc
- }
-
- ref = ref.NumberRangeLowerBound(lo, loInc)
- }
-
- if falseHi.IsKnown() && trueHi.IsKnown() {
- hi, hiInc := falseHi, falseHiInc
- switch {
- case trueHi.GreaterThan(falseHi).True():
- hi, hiInc = trueHi, trueHiInc
- case trueHi.Equals(falseHi).True():
- hiInc = trueHiInc || falseHiInc
- }
- ref = ref.NumberRangeUpperBound(hi, hiInc)
- }
-
- return ref.NewValue().WithMarks(resMarks), diags
- }
-
- if trueResult.Type().IsCollectionType() && falseResult.Type().IsCollectionType() {
- if trueResult.Type().Equals(falseResult.Type()) {
- ref := cty.UnknownVal(resultType).Refine()
- if trueRange.DefinitelyNotNull() && falseRange.DefinitelyNotNull() {
- ref = ref.NotNull()
- }
-
- falseLo := falseRange.LengthLowerBound()
- falseHi := falseRange.LengthUpperBound()
- trueLo := trueRange.LengthLowerBound()
- trueHi := trueRange.LengthUpperBound()
-
- lo := falseLo
- if trueLo < falseLo {
- lo = trueLo
- }
-
- hi := falseHi
- if trueHi > falseHi {
- hi = trueHi
- }
-
- ref = ref.CollectionLengthLowerBound(lo).CollectionLengthUpperBound(hi)
- return ref.NewValue().WithMarks(resMarks), diags
- }
- }
-
- ret := cty.UnknownVal(resultType)
- if trueRange.DefinitelyNotNull() && falseRange.DefinitelyNotNull() {
- ret = ret.RefineNotNull()
- }
- return ret.WithMarks(resMarks), diags
- }
-
- condResult, err := convert.Convert(condResult, cty.Bool)
- if err != nil {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Incorrect condition type",
- Detail: "The condition expression must be of type bool.",
- Subject: e.Condition.Range().Ptr(),
- Context: &e.SrcRange,
- Expression: e.Condition,
- EvalContext: ctx,
- })
- return cty.UnknownVal(resultType), diags
- }
-
- // Unmark result before testing for truthiness
- condResult, _ = condResult.UnmarkDeep()
- if condResult.True() {
- diags = append(diags, trueDiags...)
- if convs[0] != nil {
- var err error
- trueResult, err = convs[0](trueResult)
- if err != nil {
- // Unsafe conversion failed with the concrete result value
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Inconsistent conditional result types",
- Detail: fmt.Sprintf(
- "The true result value has the wrong type: %s.",
- err.Error(),
- ),
- Subject: e.TrueResult.Range().Ptr(),
- Context: &e.SrcRange,
- Expression: e.TrueResult,
- EvalContext: ctx,
- })
- trueResult = cty.UnknownVal(resultType)
- }
- }
- return trueResult, diags
- } else {
- diags = append(diags, falseDiags...)
- if convs[1] != nil {
- var err error
- falseResult, err = convs[1](falseResult)
- if err != nil {
- // Unsafe conversion failed with the concrete result value
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Inconsistent conditional result types",
- Detail: fmt.Sprintf(
- "The false result value has the wrong type: %s.",
- err.Error(),
- ),
- Subject: e.FalseResult.Range().Ptr(),
- Context: &e.SrcRange,
- Expression: e.FalseResult,
- EvalContext: ctx,
- })
- falseResult = cty.UnknownVal(resultType)
- }
- }
- return falseResult, diags
- }
-}
-
-// describeConditionalTypeMismatch makes a best effort to describe the
-// difference between types in the true and false arms of a conditional
-// expression in a way that would be useful to someone trying to understand
-// why their conditional expression isn't valid.
-//
-// NOTE: This function is only designed to deal with situations
-// where trueTy and falseTy are different. Calling it with two equal
-// types will produce a nonsense result. This function also only really
-// deals with situations that type unification can't resolve, so we should
-// call this function only after trying type unification first.
-func describeConditionalTypeMismatch(trueTy, falseTy cty.Type) string {
- // The main tricky cases here are when both trueTy and falseTy are
- // of the same structural type kind, such as both being object types
- // or both being tuple types. In that case the "FriendlyName" method
- // returns only "object" or "tuple" and so we need to do some more
- // work to describe what's different inside them.
-
- switch {
- case trueTy.IsObjectType() && falseTy.IsObjectType():
- // We'll first gather up the attribute names and sort them. In the
- // event that there are multiple attributes that disagree across
- // the two types, we'll prefer to report the one that sorts lexically
- // least just so that our error message is consistent between
- // evaluations.
- var trueAttrs, falseAttrs []string
- for name := range trueTy.AttributeTypes() {
- trueAttrs = append(trueAttrs, name)
- }
- sort.Strings(trueAttrs)
- for name := range falseTy.AttributeTypes() {
- falseAttrs = append(falseAttrs, name)
- }
- sort.Strings(falseAttrs)
-
- for _, name := range trueAttrs {
- if !falseTy.HasAttribute(name) {
- return fmt.Sprintf("The 'true' value includes object attribute %q, which is absent in the 'false' value", name)
- }
- trueAty := trueTy.AttributeType(name)
- falseAty := falseTy.AttributeType(name)
- if !trueAty.Equals(falseAty) {
- // For deeply-nested differences this will likely get very
- // clunky quickly by nesting these messages inside one another,
- // but we'll accept that for now in the interests of producing
- // _some_ useful feedback, even if it isn't as concise as
- // we'd prefer it to be. Deeply-nested structures in
- // conditionals are thankfully not super common.
- return fmt.Sprintf(
- "Type mismatch for object attribute %q: %s",
- name, describeConditionalTypeMismatch(trueAty, falseAty),
- )
- }
- }
- for _, name := range falseAttrs {
- if !trueTy.HasAttribute(name) {
- return fmt.Sprintf("The 'false' value includes object attribute %q, which is absent in the 'true' value", name)
- }
- // NOTE: We don't need to check the attribute types again, because
- // any attribute that both types have in common would already have
- // been checked in the previous loop.
- }
- case trueTy.IsTupleType() && falseTy.IsTupleType():
- trueEtys := trueTy.TupleElementTypes()
- falseEtys := falseTy.TupleElementTypes()
-
- if trueCount, falseCount := len(trueEtys), len(falseEtys); trueCount != falseCount {
- return fmt.Sprintf("The 'true' tuple has length %d, but the 'false' tuple has length %d", trueCount, falseCount)
- }
-
- // NOTE: Thanks to the condition above, we know that both tuples are
- // of the same length and so they must have some differing types
- // instead.
- for i := range trueEtys {
- trueEty := trueEtys[i]
- falseEty := falseEtys[i]
-
- if !trueEty.Equals(falseEty) {
- // For deeply-nested differences this will likely get very
- // clunky quickly by nesting these messages inside one another,
- // but we'll accept that for now in the interests of producing
- // _some_ useful feedback, even if it isn't as concise as
- // we'd prefer it to be. Deeply-nested structures in
- // conditionals are thankfully not super common.
- return fmt.Sprintf(
- "Type mismatch for tuple element %d: %s",
- i, describeConditionalTypeMismatch(trueEty, falseEty),
- )
- }
- }
- case trueTy.IsCollectionType() && falseTy.IsCollectionType():
- // For this case we're specifically interested in the situation where:
- // - both collections are of the same kind, AND
- // - the element types of both are either object or tuple types.
- // This is just to avoid writing a useless statement like
- // "The 'true' value is list of object, but the 'false' value is list of object".
- // This still doesn't account for more awkward cases like collections
- // of collections of structural types, but we won't let perfect be
- // the enemy of the good.
- trueEty := trueTy.ElementType()
- falseEty := falseTy.ElementType()
- if (trueTy.IsListType() && falseTy.IsListType()) || (trueTy.IsMapType() && falseTy.IsMapType()) || (trueTy.IsSetType() && falseTy.IsSetType()) {
- if (trueEty.IsObjectType() && falseEty.IsObjectType()) || (trueEty.IsTupleType() && falseEty.IsTupleType()) {
- noun := "collection"
- switch { // NOTE: We now know that trueTy and falseTy have the same collection kind
- case trueTy.IsListType():
- noun = "list"
- case trueTy.IsSetType():
- noun = "set"
- case trueTy.IsMapType():
- noun = "map"
- }
- return fmt.Sprintf(
- "Mismatched %s element types: %s",
- noun, describeConditionalTypeMismatch(trueEty, falseEty),
- )
- }
- }
- }
-
- // If we don't manage any more specialized message, we'll just report
- // what the two types are.
- trueName := trueTy.FriendlyName()
- falseName := falseTy.FriendlyName()
- if trueName == falseName {
- // Absolute last resort for when we have no special rule above but
- // we have two types with the same friendly name anyway. This is
- // the most vague of all possible messages but is reserved for
- // particularly awkward cases, like lists of lists of differing tuple
- // types.
- return "At least one deeply-nested attribute or element is not compatible across both the 'true' and the 'false' value"
- }
- return fmt.Sprintf(
- "The 'true' value is %s, but the 'false' value is %s",
- trueTy.FriendlyName(), falseTy.FriendlyName(),
- )
-
-}
-
-func (e *ConditionalExpr) Range() hcl.Range {
- return e.SrcRange
-}
-
-func (e *ConditionalExpr) StartRange() hcl.Range {
- return e.Condition.StartRange()
-}
-
-type IndexExpr struct {
- Collection Expression
- Key Expression
-
- SrcRange hcl.Range
- OpenRange hcl.Range
- BracketRange hcl.Range
-}
-
-func (e *IndexExpr) walkChildNodes(w internalWalkFunc) {
- w(e.Collection)
- w(e.Key)
-}
-
-func (e *IndexExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- var diags hcl.Diagnostics
- coll, collDiags := e.Collection.Value(ctx)
- key, keyDiags := e.Key.Value(ctx)
- diags = append(diags, collDiags...)
- diags = append(diags, keyDiags...)
-
- val, indexDiags := hcl.Index(coll, key, &e.BracketRange)
- setDiagEvalContext(indexDiags, e, ctx)
- diags = append(diags, indexDiags...)
- return val, diags
-}
-
-func (e *IndexExpr) Range() hcl.Range {
- return e.SrcRange
-}
-
-func (e *IndexExpr) StartRange() hcl.Range {
- return e.OpenRange
-}
-
-type TupleConsExpr struct {
- Exprs []Expression
-
- SrcRange hcl.Range
- OpenRange hcl.Range
-}
-
-func (e *TupleConsExpr) walkChildNodes(w internalWalkFunc) {
- for _, expr := range e.Exprs {
- w(expr)
- }
-}
-
-func (e *TupleConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- var vals []cty.Value
- var diags hcl.Diagnostics
-
- vals = make([]cty.Value, len(e.Exprs))
- for i, expr := range e.Exprs {
- val, valDiags := expr.Value(ctx)
- vals[i] = val
- diags = append(diags, valDiags...)
- }
-
- return cty.TupleVal(vals), diags
-}
-
-func (e *TupleConsExpr) Range() hcl.Range {
- return e.SrcRange
-}
-
-func (e *TupleConsExpr) StartRange() hcl.Range {
- return e.OpenRange
-}
-
-// Implementation for hcl.ExprList
-func (e *TupleConsExpr) ExprList() []hcl.Expression {
- ret := make([]hcl.Expression, len(e.Exprs))
- for i, expr := range e.Exprs {
- ret[i] = expr
- }
- return ret
-}
-
-type ObjectConsExpr struct {
- Items []ObjectConsItem
-
- SrcRange hcl.Range
- OpenRange hcl.Range
-}
-
-type ObjectConsItem struct {
- KeyExpr Expression
- ValueExpr Expression
-}
-
-func (e *ObjectConsExpr) walkChildNodes(w internalWalkFunc) {
- for _, item := range e.Items {
- w(item.KeyExpr)
- w(item.ValueExpr)
- }
-}
-
-func (e *ObjectConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- var vals map[string]cty.Value
- var diags hcl.Diagnostics
- var marks []cty.ValueMarks
-
- // This will get set to true if we fail to produce any of our keys,
- // either because they are actually unknown or if the evaluation produces
- // errors. In all of these case we must return DynamicPseudoType because
- // we're unable to know the full set of keys our object has, and thus
- // we can't produce a complete value of the intended type.
- //
- // We still evaluate all of the item keys and values to make sure that we
- // get as complete as possible a set of diagnostics.
- known := true
-
- vals = make(map[string]cty.Value, len(e.Items))
- for _, item := range e.Items {
- key, keyDiags := item.KeyExpr.Value(ctx)
- diags = append(diags, keyDiags...)
-
- val, valDiags := item.ValueExpr.Value(ctx)
- diags = append(diags, valDiags...)
-
- if keyDiags.HasErrors() {
- known = false
- continue
- }
-
- if key.IsNull() {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Null value as key",
- Detail: "Can't use a null value as a key.",
- Subject: item.ValueExpr.Range().Ptr(),
- Expression: item.KeyExpr,
- EvalContext: ctx,
- })
- known = false
- continue
- }
-
- key, keyMarks := key.Unmark()
- marks = append(marks, keyMarks)
-
- var err error
- key, err = convert.Convert(key, cty.String)
- if err != nil {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Incorrect key type",
- Detail: fmt.Sprintf("Can't use this value as a key: %s.", err.Error()),
- Subject: item.KeyExpr.Range().Ptr(),
- Expression: item.KeyExpr,
- EvalContext: ctx,
- })
- known = false
- continue
- }
-
- if !key.IsKnown() {
- known = false
- continue
- }
-
- keyStr := key.AsString()
-
- vals[keyStr] = val
- }
-
- if !known {
- return cty.DynamicVal, diags
- }
-
- return cty.ObjectVal(vals).WithMarks(marks...), diags
-}
-
-func (e *ObjectConsExpr) Range() hcl.Range {
- return e.SrcRange
-}
-
-func (e *ObjectConsExpr) StartRange() hcl.Range {
- return e.OpenRange
-}
-
-// Implementation for hcl.ExprMap
-func (e *ObjectConsExpr) ExprMap() []hcl.KeyValuePair {
- ret := make([]hcl.KeyValuePair, len(e.Items))
- for i, item := range e.Items {
- ret[i] = hcl.KeyValuePair{
- Key: item.KeyExpr,
- Value: item.ValueExpr,
- }
- }
- return ret
-}
-
-// ObjectConsKeyExpr is a special wrapper used only for ObjectConsExpr keys,
-// which deals with the special case that a naked identifier in that position
-// must be interpreted as a literal string rather than evaluated directly.
-type ObjectConsKeyExpr struct {
- Wrapped Expression
- ForceNonLiteral bool
-}
-
-func (e *ObjectConsKeyExpr) literalName() string {
- // This is our logic for deciding whether to behave like a literal string.
- // We lean on our AbsTraversalForExpr implementation here, which already
- // deals with some awkward cases like the expression being the result
- // of the keywords "null", "true" and "false" which we'd want to interpret
- // as keys here too.
- return hcl.ExprAsKeyword(e.Wrapped)
-}
-
-func (e *ObjectConsKeyExpr) walkChildNodes(w internalWalkFunc) {
- // We only treat our wrapped expression as a real expression if we're
- // not going to interpret it as a literal.
- if e.literalName() == "" {
- w(e.Wrapped)
- }
-}
-
-func (e *ObjectConsKeyExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- // Because we accept a naked identifier as a literal key rather than a
- // reference, it's confusing to accept a traversal containing periods
- // here since we can't tell if the user intends to create a key with
- // periods or actually reference something. To avoid confusing downstream
- // errors we'll just prohibit a naked multi-step traversal here and
- // require the user to state their intent more clearly.
- // (This is handled at evaluation time rather than parse time because
- // an application using static analysis _can_ accept a naked multi-step
- // traversal here, if desired.)
- if !e.ForceNonLiteral {
- if travExpr, isTraversal := e.Wrapped.(*ScopeTraversalExpr); isTraversal && len(travExpr.Traversal) > 1 {
- var diags hcl.Diagnostics
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Ambiguous attribute key",
- Detail: "If this expression is intended to be a reference, wrap it in parentheses. If it's instead intended as a literal name containing periods, wrap it in quotes to create a string literal.",
- Subject: e.Range().Ptr(),
- })
- return cty.DynamicVal, diags
- }
-
- if ln := e.literalName(); ln != "" {
- return cty.StringVal(ln), nil
- }
- }
- return e.Wrapped.Value(ctx)
-}
-
-func (e *ObjectConsKeyExpr) Range() hcl.Range {
- return e.Wrapped.Range()
-}
-
-func (e *ObjectConsKeyExpr) StartRange() hcl.Range {
- return e.Wrapped.StartRange()
-}
-
-// Implementation for hcl.AbsTraversalForExpr.
-func (e *ObjectConsKeyExpr) AsTraversal() hcl.Traversal {
- // If we're forcing a non-literal then we can never be interpreted
- // as a traversal.
- if e.ForceNonLiteral {
- return nil
- }
-
- // We can produce a traversal only if our wrappee can.
- st, diags := hcl.AbsTraversalForExpr(e.Wrapped)
- if diags.HasErrors() {
- return nil
- }
-
- return st
-}
-
-func (e *ObjectConsKeyExpr) UnwrapExpression() Expression {
- return e.Wrapped
-}
-
-// ForExpr represents iteration constructs:
-//
-// tuple = [for i, v in list: upper(v) if i > 2]
-// object = {for k, v in map: k => upper(v)}
-// object_of_tuples = {for v in list: v.key: v...}
-type ForExpr struct {
- KeyVar string // empty if ignoring the key
- ValVar string
-
- CollExpr Expression
-
- KeyExpr Expression // nil when producing a tuple
- ValExpr Expression
- CondExpr Expression // null if no "if" clause is present
-
- Group bool // set if the ellipsis is used on the value in an object for
-
- SrcRange hcl.Range
- OpenRange hcl.Range
- CloseRange hcl.Range
-}
-
-func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- var diags hcl.Diagnostics
- var marks []cty.ValueMarks
-
- collVal, collDiags := e.CollExpr.Value(ctx)
- diags = append(diags, collDiags...)
-
- if collVal.IsNull() {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Iteration over null value",
- Detail: "A null value cannot be used as the collection in a 'for' expression.",
- Subject: e.CollExpr.Range().Ptr(),
- Context: &e.SrcRange,
- Expression: e.CollExpr,
- EvalContext: ctx,
- })
- return cty.DynamicVal, diags
- }
- if collVal.Type() == cty.DynamicPseudoType {
- return cty.DynamicVal, diags
- }
- // Unmark collection before checking for iterability, because marked
- // values cannot be iterated
- collVal, collMarks := collVal.Unmark()
- marks = append(marks, collMarks)
- if !collVal.CanIterateElements() {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Iteration over non-iterable value",
- Detail: fmt.Sprintf(
- "A value of type %s cannot be used as the collection in a 'for' expression.",
- collVal.Type().FriendlyName(),
- ),
- Subject: e.CollExpr.Range().Ptr(),
- Context: &e.SrcRange,
- Expression: e.CollExpr,
- EvalContext: ctx,
- })
- return cty.DynamicVal, diags
- }
- if !collVal.IsKnown() {
- return cty.DynamicVal, diags
- }
-
- // Before we start we'll do an early check to see if any CondExpr we've
- // been given is of the wrong type. This isn't 100% reliable (it may
- // be DynamicVal until real values are given) but it should catch some
- // straightforward cases and prevent a barrage of repeated errors.
- if e.CondExpr != nil {
- childCtx := ctx.NewChild()
- childCtx.Variables = map[string]cty.Value{}
- if e.KeyVar != "" {
- childCtx.Variables[e.KeyVar] = cty.DynamicVal
- }
- childCtx.Variables[e.ValVar] = cty.DynamicVal
-
- result, condDiags := e.CondExpr.Value(childCtx)
- diags = append(diags, condDiags...)
- if result.IsNull() {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Condition is null",
- Detail: "The value of the 'if' clause must not be null.",
- Subject: e.CondExpr.Range().Ptr(),
- Context: &e.SrcRange,
- Expression: e.CondExpr,
- EvalContext: ctx,
- })
- return cty.DynamicVal, diags
- }
- _, err := convert.Convert(result, cty.Bool)
- if err != nil {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid 'for' condition",
- Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()),
- Subject: e.CondExpr.Range().Ptr(),
- Context: &e.SrcRange,
- Expression: e.CondExpr,
- EvalContext: ctx,
- })
- return cty.DynamicVal, diags
- }
- if condDiags.HasErrors() {
- return cty.DynamicVal, diags
- }
- }
-
- if e.KeyExpr != nil {
- // Producing an object
- var vals map[string]cty.Value
- var groupVals map[string][]cty.Value
- if e.Group {
- groupVals = map[string][]cty.Value{}
- } else {
- vals = map[string]cty.Value{}
- }
-
- it := collVal.ElementIterator()
-
- known := true
- for it.Next() {
- k, v := it.Element()
- childCtx := ctx.NewChild()
- childCtx.Variables = map[string]cty.Value{}
- if e.KeyVar != "" {
- childCtx.Variables[e.KeyVar] = k
- }
- childCtx.Variables[e.ValVar] = v
-
- if e.CondExpr != nil {
- includeRaw, condDiags := e.CondExpr.Value(childCtx)
- diags = append(diags, condDiags...)
- if includeRaw.IsNull() {
- if known {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid 'for' condition",
- Detail: "The value of the 'if' clause must not be null.",
- Subject: e.CondExpr.Range().Ptr(),
- Context: &e.SrcRange,
- Expression: e.CondExpr,
- EvalContext: childCtx,
- })
- }
- known = false
- continue
- }
- include, err := convert.Convert(includeRaw, cty.Bool)
- if err != nil {
- if known {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid 'for' condition",
- Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()),
- Subject: e.CondExpr.Range().Ptr(),
- Context: &e.SrcRange,
- Expression: e.CondExpr,
- EvalContext: childCtx,
- })
- }
- known = false
- continue
- }
- if !include.IsKnown() {
- known = false
- continue
- }
-
- // Extract and merge marks from the include expression into the
- // main set of marks
- includeUnmarked, includeMarks := include.Unmark()
- marks = append(marks, includeMarks)
- if includeUnmarked.False() {
- // Skip this element
- continue
- }
- }
-
- keyRaw, keyDiags := e.KeyExpr.Value(childCtx)
- diags = append(diags, keyDiags...)
- if keyRaw.IsNull() {
- if known {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid object key",
- Detail: "Key expression in 'for' expression must not produce a null value.",
- Subject: e.KeyExpr.Range().Ptr(),
- Context: &e.SrcRange,
- Expression: e.KeyExpr,
- EvalContext: childCtx,
- })
- }
- known = false
- continue
- }
- if !keyRaw.IsKnown() {
- known = false
- continue
- }
-
- key, err := convert.Convert(keyRaw, cty.String)
- if err != nil {
- if known {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid object key",
- Detail: fmt.Sprintf("The key expression produced an invalid result: %s.", err.Error()),
- Subject: e.KeyExpr.Range().Ptr(),
- Context: &e.SrcRange,
- Expression: e.KeyExpr,
- EvalContext: childCtx,
- })
- }
- known = false
- continue
- }
-
- key, keyMarks := key.Unmark()
- marks = append(marks, keyMarks)
-
- val, valDiags := e.ValExpr.Value(childCtx)
- diags = append(diags, valDiags...)
-
- if e.Group {
- k := key.AsString()
- groupVals[k] = append(groupVals[k], val)
- } else {
- k := key.AsString()
- if _, exists := vals[k]; exists {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Duplicate object key",
- Detail: fmt.Sprintf(
- "Two different items produced the key %q in this 'for' expression. If duplicates are expected, use the ellipsis (...) after the value expression to enable grouping by key.",
- k,
- ),
- Subject: e.KeyExpr.Range().Ptr(),
- Context: &e.SrcRange,
- Expression: e.KeyExpr,
- EvalContext: childCtx,
- })
- } else {
- vals[key.AsString()] = val
- }
- }
- }
-
- if !known {
- return cty.DynamicVal, diags
- }
-
- if e.Group {
- vals = map[string]cty.Value{}
- for k, gvs := range groupVals {
- vals[k] = cty.TupleVal(gvs)
- }
- }
-
- return cty.ObjectVal(vals).WithMarks(marks...), diags
-
- } else {
- // Producing a tuple
- vals := []cty.Value{}
-
- it := collVal.ElementIterator()
-
- known := true
- for it.Next() {
- k, v := it.Element()
- childCtx := ctx.NewChild()
- childCtx.Variables = map[string]cty.Value{}
- if e.KeyVar != "" {
- childCtx.Variables[e.KeyVar] = k
- }
- childCtx.Variables[e.ValVar] = v
-
- if e.CondExpr != nil {
- includeRaw, condDiags := e.CondExpr.Value(childCtx)
- diags = append(diags, condDiags...)
- if includeRaw.IsNull() {
- if known {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid 'for' condition",
- Detail: "The value of the 'if' clause must not be null.",
- Subject: e.CondExpr.Range().Ptr(),
- Context: &e.SrcRange,
- Expression: e.CondExpr,
- EvalContext: childCtx,
- })
- }
- known = false
- continue
- }
- if !includeRaw.IsKnown() {
- // We will eventually return DynamicVal, but we'll continue
- // iterating in case there are other diagnostics to gather
- // for later elements.
- known = false
- continue
- }
-
- include, err := convert.Convert(includeRaw, cty.Bool)
- if err != nil {
- if known {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid 'for' condition",
- Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()),
- Subject: e.CondExpr.Range().Ptr(),
- Context: &e.SrcRange,
- Expression: e.CondExpr,
- EvalContext: childCtx,
- })
- }
- known = false
- continue
- }
-
- // Extract and merge marks from the include expression into the
- // main set of marks
- includeUnmarked, includeMarks := include.Unmark()
- marks = append(marks, includeMarks)
- if includeUnmarked.False() {
- // Skip this element
- continue
- }
- }
-
- val, valDiags := e.ValExpr.Value(childCtx)
- diags = append(diags, valDiags...)
- vals = append(vals, val)
- }
-
- if !known {
- return cty.DynamicVal, diags
- }
-
- return cty.TupleVal(vals).WithMarks(marks...), diags
- }
-}
-
-func (e *ForExpr) walkChildNodes(w internalWalkFunc) {
- w(e.CollExpr)
-
- scopeNames := map[string]struct{}{}
- if e.KeyVar != "" {
- scopeNames[e.KeyVar] = struct{}{}
- }
- if e.ValVar != "" {
- scopeNames[e.ValVar] = struct{}{}
- }
-
- if e.KeyExpr != nil {
- w(ChildScope{
- LocalNames: scopeNames,
- Expr: e.KeyExpr,
- })
- }
- w(ChildScope{
- LocalNames: scopeNames,
- Expr: e.ValExpr,
- })
- if e.CondExpr != nil {
- w(ChildScope{
- LocalNames: scopeNames,
- Expr: e.CondExpr,
- })
- }
-}
-
-func (e *ForExpr) Range() hcl.Range {
- return e.SrcRange
-}
-
-func (e *ForExpr) StartRange() hcl.Range {
- return e.OpenRange
-}
-
-type SplatExpr struct {
- Source Expression
- Each Expression
- Item *AnonSymbolExpr
-
- SrcRange hcl.Range
- MarkerRange hcl.Range
-}
-
-func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- sourceVal, diags := e.Source.Value(ctx)
- if diags.HasErrors() {
- // We'll evaluate our "Each" expression here just to see if it
- // produces any more diagnostics we can report. Since we're not
- // assigning a value to our AnonSymbolExpr here it will return
- // DynamicVal, which should short-circuit any use of it.
- _, itemDiags := e.Item.Value(ctx)
- diags = append(diags, itemDiags...)
- return cty.DynamicVal, diags
- }
-
- sourceTy := sourceVal.Type()
-
- // A "special power" of splat expressions is that they can be applied
- // both to tuples/lists and to other values, and in the latter case
- // the value will be treated as an implicit single-item tuple, or as
- // an empty tuple if the value is null.
- autoUpgrade := !(sourceTy.IsTupleType() || sourceTy.IsListType() || sourceTy.IsSetType())
-
- if sourceVal.IsNull() {
- if autoUpgrade {
- return cty.EmptyTupleVal.WithSameMarks(sourceVal), diags
- }
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Splat of null value",
- Detail: "Splat expressions (with the * symbol) cannot be applied to null sequences.",
- Subject: e.Source.Range().Ptr(),
- Context: hcl.RangeBetween(e.Source.Range(), e.MarkerRange).Ptr(),
- Expression: e.Source,
- EvalContext: ctx,
- })
- return cty.DynamicVal, diags
- }
-
- if sourceTy == cty.DynamicPseudoType {
- // If we don't even know the _type_ of our source value yet then
- // we'll need to defer all processing, since we can't decide our
- // result type either.
- return cty.DynamicVal.WithSameMarks(sourceVal), diags
- }
-
- upgradedUnknown := false
- if autoUpgrade {
- // If we're upgrading an unknown value to a tuple/list, the result
- // cannot be known. Otherwise a tuple containing an unknown value will
- // upgrade to a different number of elements depending on whether
- // sourceVal becomes null or not.
- // We record this condition here so we can process any remaining
- // expression after the * to verify the result of the traversal. For
- // example, it is valid to use a splat on a single object to retrieve a
- // list of a single attribute, but we still need to check if that
- // attribute actually exists.
- if !sourceVal.IsKnown() {
- unmarkedVal, _ := sourceVal.Unmark()
- sourceRng := unmarkedVal.Range()
- if sourceRng.CouldBeNull() {
- upgradedUnknown = true
- }
- }
-
- sourceVal = cty.TupleVal([]cty.Value{sourceVal}).WithSameMarks(sourceVal)
- sourceTy = sourceVal.Type()
- }
-
- // We'll compute our result type lazily if we need it. In the normal case
- // it's inferred automatically from the value we construct.
- resultTy := func() (cty.Type, hcl.Diagnostics) {
- chiCtx := ctx.NewChild()
- var diags hcl.Diagnostics
- switch {
- case sourceTy.IsListType() || sourceTy.IsSetType():
- ety := sourceTy.ElementType()
- e.Item.setValue(chiCtx, cty.UnknownVal(ety))
- val, itemDiags := e.Each.Value(chiCtx)
- diags = append(diags, itemDiags...)
- e.Item.clearValue(chiCtx) // clean up our temporary value
- return cty.List(val.Type()), diags
- case sourceTy.IsTupleType():
- etys := sourceTy.TupleElementTypes()
- resultTys := make([]cty.Type, 0, len(etys))
- for _, ety := range etys {
- e.Item.setValue(chiCtx, cty.UnknownVal(ety))
- val, itemDiags := e.Each.Value(chiCtx)
- diags = append(diags, itemDiags...)
- e.Item.clearValue(chiCtx) // clean up our temporary value
- resultTys = append(resultTys, val.Type())
- }
- return cty.Tuple(resultTys), diags
- default:
- // Should never happen because of our promotion to list above.
- return cty.DynamicPseudoType, diags
- }
- }
-
- if !sourceVal.IsKnown() {
- // We can't produce a known result in this case, but we'll still
- // indicate what the result type would be, allowing any downstream type
- // checking to proceed.
- ty, tyDiags := resultTy()
- diags = append(diags, tyDiags...)
- ret := cty.UnknownVal(ty)
- if ty != cty.DynamicPseudoType {
- ret = ret.RefineNotNull()
- }
- if ty.IsListType() && sourceVal.Type().IsCollectionType() {
- // We can refine the length of an unknown list result based on
- // the source collection's own length.
- sv, _ := sourceVal.Unmark()
- sourceRng := sv.Range()
- ret = ret.Refine().
- CollectionLengthLowerBound(sourceRng.LengthLowerBound()).
- CollectionLengthUpperBound(sourceRng.LengthUpperBound()).
- NewValue()
- }
- return ret.WithSameMarks(sourceVal), diags
- }
-
- // Unmark the collection, and save the marks to apply to the returned
- // collection result
- sourceVal, marks := sourceVal.Unmark()
- vals := make([]cty.Value, 0, sourceVal.LengthInt())
- it := sourceVal.ElementIterator()
- if ctx == nil {
- // we need a context to use our AnonSymbolExpr, so we'll just
- // make an empty one here to use as a placeholder.
- ctx = ctx.NewChild()
- }
- isKnown := true
- for it.Next() {
- _, sourceItem := it.Element()
- e.Item.setValue(ctx, sourceItem)
- newItem, itemDiags := e.Each.Value(ctx)
- diags = append(diags, itemDiags...)
- if itemDiags.HasErrors() {
- isKnown = false
- }
- vals = append(vals, newItem)
- }
- e.Item.clearValue(ctx) // clean up our temporary value
-
- if upgradedUnknown {
- return cty.DynamicVal.WithMarks(marks), diags
- }
-
- if !isKnown {
- // We'll ingore the resultTy diagnostics in this case since they
- // will just be the same errors we saw while iterating above.
- ty, _ := resultTy()
- return cty.UnknownVal(ty).WithMarks(marks), diags
- }
-
- switch {
- case sourceTy.IsListType() || sourceTy.IsSetType():
- if len(vals) == 0 {
- ty, tyDiags := resultTy()
- diags = append(diags, tyDiags...)
- return cty.ListValEmpty(ty.ElementType()).WithMarks(marks), diags
- }
- return cty.ListVal(vals).WithMarks(marks), diags
- default:
- return cty.TupleVal(vals).WithMarks(marks), diags
- }
-}
-
-func (e *SplatExpr) walkChildNodes(w internalWalkFunc) {
- w(e.Source)
- w(e.Each)
-}
-
-func (e *SplatExpr) Range() hcl.Range {
- return e.SrcRange
-}
-
-func (e *SplatExpr) StartRange() hcl.Range {
- return e.MarkerRange
-}
-
-// AnonSymbolExpr is used as a placeholder for a value in an expression that
-// can be applied dynamically to any value at runtime.
-//
-// This is a rather odd, synthetic expression. It is used as part of the
-// representation of splat expressions as a placeholder for the current item
-// being visited in the splat evaluation.
-//
-// AnonSymbolExpr cannot be evaluated in isolation. If its Value is called
-// directly then cty.DynamicVal will be returned. Instead, it is evaluated
-// in terms of another node (i.e. a splat expression) which temporarily
-// assigns it a value.
-type AnonSymbolExpr struct {
- SrcRange hcl.Range
-
- // values and its associated lock are used to isolate concurrent
- // evaluations of a symbol from one another. It is the calling application's
- // responsibility to ensure that the same splat expression is not evalauted
- // concurrently within the _same_ EvalContext, but it is fine and safe to
- // do cuncurrent evaluations with distinct EvalContexts.
- values map[*hcl.EvalContext]cty.Value
- valuesLock sync.RWMutex
-}
-
-func (e *AnonSymbolExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- if ctx == nil {
- return cty.DynamicVal, nil
- }
-
- e.valuesLock.RLock()
- defer e.valuesLock.RUnlock()
-
- val, exists := e.values[ctx]
- if !exists {
- return cty.DynamicVal, nil
- }
- return val, nil
-}
-
-// setValue sets a temporary local value for the expression when evaluated
-// in the given context, which must be non-nil.
-func (e *AnonSymbolExpr) setValue(ctx *hcl.EvalContext, val cty.Value) {
- e.valuesLock.Lock()
- defer e.valuesLock.Unlock()
-
- if e.values == nil {
- e.values = make(map[*hcl.EvalContext]cty.Value)
- }
- if ctx == nil {
- panic("can't setValue for a nil EvalContext")
- }
- e.values[ctx] = val
-}
-
-func (e *AnonSymbolExpr) clearValue(ctx *hcl.EvalContext) {
- e.valuesLock.Lock()
- defer e.valuesLock.Unlock()
-
- if e.values == nil {
- return
- }
- if ctx == nil {
- panic("can't clearValue for a nil EvalContext")
- }
- delete(e.values, ctx)
-}
-
-func (e *AnonSymbolExpr) walkChildNodes(w internalWalkFunc) {
- // AnonSymbolExpr is a leaf node in the tree
-}
-
-func (e *AnonSymbolExpr) Range() hcl.Range {
- return e.SrcRange
-}
-
-func (e *AnonSymbolExpr) StartRange() hcl.Range {
- return e.SrcRange
-}
-
-// ExprSyntaxError is a placeholder for an invalid expression that could not
-// be parsed due to syntax errors.
-type ExprSyntaxError struct {
- Placeholder cty.Value
- ParseDiags hcl.Diagnostics
- SrcRange hcl.Range
-}
-
-func (e *ExprSyntaxError) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- return e.Placeholder, e.ParseDiags
-}
-
-func (e *ExprSyntaxError) walkChildNodes(w internalWalkFunc) {
- // ExprSyntaxError is a leaf node in the tree
-}
-
-func (e *ExprSyntaxError) Range() hcl.Range {
- return e.SrcRange
-}
-
-func (e *ExprSyntaxError) StartRange() hcl.Range {
- return e.SrcRange
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_ops.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_ops.go
deleted file mode 100644
index 6585612c..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_ops.go
+++ /dev/null
@@ -1,271 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-import (
- "fmt"
-
- "github.com/hashicorp/hcl/v2"
- "github.com/zclconf/go-cty/cty"
- "github.com/zclconf/go-cty/cty/convert"
- "github.com/zclconf/go-cty/cty/function"
- "github.com/zclconf/go-cty/cty/function/stdlib"
-)
-
-type Operation struct {
- Impl function.Function
- Type cty.Type
-}
-
-var (
- OpLogicalOr = &Operation{
- Impl: stdlib.OrFunc,
- Type: cty.Bool,
- }
- OpLogicalAnd = &Operation{
- Impl: stdlib.AndFunc,
- Type: cty.Bool,
- }
- OpLogicalNot = &Operation{
- Impl: stdlib.NotFunc,
- Type: cty.Bool,
- }
-
- OpEqual = &Operation{
- Impl: stdlib.EqualFunc,
- Type: cty.Bool,
- }
- OpNotEqual = &Operation{
- Impl: stdlib.NotEqualFunc,
- Type: cty.Bool,
- }
-
- OpGreaterThan = &Operation{
- Impl: stdlib.GreaterThanFunc,
- Type: cty.Bool,
- }
- OpGreaterThanOrEqual = &Operation{
- Impl: stdlib.GreaterThanOrEqualToFunc,
- Type: cty.Bool,
- }
- OpLessThan = &Operation{
- Impl: stdlib.LessThanFunc,
- Type: cty.Bool,
- }
- OpLessThanOrEqual = &Operation{
- Impl: stdlib.LessThanOrEqualToFunc,
- Type: cty.Bool,
- }
-
- OpAdd = &Operation{
- Impl: stdlib.AddFunc,
- Type: cty.Number,
- }
- OpSubtract = &Operation{
- Impl: stdlib.SubtractFunc,
- Type: cty.Number,
- }
- OpMultiply = &Operation{
- Impl: stdlib.MultiplyFunc,
- Type: cty.Number,
- }
- OpDivide = &Operation{
- Impl: stdlib.DivideFunc,
- Type: cty.Number,
- }
- OpModulo = &Operation{
- Impl: stdlib.ModuloFunc,
- Type: cty.Number,
- }
- OpNegate = &Operation{
- Impl: stdlib.NegateFunc,
- Type: cty.Number,
- }
-)
-
-var binaryOps []map[TokenType]*Operation
-
-func init() {
- // This operation table maps from the operator's token type
- // to the AST operation type. All expressions produced from
- // binary operators are BinaryOp nodes.
- //
- // Binary operator groups are listed in order of precedence, with
- // the *lowest* precedence first. Operators within the same group
- // have left-to-right associativity.
- binaryOps = []map[TokenType]*Operation{
- {
- TokenOr: OpLogicalOr,
- },
- {
- TokenAnd: OpLogicalAnd,
- },
- {
- TokenEqualOp: OpEqual,
- TokenNotEqual: OpNotEqual,
- },
- {
- TokenGreaterThan: OpGreaterThan,
- TokenGreaterThanEq: OpGreaterThanOrEqual,
- TokenLessThan: OpLessThan,
- TokenLessThanEq: OpLessThanOrEqual,
- },
- {
- TokenPlus: OpAdd,
- TokenMinus: OpSubtract,
- },
- {
- TokenStar: OpMultiply,
- TokenSlash: OpDivide,
- TokenPercent: OpModulo,
- },
- }
-}
-
-type BinaryOpExpr struct {
- LHS Expression
- Op *Operation
- RHS Expression
-
- SrcRange hcl.Range
-}
-
-func (e *BinaryOpExpr) walkChildNodes(w internalWalkFunc) {
- w(e.LHS)
- w(e.RHS)
-}
-
-func (e *BinaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- impl := e.Op.Impl // assumed to be a function taking exactly two arguments
- params := impl.Params()
- lhsParam := params[0]
- rhsParam := params[1]
-
- var diags hcl.Diagnostics
-
- givenLHSVal, lhsDiags := e.LHS.Value(ctx)
- givenRHSVal, rhsDiags := e.RHS.Value(ctx)
- diags = append(diags, lhsDiags...)
- diags = append(diags, rhsDiags...)
-
- lhsVal, err := convert.Convert(givenLHSVal, lhsParam.Type)
- if err != nil {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid operand",
- Detail: fmt.Sprintf("Unsuitable value for left operand: %s.", err),
- Subject: e.LHS.Range().Ptr(),
- Context: &e.SrcRange,
- Expression: e.LHS,
- EvalContext: ctx,
- })
- }
- rhsVal, err := convert.Convert(givenRHSVal, rhsParam.Type)
- if err != nil {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid operand",
- Detail: fmt.Sprintf("Unsuitable value for right operand: %s.", err),
- Subject: e.RHS.Range().Ptr(),
- Context: &e.SrcRange,
- Expression: e.RHS,
- EvalContext: ctx,
- })
- }
-
- if diags.HasErrors() {
- // Don't actually try the call if we have errors already, since the
- // this will probably just produce a confusing duplicative diagnostic.
- return cty.UnknownVal(e.Op.Type), diags
- }
-
- args := []cty.Value{lhsVal, rhsVal}
- result, err := impl.Call(args)
- if err != nil {
- diags = append(diags, &hcl.Diagnostic{
- // FIXME: This diagnostic is useless.
- Severity: hcl.DiagError,
- Summary: "Operation failed",
- Detail: fmt.Sprintf("Error during operation: %s.", err),
- Subject: &e.SrcRange,
- Expression: e,
- EvalContext: ctx,
- })
- return cty.UnknownVal(e.Op.Type), diags
- }
-
- return result, diags
-}
-
-func (e *BinaryOpExpr) Range() hcl.Range {
- return e.SrcRange
-}
-
-func (e *BinaryOpExpr) StartRange() hcl.Range {
- return e.LHS.StartRange()
-}
-
-type UnaryOpExpr struct {
- Op *Operation
- Val Expression
-
- SrcRange hcl.Range
- SymbolRange hcl.Range
-}
-
-func (e *UnaryOpExpr) walkChildNodes(w internalWalkFunc) {
- w(e.Val)
-}
-
-func (e *UnaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- impl := e.Op.Impl // assumed to be a function taking exactly one argument
- params := impl.Params()
- param := params[0]
-
- givenVal, diags := e.Val.Value(ctx)
-
- val, err := convert.Convert(givenVal, param.Type)
- if err != nil {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid operand",
- Detail: fmt.Sprintf("Unsuitable value for unary operand: %s.", err),
- Subject: e.Val.Range().Ptr(),
- Context: &e.SrcRange,
- Expression: e.Val,
- EvalContext: ctx,
- })
- }
-
- if diags.HasErrors() {
- // Don't actually try the call if we have errors already, since the
- // this will probably just produce a confusing duplicative diagnostic.
- return cty.UnknownVal(e.Op.Type), diags
- }
-
- args := []cty.Value{val}
- result, err := impl.Call(args)
- if err != nil {
- diags = append(diags, &hcl.Diagnostic{
- // FIXME: This diagnostic is useless.
- Severity: hcl.DiagError,
- Summary: "Operation failed",
- Detail: fmt.Sprintf("Error during operation: %s.", err),
- Subject: &e.SrcRange,
- Expression: e,
- EvalContext: ctx,
- })
- return cty.UnknownVal(e.Op.Type), diags
- }
-
- return result, diags
-}
-
-func (e *UnaryOpExpr) Range() hcl.Range {
- return e.SrcRange
-}
-
-func (e *UnaryOpExpr) StartRange() hcl.Range {
- return e.SymbolRange
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go
deleted file mode 100644
index a0dc7c22..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_template.go
+++ /dev/null
@@ -1,262 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-import (
- "bytes"
- "fmt"
-
- "github.com/hashicorp/hcl/v2"
- "github.com/zclconf/go-cty/cty"
- "github.com/zclconf/go-cty/cty/convert"
-)
-
-type TemplateExpr struct {
- Parts []Expression
-
- SrcRange hcl.Range
-}
-
-func (e *TemplateExpr) walkChildNodes(w internalWalkFunc) {
- for _, part := range e.Parts {
- w(part)
- }
-}
-
-func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- buf := &bytes.Buffer{}
- var diags hcl.Diagnostics
- isKnown := true
-
- // Maintain a set of marks for values used in the template
- marks := make(cty.ValueMarks)
-
- for _, part := range e.Parts {
- partVal, partDiags := part.Value(ctx)
- diags = append(diags, partDiags...)
-
- if partVal.IsNull() {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid template interpolation value",
- Detail: "The expression result is null. Cannot include a null value in a string template.",
- Subject: part.Range().Ptr(),
- Context: &e.SrcRange,
- Expression: part,
- EvalContext: ctx,
- })
- continue
- }
-
- // Unmark the part and merge its marks into the set
- unmarkedVal, partMarks := partVal.Unmark()
- for k, v := range partMarks {
- marks[k] = v
- }
-
- if !partVal.IsKnown() {
- // If any part is unknown then the result as a whole must be
- // unknown too. We'll keep on processing the rest of the parts
- // anyway, because we want to still emit any diagnostics resulting
- // from evaluating those.
- isKnown = false
- continue
- }
-
- strVal, err := convert.Convert(unmarkedVal, cty.String)
- if err != nil {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid template interpolation value",
- Detail: fmt.Sprintf(
- "Cannot include the given value in a string template: %s.",
- err.Error(),
- ),
- Subject: part.Range().Ptr(),
- Context: &e.SrcRange,
- Expression: part,
- EvalContext: ctx,
- })
- continue
- }
-
- // If we're just continuing to validate after we found an unknown value
- // then we'll skip appending so that "buf" will contain only the
- // known prefix of the result.
- if isKnown && !diags.HasErrors() {
- buf.WriteString(strVal.AsString())
- }
- }
-
- var ret cty.Value
- if !isKnown {
- ret = cty.UnknownVal(cty.String)
- if !diags.HasErrors() { // Invalid input means our partial result buffer is suspect
- if knownPrefix := buf.String(); knownPrefix != "" {
- byteLen := len(knownPrefix)
- // Impose a reasonable upper limit to avoid producing too long a prefix.
- // The 128 B is about 10% of the safety limits in cty's msgpack decoder.
- // @see https://github.com/zclconf/go-cty/blob/v1.13.2/cty/msgpack/unknown.go#L170-L175
- //
- // This operation is safe because StringPrefix removes incomplete trailing grapheme clusters.
- if byteLen > 128 { // arbitrarily-decided threshold
- byteLen = 128
- }
- ret = ret.Refine().StringPrefix(knownPrefix[:byteLen]).NewValue()
- }
- }
- } else {
- ret = cty.StringVal(buf.String())
- }
-
- // A template rendering result is never null.
- ret = ret.RefineNotNull()
-
- // Apply the full set of marks to the returned value
- return ret.WithMarks(marks), diags
-}
-
-func (e *TemplateExpr) Range() hcl.Range {
- return e.SrcRange
-}
-
-func (e *TemplateExpr) StartRange() hcl.Range {
- return e.Parts[0].StartRange()
-}
-
-// IsStringLiteral returns true if and only if the template consists only of
-// single string literal, as would be created for a simple quoted string like
-// "foo".
-//
-// If this function returns true, then calling Value on the same expression
-// with a nil EvalContext will return the literal value.
-//
-// Note that "${"foo"}", "${1}", etc aren't considered literal values for the
-// purposes of this method, because the intent of this method is to identify
-// situations where the user seems to be explicitly intending literal string
-// interpretation, not situations that result in literals as a technicality
-// of the template expression unwrapping behavior.
-func (e *TemplateExpr) IsStringLiteral() bool {
- if len(e.Parts) != 1 {
- return false
- }
- _, ok := e.Parts[0].(*LiteralValueExpr)
- return ok
-}
-
-// TemplateJoinExpr is used to convert tuples of strings produced by template
-// constructs (i.e. for loops) into flat strings, by converting the values
-// tos strings and joining them. This AST node is not used directly; it's
-// produced as part of the AST of a "for" loop in a template.
-type TemplateJoinExpr struct {
- Tuple Expression
-}
-
-func (e *TemplateJoinExpr) walkChildNodes(w internalWalkFunc) {
- w(e.Tuple)
-}
-
-func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- tuple, diags := e.Tuple.Value(ctx)
-
- if tuple.IsNull() {
- // This indicates a bug in the code that constructed the AST.
- panic("TemplateJoinExpr got null tuple")
- }
- if tuple.Type() == cty.DynamicPseudoType {
- return cty.UnknownVal(cty.String), diags
- }
- if !tuple.Type().IsTupleType() {
- // This indicates a bug in the code that constructed the AST.
- panic("TemplateJoinExpr got non-tuple tuple")
- }
- if !tuple.IsKnown() {
- return cty.UnknownVal(cty.String), diags
- }
-
- tuple, marks := tuple.Unmark()
- allMarks := []cty.ValueMarks{marks}
- buf := &bytes.Buffer{}
- it := tuple.ElementIterator()
- for it.Next() {
- _, val := it.Element()
-
- if val.IsNull() {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid template interpolation value",
- Detail: fmt.Sprintf(
- "An iteration result is null. Cannot include a null value in a string template.",
- ),
- Subject: e.Range().Ptr(),
- Expression: e,
- EvalContext: ctx,
- })
- continue
- }
- if val.Type() == cty.DynamicPseudoType {
- return cty.UnknownVal(cty.String).WithMarks(marks), diags
- }
- strVal, err := convert.Convert(val, cty.String)
- if err != nil {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid template interpolation value",
- Detail: fmt.Sprintf(
- "Cannot include one of the interpolation results into the string template: %s.",
- err.Error(),
- ),
- Subject: e.Range().Ptr(),
- Expression: e,
- EvalContext: ctx,
- })
- continue
- }
- if !val.IsKnown() {
- return cty.UnknownVal(cty.String).WithMarks(marks), diags
- }
-
- strVal, strValMarks := strVal.Unmark()
- if len(strValMarks) > 0 {
- allMarks = append(allMarks, strValMarks)
- }
- buf.WriteString(strVal.AsString())
- }
-
- return cty.StringVal(buf.String()).WithMarks(allMarks...), diags
-}
-
-func (e *TemplateJoinExpr) Range() hcl.Range {
- return e.Tuple.Range()
-}
-
-func (e *TemplateJoinExpr) StartRange() hcl.Range {
- return e.Tuple.StartRange()
-}
-
-// TemplateWrapExpr is used instead of a TemplateExpr when a template
-// consists _only_ of a single interpolation sequence. In that case, the
-// template's result is the single interpolation's result, verbatim with
-// no type conversions.
-type TemplateWrapExpr struct {
- Wrapped Expression
-
- SrcRange hcl.Range
-}
-
-func (e *TemplateWrapExpr) walkChildNodes(w internalWalkFunc) {
- w(e.Wrapped)
-}
-
-func (e *TemplateWrapExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
- return e.Wrapped.Value(ctx)
-}
-
-func (e *TemplateWrapExpr) Range() hcl.Range {
- return e.SrcRange
-}
-
-func (e *TemplateWrapExpr) StartRange() hcl.Range {
- return e.SrcRange
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_vars.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_vars.go
deleted file mode 100644
index 6c3e472c..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/expression_vars.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-// Generated by expression_vars_gen.go. DO NOT EDIT.
-// Run 'go generate' on this package to update the set of functions here.
-
-import (
- "github.com/hashicorp/hcl/v2"
-)
-
-func (e *AnonSymbolExpr) Variables() []hcl.Traversal {
- return Variables(e)
-}
-
-func (e *BinaryOpExpr) Variables() []hcl.Traversal {
- return Variables(e)
-}
-
-func (e *ConditionalExpr) Variables() []hcl.Traversal {
- return Variables(e)
-}
-
-func (e *ExprSyntaxError) Variables() []hcl.Traversal {
- return Variables(e)
-}
-
-func (e *ForExpr) Variables() []hcl.Traversal {
- return Variables(e)
-}
-
-func (e *FunctionCallExpr) Variables() []hcl.Traversal {
- return Variables(e)
-}
-
-func (e *IndexExpr) Variables() []hcl.Traversal {
- return Variables(e)
-}
-
-func (e *LiteralValueExpr) Variables() []hcl.Traversal {
- return Variables(e)
-}
-
-func (e *ObjectConsExpr) Variables() []hcl.Traversal {
- return Variables(e)
-}
-
-func (e *ObjectConsKeyExpr) Variables() []hcl.Traversal {
- return Variables(e)
-}
-
-func (e *RelativeTraversalExpr) Variables() []hcl.Traversal {
- return Variables(e)
-}
-
-func (e *ScopeTraversalExpr) Variables() []hcl.Traversal {
- return Variables(e)
-}
-
-func (e *SplatExpr) Variables() []hcl.Traversal {
- return Variables(e)
-}
-
-func (e *TemplateExpr) Variables() []hcl.Traversal {
- return Variables(e)
-}
-
-func (e *TemplateJoinExpr) Variables() []hcl.Traversal {
- return Variables(e)
-}
-
-func (e *TemplateWrapExpr) Variables() []hcl.Traversal {
- return Variables(e)
-}
-
-func (e *TupleConsExpr) Variables() []hcl.Traversal {
- return Variables(e)
-}
-
-func (e *UnaryOpExpr) Variables() []hcl.Traversal {
- return Variables(e)
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/file.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/file.go
deleted file mode 100644
index 7be626ff..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/file.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-import (
- "github.com/hashicorp/hcl/v2"
-)
-
-// File is the top-level object resulting from parsing a configuration file.
-type File struct {
- Body *Body
- Bytes []byte
-}
-
-func (f *File) AsHCLFile() *hcl.File {
- return &hcl.File{
- Body: f.Body,
- Bytes: f.Bytes,
-
- // TODO: The Nav object, once we have an implementation of it
- }
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/generate.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/generate.go
deleted file mode 100644
index 66486074..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/generate.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-//go:generate go run expression_vars_gen.go
-//go:generate ruby unicode2ragel.rb --url=http://www.unicode.org/Public/9.0.0/ucd/DerivedCoreProperties.txt -m UnicodeDerived -p ID_Start,ID_Continue -o unicode_derived.rl
-//go:generate ragel -Z scan_tokens.rl
-//go:generate gofmt -w scan_tokens.go
-//go:generate ragel -Z scan_string_lit.rl
-//go:generate gofmt -w scan_string_lit.go
-//go:generate go run golang.org/x/tools/cmd/stringer -type TokenType -output token_type_string.go
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/keywords.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/keywords.go
deleted file mode 100644
index 5124ae95..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/keywords.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-import (
- "bytes"
-)
-
-type Keyword []byte
-
-var forKeyword = Keyword([]byte{'f', 'o', 'r'})
-var inKeyword = Keyword([]byte{'i', 'n'})
-var ifKeyword = Keyword([]byte{'i', 'f'})
-var elseKeyword = Keyword([]byte{'e', 'l', 's', 'e'})
-var endifKeyword = Keyword([]byte{'e', 'n', 'd', 'i', 'f'})
-var endforKeyword = Keyword([]byte{'e', 'n', 'd', 'f', 'o', 'r'})
-
-func (kw Keyword) TokenMatches(token Token) bool {
- if token.Type != TokenIdent {
- return false
- }
- return bytes.Equal([]byte(kw), token.Bytes)
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/navigation.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/navigation.go
deleted file mode 100644
index 83e1d4ef..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/navigation.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-import (
- "bytes"
- "fmt"
-
- "github.com/hashicorp/hcl/v2"
-)
-
-type navigation struct {
- root *Body
-}
-
-// Implementation of hcled.ContextString
-func (n navigation) ContextString(offset int) string {
- // We will walk our top-level blocks until we find one that contains
- // the given offset, and then construct a representation of the header
- // of the block.
-
- var block *Block
- for _, candidate := range n.root.Blocks {
- if candidate.Range().ContainsOffset(offset) {
- block = candidate
- break
- }
- }
-
- if block == nil {
- return ""
- }
-
- if len(block.Labels) == 0 {
- // Easy case!
- return block.Type
- }
-
- buf := &bytes.Buffer{}
- buf.WriteString(block.Type)
- for _, label := range block.Labels {
- fmt.Fprintf(buf, " %q", label)
- }
- return buf.String()
-}
-
-func (n navigation) ContextDefRange(offset int) hcl.Range {
- var block *Block
- for _, candidate := range n.root.Blocks {
- if candidate.Range().ContainsOffset(offset) {
- block = candidate
- break
- }
- }
-
- if block == nil {
- return hcl.Range{}
- }
-
- return block.DefRange()
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/node.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/node.go
deleted file mode 100644
index 6ead6091..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/node.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-import (
- "github.com/hashicorp/hcl/v2"
-)
-
-// Node is the abstract type that every AST node implements.
-//
-// This is a closed interface, so it cannot be implemented from outside of
-// this package.
-type Node interface {
- // This is the mechanism by which the public-facing walk functions
- // are implemented. Implementations should call the given function
- // for each child node and then replace that node with its return value.
- // The return value might just be the same node, for non-transforming
- // walks.
- walkChildNodes(w internalWalkFunc)
-
- Range() hcl.Range
-}
-
-type internalWalkFunc func(Node)
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go
deleted file mode 100644
index ce96ae35..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser.go
+++ /dev/null
@@ -1,2215 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-import (
- "bytes"
- "fmt"
- "strconv"
- "unicode/utf8"
-
- "github.com/apparentlymart/go-textseg/v15/textseg"
- "github.com/hashicorp/hcl/v2"
- "github.com/zclconf/go-cty/cty"
-)
-
-type parser struct {
- *peeker
-
- // set to true if any recovery is attempted. The parser can use this
- // to attempt to reduce error noise by suppressing "bad token" errors
- // in recovery mode, assuming that the recovery heuristics have failed
- // in this case and left the peeker in a wrong place.
- recovery bool
-}
-
-func (p *parser) ParseBody(end TokenType) (*Body, hcl.Diagnostics) {
- attrs := Attributes{}
- blocks := Blocks{}
- var diags hcl.Diagnostics
-
- startRange := p.PrevRange()
- var endRange hcl.Range
-
-Token:
- for {
- next := p.Peek()
- if next.Type == end {
- endRange = p.NextRange()
- p.Read()
- break Token
- }
-
- switch next.Type {
- case TokenNewline:
- p.Read()
- continue
- case TokenIdent:
- item, itemDiags := p.ParseBodyItem()
- diags = append(diags, itemDiags...)
- switch titem := item.(type) {
- case *Block:
- blocks = append(blocks, titem)
- case *Attribute:
- if existing, exists := attrs[titem.Name]; exists {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Attribute redefined",
- Detail: fmt.Sprintf(
- "The argument %q was already set at %s. Each argument may be set only once.",
- titem.Name, existing.NameRange.String(),
- ),
- Subject: &titem.NameRange,
- })
- } else {
- attrs[titem.Name] = titem
- }
- default:
- // This should never happen for valid input, but may if a
- // syntax error was detected in ParseBodyItem that prevented
- // it from even producing a partially-broken item. In that
- // case, it would've left at least one error in the diagnostics
- // slice we already dealt with above.
- //
- // We'll assume ParseBodyItem attempted recovery to leave
- // us in a reasonable position to try parsing the next item.
- continue
- }
- default:
- bad := p.Read()
- if !p.recovery {
- switch bad.Type {
- case TokenOQuote:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid argument name",
- Detail: "Argument names must not be quoted.",
- Subject: &bad.Range,
- })
- case TokenEOF:
- switch end {
- case TokenCBrace:
- // If we're looking for a closing brace then we're parsing a block
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unclosed configuration block",
- Detail: "There is no closing brace for this block before the end of the file. This may be caused by incorrect brace nesting elsewhere in this file.",
- Subject: &startRange,
- })
- default:
- // The only other "end" should itself be TokenEOF (for
- // the top-level body) and so we shouldn't get here,
- // but we'll return a generic error message anyway to
- // be resilient.
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unclosed configuration body",
- Detail: "Found end of file before the end of this configuration body.",
- Subject: &startRange,
- })
- }
- default:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Argument or block definition required",
- Detail: "An argument or block definition is required here.",
- Subject: &bad.Range,
- })
- }
- }
- endRange = p.PrevRange() // arbitrary, but somewhere inside the body means better diagnostics
-
- p.recover(end) // attempt to recover to the token after the end of this body
- break Token
- }
- }
-
- return &Body{
- Attributes: attrs,
- Blocks: blocks,
-
- SrcRange: hcl.RangeBetween(startRange, endRange),
- EndRange: hcl.Range{
- Filename: endRange.Filename,
- Start: endRange.End,
- End: endRange.End,
- },
- }, diags
-}
-
-func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) {
- ident := p.Read()
- if ident.Type != TokenIdent {
- p.recoverAfterBodyItem()
- return nil, hcl.Diagnostics{
- {
- Severity: hcl.DiagError,
- Summary: "Argument or block definition required",
- Detail: "An argument or block definition is required here.",
- Subject: &ident.Range,
- },
- }
- }
-
- next := p.Peek()
-
- switch next.Type {
- case TokenEqual:
- return p.finishParsingBodyAttribute(ident, false)
- case TokenOQuote, TokenOBrace, TokenIdent:
- return p.finishParsingBodyBlock(ident)
- default:
- p.recoverAfterBodyItem()
- return nil, hcl.Diagnostics{
- {
- Severity: hcl.DiagError,
- Summary: "Argument or block definition required",
- Detail: "An argument or block definition is required here. To set an argument, use the equals sign \"=\" to introduce the argument value.",
- Subject: &ident.Range,
- },
- }
- }
-}
-
-// parseSingleAttrBody is a weird variant of ParseBody that deals with the
-// body of a nested block containing only one attribute value all on a single
-// line, like foo { bar = baz } . It expects to find a single attribute item
-// immediately followed by the end token type with no intervening newlines.
-func (p *parser) parseSingleAttrBody(end TokenType) (*Body, hcl.Diagnostics) {
- ident := p.Read()
- if ident.Type != TokenIdent {
- p.recoverAfterBodyItem()
- return nil, hcl.Diagnostics{
- {
- Severity: hcl.DiagError,
- Summary: "Argument or block definition required",
- Detail: "An argument or block definition is required here.",
- Subject: &ident.Range,
- },
- }
- }
-
- var attr *Attribute
- var diags hcl.Diagnostics
-
- next := p.Peek()
-
- switch next.Type {
- case TokenEqual:
- node, attrDiags := p.finishParsingBodyAttribute(ident, true)
- diags = append(diags, attrDiags...)
- attr = node.(*Attribute)
- case TokenOQuote, TokenOBrace, TokenIdent:
- p.recoverAfterBodyItem()
- return nil, hcl.Diagnostics{
- {
- Severity: hcl.DiagError,
- Summary: "Argument definition required",
- Detail: fmt.Sprintf("A single-line block definition can contain only a single argument. If you meant to define argument %q, use an equals sign to assign it a value. To define a nested block, place it on a line of its own within its parent block.", ident.Bytes),
- Subject: hcl.RangeBetween(ident.Range, next.Range).Ptr(),
- },
- }
- default:
- p.recoverAfterBodyItem()
- return nil, hcl.Diagnostics{
- {
- Severity: hcl.DiagError,
- Summary: "Argument or block definition required",
- Detail: "An argument or block definition is required here. To set an argument, use the equals sign \"=\" to introduce the argument value.",
- Subject: &ident.Range,
- },
- }
- }
-
- return &Body{
- Attributes: Attributes{
- string(ident.Bytes): attr,
- },
-
- SrcRange: attr.SrcRange,
- EndRange: hcl.Range{
- Filename: attr.SrcRange.Filename,
- Start: attr.SrcRange.End,
- End: attr.SrcRange.End,
- },
- }, diags
-
-}
-
-func (p *parser) finishParsingBodyAttribute(ident Token, singleLine bool) (Node, hcl.Diagnostics) {
- eqTok := p.Read() // eat equals token
- if eqTok.Type != TokenEqual {
- // should never happen if caller behaves
- panic("finishParsingBodyAttribute called with next not equals")
- }
-
- var endRange hcl.Range
-
- expr, diags := p.ParseExpression()
- if p.recovery && diags.HasErrors() {
- // recovery within expressions tends to be tricky, so we've probably
- // landed somewhere weird. We'll try to reset to the start of a body
- // item so parsing can continue.
- endRange = p.PrevRange()
- p.recoverAfterBodyItem()
- } else {
- endRange = p.PrevRange()
- if !singleLine {
- end := p.Peek()
- if end.Type != TokenNewline && end.Type != TokenEOF {
- if !p.recovery {
- summary := "Missing newline after argument"
- detail := "An argument definition must end with a newline."
-
- if end.Type == TokenComma {
- summary = "Unexpected comma after argument"
- detail = "Argument definitions must be separated by newlines, not commas. " + detail
- }
-
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: summary,
- Detail: detail,
- Subject: &end.Range,
- Context: hcl.RangeBetween(ident.Range, end.Range).Ptr(),
- })
- }
- endRange = p.PrevRange()
- p.recoverAfterBodyItem()
- } else {
- endRange = p.PrevRange()
- p.Read() // eat newline
- }
- }
- }
-
- return &Attribute{
- Name: string(ident.Bytes),
- Expr: expr,
-
- SrcRange: hcl.RangeBetween(ident.Range, endRange),
- NameRange: ident.Range,
- EqualsRange: eqTok.Range,
- }, diags
-}
-
-func (p *parser) finishParsingBodyBlock(ident Token) (Node, hcl.Diagnostics) {
- var blockType = string(ident.Bytes)
- var diags hcl.Diagnostics
- var labels []string
- var labelRanges []hcl.Range
-
- var oBrace Token
-
-Token:
- for {
- tok := p.Peek()
-
- switch tok.Type {
-
- case TokenOBrace:
- oBrace = p.Read()
- break Token
-
- case TokenOQuote:
- label, labelRange, labelDiags := p.parseQuotedStringLiteral()
- diags = append(diags, labelDiags...)
- labels = append(labels, label)
- labelRanges = append(labelRanges, labelRange)
- // parseQuoteStringLiteral recovers up to the closing quote
- // if it encounters problems, so we can continue looking for
- // more labels and eventually the block body even.
-
- case TokenIdent:
- tok = p.Read() // eat token
- label, labelRange := string(tok.Bytes), tok.Range
- labels = append(labels, label)
- labelRanges = append(labelRanges, labelRange)
-
- default:
- switch tok.Type {
- case TokenEqual:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid block definition",
- Detail: "The equals sign \"=\" indicates an argument definition, and must not be used when defining a block.",
- Subject: &tok.Range,
- Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(),
- })
- case TokenNewline:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid block definition",
- Detail: "A block definition must have block content delimited by \"{\" and \"}\", starting on the same line as the block header.",
- Subject: &tok.Range,
- Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(),
- })
- default:
- if !p.recovery {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid block definition",
- Detail: "Either a quoted string block label or an opening brace (\"{\") is expected here.",
- Subject: &tok.Range,
- Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(),
- })
- }
- }
-
- p.recoverAfterBodyItem()
-
- return &Block{
- Type: blockType,
- Labels: labels,
- Body: &Body{
- SrcRange: ident.Range,
- EndRange: ident.Range,
- },
-
- TypeRange: ident.Range,
- LabelRanges: labelRanges,
- OpenBraceRange: ident.Range, // placeholder
- CloseBraceRange: ident.Range, // placeholder
- }, diags
- }
- }
-
- // Once we fall out here, the peeker is pointed just after our opening
- // brace, so we can begin our nested body parsing.
- var body *Body
- var bodyDiags hcl.Diagnostics
- switch p.Peek().Type {
- case TokenNewline, TokenEOF, TokenCBrace:
- body, bodyDiags = p.ParseBody(TokenCBrace)
- default:
- // Special one-line, single-attribute block parsing mode.
- body, bodyDiags = p.parseSingleAttrBody(TokenCBrace)
- switch p.Peek().Type {
- case TokenCBrace:
- p.Read() // the happy path - just consume the closing brace
- case TokenComma:
- // User seems to be trying to use the object-constructor
- // comma-separated style, which isn't permitted for blocks.
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid single-argument block definition",
- Detail: "Single-line block syntax can include only one argument definition. To define multiple arguments, use the multi-line block syntax with one argument definition per line.",
- Subject: p.Peek().Range.Ptr(),
- })
- p.recover(TokenCBrace)
- case TokenNewline:
- // We don't allow weird mixtures of single and multi-line syntax.
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid single-argument block definition",
- Detail: "An argument definition on the same line as its containing block creates a single-line block definition, which must also be closed on the same line. Place the block's closing brace immediately after the argument definition.",
- Subject: p.Peek().Range.Ptr(),
- })
- p.recover(TokenCBrace)
- default:
- // Some other weird thing is going on. Since we can't guess a likely
- // user intent for this one, we'll skip it if we're already in
- // recovery mode.
- if !p.recovery {
- switch p.Peek().Type {
- case TokenEOF:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unclosed configuration block",
- Detail: "There is no closing brace for this block before the end of the file. This may be caused by incorrect brace nesting elsewhere in this file.",
- Subject: oBrace.Range.Ptr(),
- Context: hcl.RangeBetween(ident.Range, oBrace.Range).Ptr(),
- })
- default:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid single-argument block definition",
- Detail: "A single-line block definition must end with a closing brace immediately after its single argument definition.",
- Subject: p.Peek().Range.Ptr(),
- })
- }
- }
- p.recover(TokenCBrace)
- }
- }
- diags = append(diags, bodyDiags...)
- cBraceRange := p.PrevRange()
-
- eol := p.Peek()
- if eol.Type == TokenNewline || eol.Type == TokenEOF {
- p.Read() // eat newline
- } else {
- if !p.recovery {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Missing newline after block definition",
- Detail: "A block definition must end with a newline.",
- Subject: &eol.Range,
- Context: hcl.RangeBetween(ident.Range, eol.Range).Ptr(),
- })
- }
- p.recoverAfterBodyItem()
- }
-
- // We must never produce a nil body, since the caller may attempt to
- // do analysis of a partial result when there's an error, so we'll
- // insert a placeholder if we otherwise failed to produce a valid
- // body due to one of the syntax error paths above.
- if body == nil && diags.HasErrors() {
- body = &Body{
- SrcRange: hcl.RangeBetween(oBrace.Range, cBraceRange),
- EndRange: cBraceRange,
- }
- }
-
- return &Block{
- Type: blockType,
- Labels: labels,
- Body: body,
-
- TypeRange: ident.Range,
- LabelRanges: labelRanges,
- OpenBraceRange: oBrace.Range,
- CloseBraceRange: cBraceRange,
- }, diags
-}
-
-func (p *parser) ParseExpression() (Expression, hcl.Diagnostics) {
- return p.parseTernaryConditional()
-}
-
-func (p *parser) parseTernaryConditional() (Expression, hcl.Diagnostics) {
- // The ternary conditional operator (.. ? .. : ..) behaves somewhat
- // like a binary operator except that the "symbol" is itself
- // an expression enclosed in two punctuation characters.
- // The middle expression is parsed as if the ? and : symbols
- // were parentheses. The "rhs" (the "false expression") is then
- // treated right-associatively so it behaves similarly to the
- // middle in terms of precedence.
-
- startRange := p.NextRange()
- var condExpr, trueExpr, falseExpr Expression
- var diags hcl.Diagnostics
-
- condExpr, condDiags := p.parseBinaryOps(binaryOps)
- diags = append(diags, condDiags...)
- if p.recovery && condDiags.HasErrors() {
- return condExpr, diags
- }
-
- questionMark := p.Peek()
- if questionMark.Type != TokenQuestion {
- return condExpr, diags
- }
-
- p.Read() // eat question mark
-
- trueExpr, trueDiags := p.ParseExpression()
- diags = append(diags, trueDiags...)
- if p.recovery && trueDiags.HasErrors() {
- return condExpr, diags
- }
-
- colon := p.Peek()
- if colon.Type != TokenColon {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Missing false expression in conditional",
- Detail: "The conditional operator (...?...:...) requires a false expression, delimited by a colon.",
- Subject: &colon.Range,
- Context: hcl.RangeBetween(startRange, colon.Range).Ptr(),
- })
- return condExpr, diags
- }
-
- p.Read() // eat colon
-
- falseExpr, falseDiags := p.ParseExpression()
- diags = append(diags, falseDiags...)
- if p.recovery && falseDiags.HasErrors() {
- return condExpr, diags
- }
-
- return &ConditionalExpr{
- Condition: condExpr,
- TrueResult: trueExpr,
- FalseResult: falseExpr,
-
- SrcRange: hcl.RangeBetween(startRange, falseExpr.Range()),
- }, diags
-}
-
-// parseBinaryOps calls itself recursively to work through all of the
-// operator precedence groups, and then eventually calls parseExpressionTerm
-// for each operand.
-func (p *parser) parseBinaryOps(ops []map[TokenType]*Operation) (Expression, hcl.Diagnostics) {
- if len(ops) == 0 {
- // We've run out of operators, so now we'll just try to parse a term.
- return p.parseExpressionWithTraversals()
- }
-
- thisLevel := ops[0]
- remaining := ops[1:]
-
- var lhs, rhs Expression
- var operation *Operation
- var diags hcl.Diagnostics
-
- // Parse a term that might be the first operand of a binary
- // operation or it might just be a standalone term.
- // We won't know until we've parsed it and can look ahead
- // to see if there's an operator token for this level.
- lhs, lhsDiags := p.parseBinaryOps(remaining)
- diags = append(diags, lhsDiags...)
- if p.recovery && lhsDiags.HasErrors() {
- return lhs, diags
- }
-
- // We'll keep eating up operators until we run out, so that operators
- // with the same precedence will combine in a left-associative manner:
- // a+b+c => (a+b)+c, not a+(b+c)
- //
- // Should we later want to have right-associative operators, a way
- // to achieve that would be to call back up to ParseExpression here
- // instead of iteratively parsing only the remaining operators.
- for {
- next := p.Peek()
- var newOp *Operation
- var ok bool
- if newOp, ok = thisLevel[next.Type]; !ok {
- break
- }
-
- // Are we extending an expression started on the previous iteration?
- if operation != nil {
- lhs = &BinaryOpExpr{
- LHS: lhs,
- Op: operation,
- RHS: rhs,
-
- SrcRange: hcl.RangeBetween(lhs.Range(), rhs.Range()),
- }
- }
-
- operation = newOp
- p.Read() // eat operator token
- var rhsDiags hcl.Diagnostics
- rhs, rhsDiags = p.parseBinaryOps(remaining)
- diags = append(diags, rhsDiags...)
- if p.recovery && rhsDiags.HasErrors() {
- return lhs, diags
- }
- }
-
- if operation == nil {
- return lhs, diags
- }
-
- return &BinaryOpExpr{
- LHS: lhs,
- Op: operation,
- RHS: rhs,
-
- SrcRange: hcl.RangeBetween(lhs.Range(), rhs.Range()),
- }, diags
-}
-
-func (p *parser) parseExpressionWithTraversals() (Expression, hcl.Diagnostics) {
- term, diags := p.parseExpressionTerm()
- ret, moreDiags := p.parseExpressionTraversals(term)
- diags = append(diags, moreDiags...)
- return ret, diags
-}
-
-func (p *parser) parseExpressionTraversals(from Expression) (Expression, hcl.Diagnostics) {
- var diags hcl.Diagnostics
- ret := from
-
-Traversal:
- for {
- next := p.Peek()
-
- switch next.Type {
- case TokenDot:
- // Attribute access or splat
- dot := p.Read()
- attrTok := p.Peek()
-
- switch attrTok.Type {
- case TokenIdent:
- attrTok = p.Read() // eat token
- name := string(attrTok.Bytes)
- rng := hcl.RangeBetween(dot.Range, attrTok.Range)
- step := hcl.TraverseAttr{
- Name: name,
- SrcRange: rng,
- }
-
- ret = makeRelativeTraversal(ret, step, rng)
-
- case TokenNumberLit:
- // This is a weird form we inherited from HIL, allowing numbers
- // to be used as attributes as a weird way of writing [n].
- // This was never actually a first-class thing in HIL, but
- // HIL tolerated sequences like .0. in its variable names and
- // calling applications like Terraform exploited that to
- // introduce indexing syntax where none existed.
- numTok := p.Read() // eat token
- attrTok = numTok
-
- // This syntax is ambiguous if multiple indices are used in
- // succession, like foo.0.1.baz: that actually parses as
- // a fractional number 0.1. Since we're only supporting this
- // syntax for compatibility with legacy Terraform
- // configurations, and Terraform does not tend to have lists
- // of lists, we'll choose to reject that here with a helpful
- // error message, rather than failing later because the index
- // isn't a whole number.
- if dotIdx := bytes.IndexByte(numTok.Bytes, '.'); dotIdx >= 0 {
- first := numTok.Bytes[:dotIdx]
- second := numTok.Bytes[dotIdx+1:]
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid legacy index syntax",
- Detail: fmt.Sprintf("When using the legacy index syntax, chaining two indexes together is not permitted. Use the proper index syntax instead, like [%s][%s].", first, second),
- Subject: &attrTok.Range,
- })
- rng := hcl.RangeBetween(dot.Range, numTok.Range)
- step := hcl.TraverseIndex{
- Key: cty.DynamicVal,
- SrcRange: rng,
- }
- ret = makeRelativeTraversal(ret, step, rng)
- break
- }
-
- numVal, numDiags := p.numberLitValue(numTok)
- diags = append(diags, numDiags...)
-
- rng := hcl.RangeBetween(dot.Range, numTok.Range)
- step := hcl.TraverseIndex{
- Key: numVal,
- SrcRange: rng,
- }
-
- ret = makeRelativeTraversal(ret, step, rng)
-
- case TokenStar:
- // "Attribute-only" splat expression.
- // (This is a kinda weird construct inherited from HIL, which
- // behaves a bit like a [*] splat except that it is only able
- // to do attribute traversals into each of its elements,
- // whereas foo[*] can support _any_ traversal.
- marker := p.Read() // eat star
- trav := make(hcl.Traversal, 0, 1)
- var firstRange, lastRange hcl.Range
- firstRange = p.NextRange()
- lastRange = marker.Range
- for p.Peek().Type == TokenDot {
- dot := p.Read()
-
- if p.Peek().Type == TokenNumberLit {
- // Continuing the "weird stuff inherited from HIL"
- // theme, we also allow numbers as attribute names
- // inside splats and interpret them as indexing
- // into a list, for expressions like:
- // foo.bar.*.baz.0.foo
- numTok := p.Read()
-
- // Weird special case if the user writes something
- // like foo.bar.*.baz.0.0.foo, where 0.0 parses
- // as a number.
- if dotIdx := bytes.IndexByte(numTok.Bytes, '.'); dotIdx >= 0 {
- first := numTok.Bytes[:dotIdx]
- second := numTok.Bytes[dotIdx+1:]
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid legacy index syntax",
- Detail: fmt.Sprintf("When using the legacy index syntax, chaining two indexes together is not permitted. Use the proper index syntax with a full splat expression [*] instead, like [%s][%s].", first, second),
- Subject: &attrTok.Range,
- })
- trav = append(trav, hcl.TraverseIndex{
- Key: cty.DynamicVal,
- SrcRange: hcl.RangeBetween(dot.Range, numTok.Range),
- })
- lastRange = numTok.Range
- continue
- }
-
- numVal, numDiags := p.numberLitValue(numTok)
- diags = append(diags, numDiags...)
- trav = append(trav, hcl.TraverseIndex{
- Key: numVal,
- SrcRange: hcl.RangeBetween(dot.Range, numTok.Range),
- })
- lastRange = numTok.Range
- continue
- }
-
- if p.Peek().Type != TokenIdent {
- if !p.recovery {
- if p.Peek().Type == TokenStar {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Nested splat expression not allowed",
- Detail: "A splat expression (*) cannot be used inside another attribute-only splat expression.",
- Subject: p.Peek().Range.Ptr(),
- })
- } else {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid attribute name",
- Detail: "An attribute name is required after a dot.",
- Subject: &attrTok.Range,
- })
- }
- }
- p.setRecovery()
- continue Traversal
- }
-
- attrTok := p.Read()
- trav = append(trav, hcl.TraverseAttr{
- Name: string(attrTok.Bytes),
- SrcRange: hcl.RangeBetween(dot.Range, attrTok.Range),
- })
- lastRange = attrTok.Range
- }
-
- itemExpr := &AnonSymbolExpr{
- SrcRange: hcl.RangeBetween(dot.Range, marker.Range),
- }
- var travExpr Expression
- if len(trav) == 0 {
- travExpr = itemExpr
- } else {
- travExpr = &RelativeTraversalExpr{
- Source: itemExpr,
- Traversal: trav,
- SrcRange: hcl.RangeBetween(firstRange, lastRange),
- }
- }
-
- ret = &SplatExpr{
- Source: ret,
- Each: travExpr,
- Item: itemExpr,
-
- SrcRange: hcl.RangeBetween(from.Range(), lastRange),
- MarkerRange: hcl.RangeBetween(dot.Range, marker.Range),
- }
-
- default:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid attribute name",
- Detail: "An attribute name is required after a dot.",
- Subject: &attrTok.Range,
- })
- // This leaves the peeker in a bad place, so following items
- // will probably be misparsed until we hit something that
- // allows us to re-sync.
- //
- // We will probably need to do something better here eventually
- // in order to support autocomplete triggered by typing a
- // period.
- p.setRecovery()
- }
-
- case TokenOBrack:
- // Indexing of a collection.
- // This may or may not be a hcl.Traverser, depending on whether
- // the key value is something constant.
-
- open := p.Read()
- switch p.Peek().Type {
- case TokenStar:
- // This is a full splat expression, like foo[*], which consumes
- // the rest of the traversal steps after it using a recursive
- // call to this function.
- p.Read() // consume star
- close := p.Read()
- if close.Type != TokenCBrack && !p.recovery {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Missing close bracket on splat index",
- Detail: "The star for a full splat operator must be immediately followed by a closing bracket (\"]\").",
- Subject: &close.Range,
- })
- close = p.recover(TokenCBrack)
- }
- // Splat expressions use a special "anonymous symbol" as a
- // placeholder in an expression to be evaluated once for each
- // item in the source expression.
- itemExpr := &AnonSymbolExpr{
- SrcRange: hcl.RangeBetween(open.Range, close.Range),
- }
- // Now we'll recursively call this same function to eat any
- // remaining traversal steps against the anonymous symbol.
- travExpr, nestedDiags := p.parseExpressionTraversals(itemExpr)
- diags = append(diags, nestedDiags...)
-
- ret = &SplatExpr{
- Source: ret,
- Each: travExpr,
- Item: itemExpr,
-
- SrcRange: hcl.RangeBetween(from.Range(), travExpr.Range()),
- MarkerRange: hcl.RangeBetween(open.Range, close.Range),
- }
-
- default:
-
- var close Token
- p.PushIncludeNewlines(false) // arbitrary newlines allowed in brackets
- keyExpr, keyDiags := p.ParseExpression()
- diags = append(diags, keyDiags...)
- if p.recovery && keyDiags.HasErrors() {
- close = p.recover(TokenCBrack)
- } else {
- close = p.Read()
- if close.Type != TokenCBrack && !p.recovery {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Missing close bracket on index",
- Detail: "The index operator must end with a closing bracket (\"]\").",
- Subject: &close.Range,
- })
- close = p.recover(TokenCBrack)
- }
- }
- p.PopIncludeNewlines()
-
- if lit, isLit := keyExpr.(*LiteralValueExpr); isLit {
- litKey, _ := lit.Value(nil)
- rng := hcl.RangeBetween(open.Range, close.Range)
- step := hcl.TraverseIndex{
- Key: litKey,
- SrcRange: rng,
- }
- ret = makeRelativeTraversal(ret, step, rng)
- } else if tmpl, isTmpl := keyExpr.(*TemplateExpr); isTmpl && tmpl.IsStringLiteral() {
- litKey, _ := tmpl.Value(nil)
- rng := hcl.RangeBetween(open.Range, close.Range)
- step := hcl.TraverseIndex{
- Key: litKey,
- SrcRange: rng,
- }
- ret = makeRelativeTraversal(ret, step, rng)
- } else {
- rng := hcl.RangeBetween(open.Range, close.Range)
- ret = &IndexExpr{
- Collection: ret,
- Key: keyExpr,
-
- SrcRange: hcl.RangeBetween(from.Range(), rng),
- OpenRange: open.Range,
- BracketRange: rng,
- }
- }
- }
-
- default:
- break Traversal
- }
- }
-
- return ret, diags
-}
-
-// makeRelativeTraversal takes an expression and a traverser and returns
-// a traversal expression that combines the two. If the given expression
-// is already a traversal, it is extended in place (mutating it) and
-// returned. If it isn't, a new RelativeTraversalExpr is created and returned.
-func makeRelativeTraversal(expr Expression, next hcl.Traverser, rng hcl.Range) Expression {
- switch texpr := expr.(type) {
- case *ScopeTraversalExpr:
- texpr.Traversal = append(texpr.Traversal, next)
- texpr.SrcRange = hcl.RangeBetween(texpr.SrcRange, rng)
- return texpr
- case *RelativeTraversalExpr:
- texpr.Traversal = append(texpr.Traversal, next)
- texpr.SrcRange = hcl.RangeBetween(texpr.SrcRange, rng)
- return texpr
- default:
- return &RelativeTraversalExpr{
- Source: expr,
- Traversal: hcl.Traversal{next},
- SrcRange: hcl.RangeBetween(expr.Range(), rng),
- }
- }
-}
-
-func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) {
- start := p.Peek()
-
- switch start.Type {
- case TokenOParen:
- oParen := p.Read() // eat open paren
-
- p.PushIncludeNewlines(false)
-
- expr, diags := p.ParseExpression()
- if diags.HasErrors() {
- // attempt to place the peeker after our closing paren
- // before we return, so that the next parser has some
- // chance of finding a valid expression.
- p.recover(TokenCParen)
- p.PopIncludeNewlines()
- return expr, diags
- }
-
- close := p.Peek()
- if close.Type != TokenCParen {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unbalanced parentheses",
- Detail: "Expected a closing parenthesis to terminate the expression.",
- Subject: &close.Range,
- Context: hcl.RangeBetween(start.Range, close.Range).Ptr(),
- })
- p.setRecovery()
- }
-
- cParen := p.Read() // eat closing paren
- p.PopIncludeNewlines()
-
- // Our parser's already taken care of the precedence effect of the
- // parentheses by considering them to be a kind of "term", but we
- // still need to include the parentheses in our AST so we can give
- // an accurate representation of the source range that includes the
- // open and closing parentheses.
- expr = &ParenthesesExpr{
- Expression: expr,
- SrcRange: hcl.RangeBetween(oParen.Range, cParen.Range),
- }
-
- return expr, diags
-
- case TokenNumberLit:
- tok := p.Read() // eat number token
-
- numVal, diags := p.numberLitValue(tok)
- return &LiteralValueExpr{
- Val: numVal,
- SrcRange: tok.Range,
- }, diags
-
- case TokenIdent:
- tok := p.Read() // eat identifier token
-
- if p.Peek().Type == TokenOParen || p.Peek().Type == TokenDoubleColon {
- return p.finishParsingFunctionCall(tok)
- }
-
- name := string(tok.Bytes)
- switch name {
- case "true":
- return &LiteralValueExpr{
- Val: cty.True,
- SrcRange: tok.Range,
- }, nil
- case "false":
- return &LiteralValueExpr{
- Val: cty.False,
- SrcRange: tok.Range,
- }, nil
- case "null":
- return &LiteralValueExpr{
- Val: cty.NullVal(cty.DynamicPseudoType),
- SrcRange: tok.Range,
- }, nil
- default:
- return &ScopeTraversalExpr{
- Traversal: hcl.Traversal{
- hcl.TraverseRoot{
- Name: name,
- SrcRange: tok.Range,
- },
- },
- SrcRange: tok.Range,
- }, nil
- }
-
- case TokenOQuote, TokenOHeredoc:
- open := p.Read() // eat opening marker
- closer := p.oppositeBracket(open.Type)
- exprs, passthru, _, diags := p.parseTemplateInner(closer, tokenOpensFlushHeredoc(open))
-
- closeRange := p.PrevRange()
-
- if passthru {
- if len(exprs) != 1 {
- panic("passthru set with len(exprs) != 1")
- }
- return &TemplateWrapExpr{
- Wrapped: exprs[0],
- SrcRange: hcl.RangeBetween(open.Range, closeRange),
- }, diags
- }
-
- return &TemplateExpr{
- Parts: exprs,
- SrcRange: hcl.RangeBetween(open.Range, closeRange),
- }, diags
-
- case TokenMinus:
- tok := p.Read() // eat minus token
-
- // Important to use parseExpressionWithTraversals rather than parseExpression
- // here, otherwise we can capture a following binary expression into
- // our negation.
- // e.g. -46+5 should parse as (-46)+5, not -(46+5)
- operand, diags := p.parseExpressionWithTraversals()
- return &UnaryOpExpr{
- Op: OpNegate,
- Val: operand,
-
- SrcRange: hcl.RangeBetween(tok.Range, operand.Range()),
- SymbolRange: tok.Range,
- }, diags
-
- case TokenBang:
- tok := p.Read() // eat bang token
-
- // Important to use parseExpressionWithTraversals rather than parseExpression
- // here, otherwise we can capture a following binary expression into
- // our negation.
- operand, diags := p.parseExpressionWithTraversals()
- return &UnaryOpExpr{
- Op: OpLogicalNot,
- Val: operand,
-
- SrcRange: hcl.RangeBetween(tok.Range, operand.Range()),
- SymbolRange: tok.Range,
- }, diags
-
- case TokenOBrack:
- return p.parseTupleCons()
-
- case TokenOBrace:
- return p.parseObjectCons()
-
- default:
- var diags hcl.Diagnostics
- if !p.recovery {
- switch start.Type {
- case TokenEOF:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Missing expression",
- Detail: "Expected the start of an expression, but found the end of the file.",
- Subject: &start.Range,
- })
- default:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid expression",
- Detail: "Expected the start of an expression, but found an invalid expression token.",
- Subject: &start.Range,
- })
- }
- }
- p.setRecovery()
-
- // Return a placeholder so that the AST is still structurally sound
- // even in the presence of parse errors.
- return &LiteralValueExpr{
- Val: cty.DynamicVal,
- SrcRange: start.Range,
- }, diags
- }
-}
-
-func (p *parser) numberLitValue(tok Token) (cty.Value, hcl.Diagnostics) {
- // The cty.ParseNumberVal is always the same behavior as converting a
- // string to a number, ensuring we always interpret decimal numbers in
- // the same way.
- numVal, err := cty.ParseNumberVal(string(tok.Bytes))
- if err != nil {
- ret := cty.UnknownVal(cty.Number)
- return ret, hcl.Diagnostics{
- {
- Severity: hcl.DiagError,
- Summary: "Invalid number literal",
- // FIXME: not a very good error message, but convert only
- // gives us "a number is required", so not much help either.
- Detail: "Failed to recognize the value of this number literal.",
- Subject: &tok.Range,
- },
- }
- }
- return numVal, nil
-}
-
-// finishParsingFunctionCall parses a function call assuming that the function
-// name was already read, and so the peeker should be pointing at the opening
-// parenthesis after the name, or at the double-colon after the initial
-// function scope name.
-func (p *parser) finishParsingFunctionCall(name Token) (Expression, hcl.Diagnostics) {
- var diags hcl.Diagnostics
-
- openTok := p.Read()
- if openTok.Type != TokenOParen && openTok.Type != TokenDoubleColon {
- // should never happen if callers behave
- panic("finishParsingFunctionCall called with unsupported next token")
- }
-
- nameStr := string(name.Bytes)
- nameEndPos := name.Range.End
- for openTok.Type == TokenDoubleColon {
- nextName := p.Read()
- if nextName.Type != TokenIdent {
- diag := hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Missing function name",
- Detail: "Function scope resolution symbol :: must be followed by a function name in this scope.",
- Subject: &nextName.Range,
- Context: hcl.RangeBetween(name.Range, nextName.Range).Ptr(),
- }
- diags = append(diags, &diag)
- p.recoverOver(TokenOParen)
- return &ExprSyntaxError{
- ParseDiags: hcl.Diagnostics{&diag},
- Placeholder: cty.DynamicVal,
- SrcRange: hcl.RangeBetween(name.Range, nextName.Range),
- }, diags
- }
-
- // Initial versions of HCLv2 didn't support function namespaces, and
- // so for backward compatibility we just treat namespaced functions
- // as weird names with "::" separators in them, saved as a string
- // to keep the API unchanged. FunctionCallExpr also has some special
- // handling of names containing :: when referring to a function that
- // doesn't exist in EvalContext, to return better error messages
- // when namespaces are used incorrectly.
- nameStr = nameStr + "::" + string(nextName.Bytes)
- nameEndPos = nextName.Range.End
-
- openTok = p.Read()
- }
-
- nameRange := hcl.Range{
- Filename: name.Range.Filename,
- Start: name.Range.Start,
- End: nameEndPos,
- }
-
- if openTok.Type != TokenOParen {
- diag := hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Missing open parenthesis",
- Detail: "Function selector must be followed by an open parenthesis to begin the function call.",
- Subject: &openTok.Range,
- Context: hcl.RangeBetween(name.Range, openTok.Range).Ptr(),
- }
-
- diags = append(diags, &diag)
- p.recoverOver(TokenOParen)
- return &ExprSyntaxError{
- ParseDiags: hcl.Diagnostics{&diag},
- Placeholder: cty.DynamicVal,
- SrcRange: hcl.RangeBetween(name.Range, openTok.Range),
- }, diags
- }
-
- var args []Expression
- var expandFinal bool
- var closeTok Token
-
- // Arbitrary newlines are allowed inside the function call parentheses.
- p.PushIncludeNewlines(false)
-
-Token:
- for {
- tok := p.Peek()
-
- if tok.Type == TokenCParen {
- closeTok = p.Read() // eat closing paren
- break Token
- }
-
- arg, argDiags := p.ParseExpression()
- args = append(args, arg)
- diags = append(diags, argDiags...)
- if p.recovery && argDiags.HasErrors() {
- // if there was a parse error in the argument then we've
- // probably been left in a weird place in the token stream,
- // so we'll bail out with a partial argument list.
- recoveredTok := p.recover(TokenCParen)
-
- // record the recovered token, if one was found
- if recoveredTok.Type == TokenCParen {
- closeTok = recoveredTok
- }
- break Token
- }
-
- sep := p.Read()
- if sep.Type == TokenCParen {
- closeTok = sep
- break Token
- }
-
- if sep.Type == TokenEllipsis {
- expandFinal = true
-
- if p.Peek().Type != TokenCParen {
- if !p.recovery {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Missing closing parenthesis",
- Detail: "An expanded function argument (with ...) must be immediately followed by closing parentheses.",
- Subject: &sep.Range,
- Context: hcl.RangeBetween(name.Range, sep.Range).Ptr(),
- })
- }
- closeTok = p.recover(TokenCParen)
- } else {
- closeTok = p.Read() // eat closing paren
- }
- break Token
- }
-
- if sep.Type != TokenComma {
- switch sep.Type {
- case TokenEOF:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unterminated function call",
- Detail: "There is no closing parenthesis for this function call before the end of the file. This may be caused by incorrect parenthesis nesting elsewhere in this file.",
- Subject: hcl.RangeBetween(name.Range, openTok.Range).Ptr(),
- })
- default:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Missing argument separator",
- Detail: "A comma is required to separate each function argument from the next.",
- Subject: &sep.Range,
- Context: hcl.RangeBetween(name.Range, sep.Range).Ptr(),
- })
- }
- closeTok = p.recover(TokenCParen)
- break Token
- }
-
- if p.Peek().Type == TokenCParen {
- // A trailing comma after the last argument gets us in here.
- closeTok = p.Read() // eat closing paren
- break Token
- }
-
- }
-
- p.PopIncludeNewlines()
-
- return &FunctionCallExpr{
- Name: nameStr,
- Args: args,
-
- ExpandFinal: expandFinal,
-
- NameRange: nameRange,
- OpenParenRange: openTok.Range,
- CloseParenRange: closeTok.Range,
- }, diags
-}
-
-func (p *parser) parseTupleCons() (Expression, hcl.Diagnostics) {
- open := p.Read()
- if open.Type != TokenOBrack {
- // Should never happen if callers are behaving
- panic("parseTupleCons called without peeker pointing to open bracket")
- }
-
- p.PushIncludeNewlines(false)
- defer p.PopIncludeNewlines()
-
- if forKeyword.TokenMatches(p.Peek()) {
- return p.finishParsingForExpr(open)
- }
-
- var close Token
-
- var diags hcl.Diagnostics
- var exprs []Expression
-
- for {
- next := p.Peek()
- if next.Type == TokenCBrack {
- close = p.Read() // eat closer
- break
- }
-
- expr, exprDiags := p.ParseExpression()
- exprs = append(exprs, expr)
- diags = append(diags, exprDiags...)
-
- if p.recovery && exprDiags.HasErrors() {
- // If expression parsing failed then we are probably in a strange
- // place in the token stream, so we'll bail out and try to reset
- // to after our closing bracket to allow parsing to continue.
- close = p.recover(TokenCBrack)
- break
- }
-
- next = p.Peek()
- if next.Type == TokenCBrack {
- close = p.Read() // eat closer
- break
- }
-
- if next.Type != TokenComma {
- if !p.recovery {
- switch next.Type {
- case TokenEOF:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unterminated tuple constructor expression",
- Detail: "There is no corresponding closing bracket before the end of the file. This may be caused by incorrect bracket nesting elsewhere in this file.",
- Subject: open.Range.Ptr(),
- })
- default:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Missing item separator",
- Detail: "Expected a comma to mark the beginning of the next item.",
- Subject: &next.Range,
- Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
- })
- }
- }
- close = p.recover(TokenCBrack)
- break
- }
-
- p.Read() // eat comma
-
- }
-
- return &TupleConsExpr{
- Exprs: exprs,
-
- SrcRange: hcl.RangeBetween(open.Range, close.Range),
- OpenRange: open.Range,
- }, diags
-}
-
-func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) {
- open := p.Read()
- if open.Type != TokenOBrace {
- // Should never happen if callers are behaving
- panic("parseObjectCons called without peeker pointing to open brace")
- }
-
- // We must temporarily stop looking at newlines here while we check for
- // a "for" keyword, since for expressions are _not_ newline-sensitive,
- // even though object constructors are.
- p.PushIncludeNewlines(false)
- isFor := forKeyword.TokenMatches(p.Peek())
- p.PopIncludeNewlines()
- if isFor {
- return p.finishParsingForExpr(open)
- }
-
- p.PushIncludeNewlines(true)
- defer p.PopIncludeNewlines()
-
- var close Token
-
- var diags hcl.Diagnostics
- var items []ObjectConsItem
-
- for {
- next := p.Peek()
- if next.Type == TokenNewline {
- p.Read() // eat newline
- continue
- }
-
- if next.Type == TokenCBrace {
- close = p.Read() // eat closer
- break
- }
-
- // Wrapping parens are not explicitly represented in the AST, but
- // we want to use them here to disambiguate intepreting a mapping
- // key as a full expression rather than just a name, and so
- // we'll remember this was present and use it to force the
- // behavior of our final ObjectConsKeyExpr.
- forceNonLiteral := (p.Peek().Type == TokenOParen)
-
- var key Expression
- var keyDiags hcl.Diagnostics
- key, keyDiags = p.ParseExpression()
- diags = append(diags, keyDiags...)
-
- if p.recovery && keyDiags.HasErrors() {
- // If expression parsing failed then we are probably in a strange
- // place in the token stream, so we'll bail out and try to reset
- // to after our closing brace to allow parsing to continue.
- close = p.recover(TokenCBrace)
- break
- }
-
- // We wrap up the key expression in a special wrapper that deals
- // with our special case that naked identifiers as object keys
- // are interpreted as literal strings.
- key = &ObjectConsKeyExpr{
- Wrapped: key,
- ForceNonLiteral: forceNonLiteral,
- }
-
- next = p.Peek()
- if next.Type != TokenEqual && next.Type != TokenColon {
- if !p.recovery {
- switch next.Type {
- case TokenNewline, TokenComma:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Missing attribute value",
- Detail: "Expected an attribute value, introduced by an equals sign (\"=\").",
- Subject: &next.Range,
- Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
- })
- case TokenIdent:
- // Although this might just be a plain old missing equals
- // sign before a reference, one way to get here is to try
- // to write an attribute name containing a period followed
- // by a digit, which was valid in HCL1, like this:
- // foo1.2_bar = "baz"
- // We can't know exactly what the user intended here, but
- // we'll augment our message with an extra hint in this case
- // in case it is helpful.
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Missing key/value separator",
- Detail: "Expected an equals sign (\"=\") to mark the beginning of the attribute value. If you intended to given an attribute name containing periods or spaces, write the name in quotes to create a string literal.",
- Subject: &next.Range,
- Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
- })
- case TokenEOF:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unterminated object constructor expression",
- Detail: "There is no corresponding closing brace before the end of the file. This may be caused by incorrect brace nesting elsewhere in this file.",
- Subject: open.Range.Ptr(),
- })
- default:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Missing key/value separator",
- Detail: "Expected an equals sign (\"=\") to mark the beginning of the attribute value.",
- Subject: &next.Range,
- Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
- })
- }
- }
- close = p.recover(TokenCBrace)
- break
- }
-
- p.Read() // eat equals sign or colon
-
- value, valueDiags := p.ParseExpression()
- diags = append(diags, valueDiags...)
-
- if p.recovery && valueDiags.HasErrors() {
- // If expression parsing failed then we are probably in a strange
- // place in the token stream, so we'll bail out and try to reset
- // to after our closing brace to allow parsing to continue.
- close = p.recover(TokenCBrace)
- break
- }
-
- items = append(items, ObjectConsItem{
- KeyExpr: key,
- ValueExpr: value,
- })
-
- next = p.Peek()
- if next.Type == TokenCBrace {
- close = p.Read() // eat closer
- break
- }
-
- if next.Type != TokenComma && next.Type != TokenNewline {
- if !p.recovery {
- switch next.Type {
- case TokenEOF:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unterminated object constructor expression",
- Detail: "There is no corresponding closing brace before the end of the file. This may be caused by incorrect brace nesting elsewhere in this file.",
- Subject: open.Range.Ptr(),
- })
- default:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Missing attribute separator",
- Detail: "Expected a newline or comma to mark the beginning of the next attribute.",
- Subject: &next.Range,
- Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
- })
- }
- }
- close = p.recover(TokenCBrace)
- break
- }
-
- p.Read() // eat comma or newline
-
- }
-
- return &ObjectConsExpr{
- Items: items,
-
- SrcRange: hcl.RangeBetween(open.Range, close.Range),
- OpenRange: open.Range,
- }, diags
-}
-
-func (p *parser) finishParsingForExpr(open Token) (Expression, hcl.Diagnostics) {
- p.PushIncludeNewlines(false)
- defer p.PopIncludeNewlines()
- introducer := p.Read()
- if !forKeyword.TokenMatches(introducer) {
- // Should never happen if callers are behaving
- panic("finishParsingForExpr called without peeker pointing to 'for' identifier")
- }
-
- var makeObj bool
- var closeType TokenType
- switch open.Type {
- case TokenOBrace:
- makeObj = true
- closeType = TokenCBrace
- case TokenOBrack:
- makeObj = false // making a tuple
- closeType = TokenCBrack
- default:
- // Should never happen if callers are behaving
- panic("finishParsingForExpr called with invalid open token")
- }
-
- var diags hcl.Diagnostics
- var keyName, valName string
-
- if p.Peek().Type != TokenIdent {
- if !p.recovery {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid 'for' expression",
- Detail: "For expression requires variable name after 'for'.",
- Subject: p.Peek().Range.Ptr(),
- Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(),
- })
- }
- close := p.recover(closeType)
- return &LiteralValueExpr{
- Val: cty.DynamicVal,
- SrcRange: hcl.RangeBetween(open.Range, close.Range),
- }, diags
- }
-
- valName = string(p.Read().Bytes)
-
- if p.Peek().Type == TokenComma {
- // What we just read was actually the key, then.
- keyName = valName
- p.Read() // eat comma
-
- if p.Peek().Type != TokenIdent {
- if !p.recovery {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid 'for' expression",
- Detail: "For expression requires value variable name after comma.",
- Subject: p.Peek().Range.Ptr(),
- Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(),
- })
- }
- close := p.recover(closeType)
- return &LiteralValueExpr{
- Val: cty.DynamicVal,
- SrcRange: hcl.RangeBetween(open.Range, close.Range),
- }, diags
- }
-
- valName = string(p.Read().Bytes)
- }
-
- if !inKeyword.TokenMatches(p.Peek()) {
- if !p.recovery {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid 'for' expression",
- Detail: "For expression requires the 'in' keyword after its name declarations.",
- Subject: p.Peek().Range.Ptr(),
- Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(),
- })
- }
- close := p.recover(closeType)
- return &LiteralValueExpr{
- Val: cty.DynamicVal,
- SrcRange: hcl.RangeBetween(open.Range, close.Range),
- }, diags
- }
- p.Read() // eat 'in' keyword
-
- collExpr, collDiags := p.ParseExpression()
- diags = append(diags, collDiags...)
- if p.recovery && collDiags.HasErrors() {
- close := p.recover(closeType)
- return &LiteralValueExpr{
- Val: cty.DynamicVal,
- SrcRange: hcl.RangeBetween(open.Range, close.Range),
- }, diags
- }
-
- if p.Peek().Type != TokenColon {
- if !p.recovery {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid 'for' expression",
- Detail: "For expression requires a colon after the collection expression.",
- Subject: p.Peek().Range.Ptr(),
- Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(),
- })
- }
- close := p.recover(closeType)
- return &LiteralValueExpr{
- Val: cty.DynamicVal,
- SrcRange: hcl.RangeBetween(open.Range, close.Range),
- }, diags
- }
- p.Read() // eat colon
-
- var keyExpr, valExpr Expression
- var keyDiags, valDiags hcl.Diagnostics
- valExpr, valDiags = p.ParseExpression()
- if p.Peek().Type == TokenFatArrow {
- // What we just parsed was actually keyExpr
- p.Read() // eat the fat arrow
- keyExpr, keyDiags = valExpr, valDiags
-
- valExpr, valDiags = p.ParseExpression()
- }
- diags = append(diags, keyDiags...)
- diags = append(diags, valDiags...)
- if p.recovery && (keyDiags.HasErrors() || valDiags.HasErrors()) {
- close := p.recover(closeType)
- return &LiteralValueExpr{
- Val: cty.DynamicVal,
- SrcRange: hcl.RangeBetween(open.Range, close.Range),
- }, diags
- }
-
- group := false
- var ellipsis Token
- if p.Peek().Type == TokenEllipsis {
- ellipsis = p.Read()
- group = true
- }
-
- var condExpr Expression
- var condDiags hcl.Diagnostics
- if ifKeyword.TokenMatches(p.Peek()) {
- p.Read() // eat "if"
- condExpr, condDiags = p.ParseExpression()
- diags = append(diags, condDiags...)
- if p.recovery && condDiags.HasErrors() {
- close := p.recover(p.oppositeBracket(open.Type))
- return &LiteralValueExpr{
- Val: cty.DynamicVal,
- SrcRange: hcl.RangeBetween(open.Range, close.Range),
- }, diags
- }
- }
-
- var close Token
- if p.Peek().Type == closeType {
- close = p.Read()
- } else {
- if !p.recovery {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid 'for' expression",
- Detail: "Extra characters after the end of the 'for' expression.",
- Subject: p.Peek().Range.Ptr(),
- Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(),
- })
- }
- close = p.recover(closeType)
- }
-
- if !makeObj {
- if keyExpr != nil {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid 'for' expression",
- Detail: "Key expression is not valid when building a tuple.",
- Subject: keyExpr.Range().Ptr(),
- Context: hcl.RangeBetween(open.Range, close.Range).Ptr(),
- })
- }
-
- if group {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid 'for' expression",
- Detail: "Grouping ellipsis (...) cannot be used when building a tuple.",
- Subject: &ellipsis.Range,
- Context: hcl.RangeBetween(open.Range, close.Range).Ptr(),
- })
- }
- } else {
- if keyExpr == nil {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid 'for' expression",
- Detail: "Key expression is required when building an object.",
- Subject: valExpr.Range().Ptr(),
- Context: hcl.RangeBetween(open.Range, close.Range).Ptr(),
- })
- }
- }
-
- return &ForExpr{
- KeyVar: keyName,
- ValVar: valName,
- CollExpr: collExpr,
- KeyExpr: keyExpr,
- ValExpr: valExpr,
- CondExpr: condExpr,
- Group: group,
-
- SrcRange: hcl.RangeBetween(open.Range, close.Range),
- OpenRange: open.Range,
- CloseRange: close.Range,
- }, diags
-}
-
-// parseQuotedStringLiteral is a helper for parsing quoted strings that
-// aren't allowed to contain any interpolations, such as block labels.
-func (p *parser) parseQuotedStringLiteral() (string, hcl.Range, hcl.Diagnostics) {
- oQuote := p.Read()
- if oQuote.Type != TokenOQuote {
- return "", oQuote.Range, hcl.Diagnostics{
- {
- Severity: hcl.DiagError,
- Summary: "Invalid string literal",
- Detail: "A quoted string is required here.",
- Subject: &oQuote.Range,
- },
- }
- }
-
- var diags hcl.Diagnostics
- ret := &bytes.Buffer{}
- var endRange hcl.Range
-
-Token:
- for {
- tok := p.Read()
- switch tok.Type {
-
- case TokenCQuote:
- endRange = tok.Range
- break Token
-
- case TokenQuotedLit:
- s, sDiags := ParseStringLiteralToken(tok)
- diags = append(diags, sDiags...)
- ret.WriteString(s)
-
- case TokenTemplateControl, TokenTemplateInterp:
- which := "$"
- if tok.Type == TokenTemplateControl {
- which = "%"
- }
-
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid string literal",
- Detail: fmt.Sprintf(
- "Template sequences are not allowed in this string. To include a literal %q, double it (as \"%s%s\") to escape it.",
- which, which, which,
- ),
- Subject: &tok.Range,
- Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(),
- })
-
- // Now that we're returning an error callers won't attempt to use
- // the result for any real operations, but they might try to use
- // the partial AST for other analyses, so we'll leave a marker
- // to indicate that there was something invalid in the string to
- // help avoid misinterpretation of the partial result
- ret.WriteString(which)
- ret.WriteString("{ ... }")
-
- p.recover(TokenTemplateSeqEnd) // we'll try to keep parsing after the sequence ends
-
- case TokenEOF:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unterminated string literal",
- Detail: "Unable to find the closing quote mark before the end of the file.",
- Subject: &tok.Range,
- Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(),
- })
- endRange = tok.Range
- break Token
-
- default:
- // Should never happen, as long as the scanner is behaving itself
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid string literal",
- Detail: "This item is not valid in a string literal.",
- Subject: &tok.Range,
- Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(),
- })
- p.recover(TokenCQuote)
- endRange = tok.Range
- break Token
-
- }
-
- }
-
- return ret.String(), hcl.RangeBetween(oQuote.Range, endRange), diags
-}
-
-// ParseStringLiteralToken processes the given token, which must be either a
-// TokenQuotedLit or a TokenStringLit, returning the string resulting from
-// resolving any escape sequences.
-//
-// If any error diagnostics are returned, the returned string may be incomplete
-// or otherwise invalid.
-func ParseStringLiteralToken(tok Token) (string, hcl.Diagnostics) {
- var quoted bool
- switch tok.Type {
- case TokenQuotedLit:
- quoted = true
- case TokenStringLit:
- quoted = false
- default:
- panic("ParseStringLiteralToken can only be used with TokenStringLit and TokenQuotedLit tokens")
- }
- var diags hcl.Diagnostics
-
- ret := make([]byte, 0, len(tok.Bytes))
- slices := scanStringLit(tok.Bytes, quoted)
-
- // We will mutate rng constantly as we walk through our token slices below.
- // Any diagnostics must take a copy of this rng rather than simply pointing
- // to it, e.g. by using rng.Ptr() rather than &rng.
- rng := tok.Range
- rng.End = rng.Start
-
-Slices:
- for _, slice := range slices {
- if len(slice) == 0 {
- continue
- }
-
- // Advance the start of our range to where the previous token ended
- rng.Start = rng.End
-
- // Advance the end of our range to after our token.
- b := slice
- for len(b) > 0 {
- adv, ch, _ := textseg.ScanGraphemeClusters(b, true)
- rng.End.Byte += adv
- switch ch[0] {
- case '\r', '\n':
- rng.End.Line++
- rng.End.Column = 1
- default:
- rng.End.Column++
- }
- b = b[adv:]
- }
-
- TokenType:
- switch slice[0] {
- case '\\':
- if !quoted {
- // If we're not in quoted mode then just treat this token as
- // normal. (Slices can still start with backslash even if we're
- // not specifically looking for backslash sequences.)
- break TokenType
- }
- if len(slice) < 2 {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid escape sequence",
- Detail: "Backslash must be followed by an escape sequence selector character.",
- Subject: rng.Ptr(),
- })
- break TokenType
- }
-
- switch slice[1] {
-
- case 'n':
- ret = append(ret, '\n')
- continue Slices
- case 'r':
- ret = append(ret, '\r')
- continue Slices
- case 't':
- ret = append(ret, '\t')
- continue Slices
- case '"':
- ret = append(ret, '"')
- continue Slices
- case '\\':
- ret = append(ret, '\\')
- continue Slices
- case 'u', 'U':
- if slice[1] == 'u' && len(slice) != 6 {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid escape sequence",
- Detail: "The \\u escape sequence must be followed by four hexadecimal digits.",
- Subject: rng.Ptr(),
- })
- break TokenType
- } else if slice[1] == 'U' && len(slice) != 10 {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid escape sequence",
- Detail: "The \\U escape sequence must be followed by eight hexadecimal digits.",
- Subject: rng.Ptr(),
- })
- break TokenType
- }
-
- numHex := string(slice[2:])
- num, err := strconv.ParseUint(numHex, 16, 32)
- if err != nil {
- // Should never happen because the scanner won't match
- // a sequence of digits that isn't valid.
- panic(err)
- }
-
- r := rune(num)
- l := utf8.RuneLen(r)
- if l == -1 {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid escape sequence",
- Detail: fmt.Sprintf("Cannot encode character U+%04x in UTF-8.", num),
- Subject: rng.Ptr(),
- })
- break TokenType
- }
- for i := 0; i < l; i++ {
- ret = append(ret, 0)
- }
- rb := ret[len(ret)-l:]
- utf8.EncodeRune(rb, r)
-
- continue Slices
-
- default:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid escape sequence",
- Detail: fmt.Sprintf("The symbol %q is not a valid escape sequence selector.", slice[1:]),
- Subject: rng.Ptr(),
- })
- ret = append(ret, slice[1:]...)
- continue Slices
- }
-
- case '$', '%':
- if len(slice) != 3 {
- // Not long enough to be our escape sequence, so it's literal.
- break TokenType
- }
-
- if slice[1] == slice[0] && slice[2] == '{' {
- ret = append(ret, slice[0])
- ret = append(ret, '{')
- continue Slices
- }
-
- break TokenType
- }
-
- // If we fall out here or break out of here from the switch above
- // then this slice is just a literal.
- ret = append(ret, slice...)
- }
-
- return string(ret), diags
-}
-
-// setRecovery turns on recovery mode without actually doing any recovery.
-// This can be used when a parser knowingly leaves the peeker in a useless
-// place and wants to suppress errors that might result from that decision.
-func (p *parser) setRecovery() {
- p.recovery = true
-}
-
-// recover seeks forward in the token stream until it finds TokenType "end",
-// then returns with the peeker pointed at the following token.
-//
-// If the given token type is a bracketer, this function will additionally
-// count nested instances of the brackets to try to leave the peeker at
-// the end of the _current_ instance of that bracketer, skipping over any
-// nested instances. This is a best-effort operation and may have
-// unpredictable results on input with bad bracketer nesting.
-func (p *parser) recover(end TokenType) Token {
- start := p.oppositeBracket(end)
- p.recovery = true
-
- nest := 0
- for {
- tok := p.Read()
- ty := tok.Type
- if end == TokenTemplateSeqEnd && ty == TokenTemplateControl {
- // normalize so that our matching behavior can work, since
- // TokenTemplateControl/TokenTemplateInterp are asymmetrical
- // with TokenTemplateSeqEnd and thus we need to count both
- // openers if that's the closer we're looking for.
- ty = TokenTemplateInterp
- }
-
- switch ty {
- case start:
- nest++
- case end:
- if nest < 1 {
- return tok
- }
-
- nest--
- case TokenEOF:
- return tok
- }
- }
-}
-
-// recoverOver seeks forward in the token stream until it finds a block
-// starting with TokenType "start", then finds the corresponding end token,
-// leaving the peeker pointed at the token after that end token.
-//
-// The given token type _must_ be a bracketer. For example, if the given
-// start token is TokenOBrace then the parser will be left at the _end_ of
-// the next brace-delimited block encountered, or at EOF if no such block
-// is found or it is unclosed.
-func (p *parser) recoverOver(start TokenType) {
- end := p.oppositeBracket(start)
-
- // find the opening bracket first
-Token:
- for {
- tok := p.Read()
- switch tok.Type {
- case start, TokenEOF:
- break Token
- }
- }
-
- // Now use our existing recover function to locate the _end_ of the
- // container we've found.
- p.recover(end)
-}
-
-func (p *parser) recoverAfterBodyItem() {
- p.recovery = true
- var open []TokenType
-
-Token:
- for {
- tok := p.Read()
-
- switch tok.Type {
-
- case TokenNewline:
- if len(open) == 0 {
- break Token
- }
-
- case TokenEOF:
- break Token
-
- case TokenOBrace, TokenOBrack, TokenOParen, TokenOQuote, TokenOHeredoc, TokenTemplateInterp, TokenTemplateControl:
- open = append(open, tok.Type)
-
- case TokenCBrace, TokenCBrack, TokenCParen, TokenCQuote, TokenCHeredoc:
- opener := p.oppositeBracket(tok.Type)
- for len(open) > 0 && open[len(open)-1] != opener {
- open = open[:len(open)-1]
- }
- if len(open) > 0 {
- open = open[:len(open)-1]
- }
-
- case TokenTemplateSeqEnd:
- for len(open) > 0 && open[len(open)-1] != TokenTemplateInterp && open[len(open)-1] != TokenTemplateControl {
- open = open[:len(open)-1]
- }
- if len(open) > 0 {
- open = open[:len(open)-1]
- }
-
- }
- }
-}
-
-// oppositeBracket finds the bracket that opposes the given bracketer, or
-// NilToken if the given token isn't a bracketer.
-//
-// "Bracketer", for the sake of this function, is one end of a matching
-// open/close set of tokens that establish a bracketing context.
-func (p *parser) oppositeBracket(ty TokenType) TokenType {
- switch ty {
-
- case TokenOBrace:
- return TokenCBrace
- case TokenOBrack:
- return TokenCBrack
- case TokenOParen:
- return TokenCParen
- case TokenOQuote:
- return TokenCQuote
- case TokenOHeredoc:
- return TokenCHeredoc
-
- case TokenCBrace:
- return TokenOBrace
- case TokenCBrack:
- return TokenOBrack
- case TokenCParen:
- return TokenOParen
- case TokenCQuote:
- return TokenOQuote
- case TokenCHeredoc:
- return TokenOHeredoc
-
- case TokenTemplateControl:
- return TokenTemplateSeqEnd
- case TokenTemplateInterp:
- return TokenTemplateSeqEnd
- case TokenTemplateSeqEnd:
- // This is ambigous, but we return Interp here because that's
- // what's assumed by the "recover" method.
- return TokenTemplateInterp
-
- default:
- return TokenNil
- }
-}
-
-func errPlaceholderExpr(rng hcl.Range) Expression {
- return &LiteralValueExpr{
- Val: cty.DynamicVal,
- SrcRange: rng,
- }
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go
deleted file mode 100644
index 19e98806..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_template.go
+++ /dev/null
@@ -1,865 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-import (
- "fmt"
- "strings"
- "unicode"
-
- "github.com/apparentlymart/go-textseg/v15/textseg"
- "github.com/hashicorp/hcl/v2"
- "github.com/zclconf/go-cty/cty"
-)
-
-func (p *parser) ParseTemplate() (Expression, hcl.Diagnostics) {
- return p.parseTemplate(TokenEOF, false)
-}
-
-func (p *parser) parseTemplate(end TokenType, flushHeredoc bool) (Expression, hcl.Diagnostics) {
- exprs, passthru, rng, diags := p.parseTemplateInner(end, flushHeredoc)
-
- if passthru {
- if len(exprs) != 1 {
- panic("passthru set with len(exprs) != 1")
- }
- return &TemplateWrapExpr{
- Wrapped: exprs[0],
- SrcRange: rng,
- }, diags
- }
-
- return &TemplateExpr{
- Parts: exprs,
- SrcRange: rng,
- }, diags
-}
-
-func (p *parser) parseTemplateInner(end TokenType, flushHeredoc bool) ([]Expression, bool, hcl.Range, hcl.Diagnostics) {
- parts, diags := p.parseTemplateParts(end)
- if flushHeredoc {
- flushHeredocTemplateParts(parts) // Trim off leading spaces on lines per the flush heredoc spec
- }
- meldConsecutiveStringLiterals(parts)
- tp := templateParser{
- Tokens: parts.Tokens,
- SrcRange: parts.SrcRange,
- }
- exprs, exprsDiags := tp.parseRoot()
- diags = append(diags, exprsDiags...)
-
- passthru := false
- if len(parts.Tokens) == 2 { // one real token and one synthetic "end" token
- if _, isInterp := parts.Tokens[0].(*templateInterpToken); isInterp {
- passthru = true
- }
- }
-
- return exprs, passthru, parts.SrcRange, diags
-}
-
-type templateParser struct {
- Tokens []templateToken
- SrcRange hcl.Range
-
- pos int
-}
-
-func (p *templateParser) parseRoot() ([]Expression, hcl.Diagnostics) {
- var exprs []Expression
- var diags hcl.Diagnostics
-
- for {
- next := p.Peek()
- if _, isEnd := next.(*templateEndToken); isEnd {
- break
- }
-
- expr, exprDiags := p.parseExpr()
- diags = append(diags, exprDiags...)
- exprs = append(exprs, expr)
- }
-
- return exprs, diags
-}
-
-func (p *templateParser) parseExpr() (Expression, hcl.Diagnostics) {
- next := p.Peek()
- switch tok := next.(type) {
-
- case *templateLiteralToken:
- p.Read() // eat literal
- return &LiteralValueExpr{
- Val: cty.StringVal(tok.Val),
- SrcRange: tok.SrcRange,
- }, nil
-
- case *templateInterpToken:
- p.Read() // eat interp
- return tok.Expr, nil
-
- case *templateIfToken:
- return p.parseIf()
-
- case *templateForToken:
- return p.parseFor()
-
- case *templateEndToken:
- p.Read() // eat erroneous token
- return errPlaceholderExpr(tok.SrcRange), hcl.Diagnostics{
- {
- // This is a particularly unhelpful diagnostic, so callers
- // should attempt to pre-empt it and produce a more helpful
- // diagnostic that is context-aware.
- Severity: hcl.DiagError,
- Summary: "Unexpected end of template",
- Detail: "The control directives within this template are unbalanced.",
- Subject: &tok.SrcRange,
- },
- }
-
- case *templateEndCtrlToken:
- p.Read() // eat erroneous token
- return errPlaceholderExpr(tok.SrcRange), hcl.Diagnostics{
- {
- Severity: hcl.DiagError,
- Summary: fmt.Sprintf("Unexpected %s directive", tok.Name()),
- Detail: "The control directives within this template are unbalanced.",
- Subject: &tok.SrcRange,
- },
- }
-
- default:
- // should never happen, because above should be exhaustive
- panic(fmt.Sprintf("unhandled template token type %T", next))
- }
-}
-
-func (p *templateParser) parseIf() (Expression, hcl.Diagnostics) {
- open := p.Read()
- openIf, isIf := open.(*templateIfToken)
- if !isIf {
- // should never happen if caller is behaving
- panic("parseIf called with peeker not pointing at if token")
- }
-
- var ifExprs, elseExprs []Expression
- var diags hcl.Diagnostics
- var endifRange hcl.Range
-
- currentExprs := &ifExprs
-Token:
- for {
- next := p.Peek()
- if end, isEnd := next.(*templateEndToken); isEnd {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unexpected end of template",
- Detail: fmt.Sprintf(
- "The if directive at %s is missing its corresponding endif directive.",
- openIf.SrcRange,
- ),
- Subject: &end.SrcRange,
- })
- return errPlaceholderExpr(end.SrcRange), diags
- }
- if end, isCtrlEnd := next.(*templateEndCtrlToken); isCtrlEnd {
- p.Read() // eat end directive
-
- switch end.Type {
-
- case templateElse:
- if currentExprs == &ifExprs {
- currentExprs = &elseExprs
- continue Token
- }
-
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unexpected else directive",
- Detail: fmt.Sprintf(
- "Already in the else clause for the if started at %s.",
- openIf.SrcRange,
- ),
- Subject: &end.SrcRange,
- })
-
- case templateEndIf:
- endifRange = end.SrcRange
- break Token
-
- default:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: fmt.Sprintf("Unexpected %s directive", end.Name()),
- Detail: fmt.Sprintf(
- "Expecting an endif directive for the if started at %s.",
- openIf.SrcRange,
- ),
- Subject: &end.SrcRange,
- })
- }
-
- return errPlaceholderExpr(end.SrcRange), diags
- }
-
- expr, exprDiags := p.parseExpr()
- diags = append(diags, exprDiags...)
- *currentExprs = append(*currentExprs, expr)
- }
-
- if len(ifExprs) == 0 {
- ifExprs = append(ifExprs, &LiteralValueExpr{
- Val: cty.StringVal(""),
- SrcRange: hcl.Range{
- Filename: openIf.SrcRange.Filename,
- Start: openIf.SrcRange.End,
- End: openIf.SrcRange.End,
- },
- })
- }
- if len(elseExprs) == 0 {
- elseExprs = append(elseExprs, &LiteralValueExpr{
- Val: cty.StringVal(""),
- SrcRange: hcl.Range{
- Filename: endifRange.Filename,
- Start: endifRange.Start,
- End: endifRange.Start,
- },
- })
- }
-
- trueExpr := &TemplateExpr{
- Parts: ifExprs,
- SrcRange: hcl.RangeBetween(ifExprs[0].Range(), ifExprs[len(ifExprs)-1].Range()),
- }
- falseExpr := &TemplateExpr{
- Parts: elseExprs,
- SrcRange: hcl.RangeBetween(elseExprs[0].Range(), elseExprs[len(elseExprs)-1].Range()),
- }
-
- return &ConditionalExpr{
- Condition: openIf.CondExpr,
- TrueResult: trueExpr,
- FalseResult: falseExpr,
-
- SrcRange: hcl.RangeBetween(openIf.SrcRange, endifRange),
- }, diags
-}
-
-func (p *templateParser) parseFor() (Expression, hcl.Diagnostics) {
- open := p.Read()
- openFor, isFor := open.(*templateForToken)
- if !isFor {
- // should never happen if caller is behaving
- panic("parseFor called with peeker not pointing at for token")
- }
-
- var contentExprs []Expression
- var diags hcl.Diagnostics
- var endforRange hcl.Range
-
-Token:
- for {
- next := p.Peek()
- if end, isEnd := next.(*templateEndToken); isEnd {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unexpected end of template",
- Detail: fmt.Sprintf(
- "The for directive at %s is missing its corresponding endfor directive.",
- openFor.SrcRange,
- ),
- Subject: &end.SrcRange,
- })
- return errPlaceholderExpr(end.SrcRange), diags
- }
- if end, isCtrlEnd := next.(*templateEndCtrlToken); isCtrlEnd {
- p.Read() // eat end directive
-
- switch end.Type {
-
- case templateElse:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unexpected else directive",
- Detail: "An else clause is not expected for a for directive.",
- Subject: &end.SrcRange,
- })
-
- case templateEndFor:
- endforRange = end.SrcRange
- break Token
-
- default:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: fmt.Sprintf("Unexpected %s directive", end.Name()),
- Detail: fmt.Sprintf(
- "Expecting an endfor directive corresponding to the for directive at %s.",
- openFor.SrcRange,
- ),
- Subject: &end.SrcRange,
- })
- }
-
- return errPlaceholderExpr(end.SrcRange), diags
- }
-
- expr, exprDiags := p.parseExpr()
- diags = append(diags, exprDiags...)
- contentExprs = append(contentExprs, expr)
- }
-
- if len(contentExprs) == 0 {
- contentExprs = append(contentExprs, &LiteralValueExpr{
- Val: cty.StringVal(""),
- SrcRange: hcl.Range{
- Filename: openFor.SrcRange.Filename,
- Start: openFor.SrcRange.End,
- End: openFor.SrcRange.End,
- },
- })
- }
-
- contentExpr := &TemplateExpr{
- Parts: contentExprs,
- SrcRange: hcl.RangeBetween(contentExprs[0].Range(), contentExprs[len(contentExprs)-1].Range()),
- }
-
- forExpr := &ForExpr{
- KeyVar: openFor.KeyVar,
- ValVar: openFor.ValVar,
-
- CollExpr: openFor.CollExpr,
- ValExpr: contentExpr,
-
- SrcRange: hcl.RangeBetween(openFor.SrcRange, endforRange),
- OpenRange: openFor.SrcRange,
- CloseRange: endforRange,
- }
-
- return &TemplateJoinExpr{
- Tuple: forExpr,
- }, diags
-}
-
-func (p *templateParser) Peek() templateToken {
- return p.Tokens[p.pos]
-}
-
-func (p *templateParser) Read() templateToken {
- ret := p.Peek()
- if _, end := ret.(*templateEndToken); !end {
- p.pos++
- }
- return ret
-}
-
-// parseTemplateParts produces a flat sequence of "template tokens", which are
-// either literal values (with any "trimming" already applied), interpolation
-// sequences, or control flow markers.
-//
-// A further pass is required on the result to turn it into an AST.
-func (p *parser) parseTemplateParts(end TokenType) (*templateParts, hcl.Diagnostics) {
- var parts []templateToken
- var diags hcl.Diagnostics
-
- startRange := p.NextRange()
- ltrimNext := false
- nextCanTrimPrev := false
- var endRange hcl.Range
-
-Token:
- for {
- next := p.Read()
- if next.Type == end {
- // all done!
- endRange = next.Range
- break
- }
-
- ltrim := ltrimNext
- ltrimNext = false
- canTrimPrev := nextCanTrimPrev
- nextCanTrimPrev = false
-
- switch next.Type {
- case TokenStringLit, TokenQuotedLit:
- str, strDiags := ParseStringLiteralToken(next)
- diags = append(diags, strDiags...)
-
- if ltrim {
- str = strings.TrimLeftFunc(str, unicode.IsSpace)
- }
-
- parts = append(parts, &templateLiteralToken{
- Val: str,
- SrcRange: next.Range,
- })
- nextCanTrimPrev = true
-
- case TokenTemplateInterp:
- // if the opener is ${~ then we want to eat any trailing whitespace
- // in the preceding literal token, assuming it is indeed a literal
- // token.
- if canTrimPrev && len(next.Bytes) == 3 && next.Bytes[2] == '~' && len(parts) > 0 {
- prevExpr := parts[len(parts)-1]
- if lexpr, ok := prevExpr.(*templateLiteralToken); ok {
- lexpr.Val = strings.TrimRightFunc(lexpr.Val, unicode.IsSpace)
- }
- }
-
- p.PushIncludeNewlines(false)
- expr, exprDiags := p.ParseExpression()
- diags = append(diags, exprDiags...)
- close := p.Peek()
- if close.Type != TokenTemplateSeqEnd {
- if !p.recovery {
- switch close.Type {
- case TokenEOF:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unclosed template interpolation sequence",
- Detail: "There is no closing brace for this interpolation sequence before the end of the file. This might be caused by incorrect nesting inside the given expression.",
- Subject: &startRange,
- })
- case TokenColon:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Extra characters after interpolation expression",
- Detail: "Template interpolation doesn't expect a colon at this location. Did you intend this to be a literal sequence to be processed as part of another language? If so, you can escape it by starting with \"$${\" instead of just \"${\".",
- Subject: &close.Range,
- Context: hcl.RangeBetween(startRange, close.Range).Ptr(),
- })
- default:
- if (close.Type == TokenCQuote || close.Type == TokenOQuote) && end == TokenCQuote {
- // We'll get here if we're processing a _quoted_
- // template and we find an errant quote inside an
- // interpolation sequence, which suggests that
- // the interpolation sequence is missing its terminator.
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unclosed template interpolation sequence",
- Detail: "There is no closing brace for this interpolation sequence before the end of the quoted template. This might be caused by incorrect nesting inside the given expression.",
- Subject: &startRange,
- })
- } else {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Extra characters after interpolation expression",
- Detail: "Expected a closing brace to end the interpolation expression, but found extra characters.\n\nThis can happen when you include interpolation syntax for another language, such as shell scripting, but forget to escape the interpolation start token. If this is an embedded sequence for another language, escape it by starting with \"$${\" instead of just \"${\".",
- Subject: &close.Range,
- Context: hcl.RangeBetween(startRange, close.Range).Ptr(),
- })
- }
- }
- }
- p.recover(TokenTemplateSeqEnd)
- } else {
- p.Read() // eat closing brace
-
- // If the closer is ~} then we want to eat any leading
- // whitespace on the next token, if it turns out to be a
- // literal token.
- if len(close.Bytes) == 2 && close.Bytes[0] == '~' {
- ltrimNext = true
- }
- }
- p.PopIncludeNewlines()
- parts = append(parts, &templateInterpToken{
- Expr: expr,
- SrcRange: hcl.RangeBetween(next.Range, close.Range),
- })
-
- case TokenTemplateControl:
- // if the opener is %{~ then we want to eat any trailing whitespace
- // in the preceding literal token, assuming it is indeed a literal
- // token.
- if canTrimPrev && len(next.Bytes) == 3 && next.Bytes[2] == '~' && len(parts) > 0 {
- prevExpr := parts[len(parts)-1]
- if lexpr, ok := prevExpr.(*templateLiteralToken); ok {
- lexpr.Val = strings.TrimRightFunc(lexpr.Val, unicode.IsSpace)
- }
- }
- p.PushIncludeNewlines(false)
-
- kw := p.Peek()
- if kw.Type != TokenIdent {
- if !p.recovery {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid template directive",
- Detail: "A template directive keyword (\"if\", \"for\", etc) is expected at the beginning of a %{ sequence.",
- Subject: &kw.Range,
- Context: hcl.RangeBetween(next.Range, kw.Range).Ptr(),
- })
- }
- p.recover(TokenTemplateSeqEnd)
- p.PopIncludeNewlines()
- continue Token
- }
- p.Read() // eat keyword token
-
- switch {
-
- case ifKeyword.TokenMatches(kw):
- condExpr, exprDiags := p.ParseExpression()
- diags = append(diags, exprDiags...)
- parts = append(parts, &templateIfToken{
- CondExpr: condExpr,
- SrcRange: hcl.RangeBetween(next.Range, p.NextRange()),
- })
-
- case elseKeyword.TokenMatches(kw):
- parts = append(parts, &templateEndCtrlToken{
- Type: templateElse,
- SrcRange: hcl.RangeBetween(next.Range, p.NextRange()),
- })
-
- case endifKeyword.TokenMatches(kw):
- parts = append(parts, &templateEndCtrlToken{
- Type: templateEndIf,
- SrcRange: hcl.RangeBetween(next.Range, p.NextRange()),
- })
-
- case forKeyword.TokenMatches(kw):
- var keyName, valName string
- if p.Peek().Type != TokenIdent {
- if !p.recovery {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid 'for' directive",
- Detail: "For directive requires variable name after 'for'.",
- Subject: p.Peek().Range.Ptr(),
- })
- }
- p.recover(TokenTemplateSeqEnd)
- p.PopIncludeNewlines()
- continue Token
- }
-
- valName = string(p.Read().Bytes)
-
- if p.Peek().Type == TokenComma {
- // What we just read was actually the key, then.
- keyName = valName
- p.Read() // eat comma
-
- if p.Peek().Type != TokenIdent {
- if !p.recovery {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid 'for' directive",
- Detail: "For directive requires value variable name after comma.",
- Subject: p.Peek().Range.Ptr(),
- })
- }
- p.recover(TokenTemplateSeqEnd)
- p.PopIncludeNewlines()
- continue Token
- }
-
- valName = string(p.Read().Bytes)
- }
-
- if !inKeyword.TokenMatches(p.Peek()) {
- if !p.recovery {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid 'for' directive",
- Detail: "For directive requires 'in' keyword after names.",
- Subject: p.Peek().Range.Ptr(),
- })
- }
- p.recover(TokenTemplateSeqEnd)
- p.PopIncludeNewlines()
- continue Token
- }
- p.Read() // eat 'in' keyword
-
- collExpr, collDiags := p.ParseExpression()
- diags = append(diags, collDiags...)
- parts = append(parts, &templateForToken{
- KeyVar: keyName,
- ValVar: valName,
- CollExpr: collExpr,
-
- SrcRange: hcl.RangeBetween(next.Range, p.NextRange()),
- })
-
- case endforKeyword.TokenMatches(kw):
- parts = append(parts, &templateEndCtrlToken{
- Type: templateEndFor,
- SrcRange: hcl.RangeBetween(next.Range, p.NextRange()),
- })
-
- default:
- if !p.recovery {
- suggestions := []string{"if", "for", "else", "endif", "endfor"}
- given := string(kw.Bytes)
- suggestion := nameSuggestion(given, suggestions)
- if suggestion != "" {
- suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
- }
-
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid template control keyword",
- Detail: fmt.Sprintf("%q is not a valid template control keyword.%s", given, suggestion),
- Subject: &kw.Range,
- Context: hcl.RangeBetween(next.Range, kw.Range).Ptr(),
- })
- }
- p.recover(TokenTemplateSeqEnd)
- p.PopIncludeNewlines()
- continue Token
-
- }
-
- close := p.Peek()
- if close.Type != TokenTemplateSeqEnd {
- if !p.recovery {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: fmt.Sprintf("Extra characters in %s marker", kw.Bytes),
- Detail: "Expected a closing brace to end the sequence, but found extra characters.",
- Subject: &close.Range,
- Context: hcl.RangeBetween(startRange, close.Range).Ptr(),
- })
- }
- p.recover(TokenTemplateSeqEnd)
- } else {
- p.Read() // eat closing brace
-
- // If the closer is ~} then we want to eat any leading
- // whitespace on the next token, if it turns out to be a
- // literal token.
- if len(close.Bytes) == 2 && close.Bytes[0] == '~' {
- ltrimNext = true
- }
- }
- p.PopIncludeNewlines()
-
- default:
- if !p.recovery {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unterminated template string",
- Detail: "No closing marker was found for the string.",
- Subject: &next.Range,
- Context: hcl.RangeBetween(startRange, next.Range).Ptr(),
- })
- }
- final := p.recover(end)
- endRange = final.Range
- break Token
- }
- }
-
- if len(parts) == 0 {
- // If a sequence has no content, we'll treat it as if it had an
- // empty string in it because that's what the user probably means
- // if they write "" in configuration.
- parts = append(parts, &templateLiteralToken{
- Val: "",
- SrcRange: hcl.Range{
- // Range is the zero-character span immediately after the
- // opening quote.
- Filename: startRange.Filename,
- Start: startRange.End,
- End: startRange.End,
- },
- })
- }
-
- // Always end with an end token, so the parser can produce diagnostics
- // about unclosed items with proper position information.
- parts = append(parts, &templateEndToken{
- SrcRange: endRange,
- })
-
- ret := &templateParts{
- Tokens: parts,
- SrcRange: hcl.RangeBetween(startRange, endRange),
- }
-
- return ret, diags
-}
-
-// flushHeredocTemplateParts modifies in-place the line-leading literal strings
-// to apply the flush heredoc processing rule: find the line with the smallest
-// number of whitespace characters as prefix and then trim that number of
-// characters from all of the lines.
-//
-// This rule is applied to static tokens rather than to the rendered result,
-// so interpolating a string with leading whitespace cannot affect the chosen
-// prefix length.
-func flushHeredocTemplateParts(parts *templateParts) {
- if len(parts.Tokens) == 0 {
- // Nothing to do
- return
- }
-
- const maxInt = int((^uint(0)) >> 1)
-
- minSpaces := maxInt
- newline := true
- var adjust []*templateLiteralToken
- for _, ttok := range parts.Tokens {
- if newline {
- newline = false
- var spaces int
- if lit, ok := ttok.(*templateLiteralToken); ok {
- orig := lit.Val
- trimmed := strings.TrimLeftFunc(orig, unicode.IsSpace)
- // If a token is entirely spaces and ends with a newline
- // then it's a "blank line" and thus not considered for
- // space-prefix-counting purposes.
- if len(trimmed) == 0 && strings.HasSuffix(orig, "\n") {
- spaces = maxInt
- } else {
- spaceBytes := len(lit.Val) - len(trimmed)
- spaces, _ = textseg.TokenCount([]byte(orig[:spaceBytes]), textseg.ScanGraphemeClusters)
- adjust = append(adjust, lit)
- }
- } else if _, ok := ttok.(*templateEndToken); ok {
- break // don't process the end token since it never has spaces before it
- }
- if spaces < minSpaces {
- minSpaces = spaces
- }
- }
- if lit, ok := ttok.(*templateLiteralToken); ok {
- if strings.HasSuffix(lit.Val, "\n") {
- newline = true // The following token, if any, begins a new line
- }
- }
- }
-
- for _, lit := range adjust {
- // Since we want to count space _characters_ rather than space _bytes_,
- // we can't just do a straightforward slice operation here and instead
- // need to hunt for the split point with a scanner.
- valBytes := []byte(lit.Val)
- spaceByteCount := 0
- for i := 0; i < minSpaces; i++ {
- adv, _, _ := textseg.ScanGraphemeClusters(valBytes, true)
- spaceByteCount += adv
- valBytes = valBytes[adv:]
- }
- lit.Val = lit.Val[spaceByteCount:]
- lit.SrcRange.Start.Column += minSpaces
- lit.SrcRange.Start.Byte += spaceByteCount
- }
-}
-
-// meldConsecutiveStringLiterals simplifies the AST output by combining a
-// sequence of string literal tokens into a single string literal. This must be
-// performed after any whitespace trimming operations.
-func meldConsecutiveStringLiterals(parts *templateParts) {
- if len(parts.Tokens) == 0 {
- return
- }
-
- // Loop over all tokens starting at the second element, as we want to join
- // pairs of consecutive string literals.
- i := 1
- for i < len(parts.Tokens) {
- if prevLiteral, ok := parts.Tokens[i-1].(*templateLiteralToken); ok {
- if literal, ok := parts.Tokens[i].(*templateLiteralToken); ok {
- // The current and previous tokens are both literals: combine
- prevLiteral.Val = prevLiteral.Val + literal.Val
- prevLiteral.SrcRange.End = literal.SrcRange.End
-
- // Remove the current token from the slice
- parts.Tokens = append(parts.Tokens[:i], parts.Tokens[i+1:]...)
-
- // Continue without moving forward in the slice
- continue
- }
- }
-
- // Try the next pair of tokens
- i++
- }
-}
-
-type templateParts struct {
- Tokens []templateToken
- SrcRange hcl.Range
-}
-
-// templateToken is a higher-level token that represents a single atom within
-// the template language. Our template parsing first raises the raw token
-// stream to a sequence of templateToken, and then transforms the result into
-// an expression tree.
-type templateToken interface {
- templateToken() templateToken
-}
-
-type templateLiteralToken struct {
- Val string
- SrcRange hcl.Range
- isTemplateToken
-}
-
-type templateInterpToken struct {
- Expr Expression
- SrcRange hcl.Range
- isTemplateToken
-}
-
-type templateIfToken struct {
- CondExpr Expression
- SrcRange hcl.Range
- isTemplateToken
-}
-
-type templateForToken struct {
- KeyVar string // empty if ignoring key
- ValVar string
- CollExpr Expression
- SrcRange hcl.Range
- isTemplateToken
-}
-
-type templateEndCtrlType int
-
-const (
- templateEndIf templateEndCtrlType = iota
- templateElse
- templateEndFor
-)
-
-type templateEndCtrlToken struct {
- Type templateEndCtrlType
- SrcRange hcl.Range
- isTemplateToken
-}
-
-func (t *templateEndCtrlToken) Name() string {
- switch t.Type {
- case templateEndIf:
- return "endif"
- case templateElse:
- return "else"
- case templateEndFor:
- return "endfor"
- default:
- // should never happen
- panic("invalid templateEndCtrlType")
- }
-}
-
-type templateEndToken struct {
- SrcRange hcl.Range
- isTemplateToken
-}
-
-type isTemplateToken [0]int
-
-func (t isTemplateToken) templateToken() templateToken {
- return t
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_traversal.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_traversal.go
deleted file mode 100644
index f7d4062f..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/parser_traversal.go
+++ /dev/null
@@ -1,211 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-import (
- "github.com/zclconf/go-cty/cty"
-
- "github.com/hashicorp/hcl/v2"
-)
-
-// ParseTraversalAbs parses an absolute traversal that is assumed to consume
-// all of the remaining tokens in the peeker. The usual parser recovery
-// behavior is not supported here because traversals are not expected to
-// be parsed as part of a larger program.
-func (p *parser) ParseTraversalAbs() (hcl.Traversal, hcl.Diagnostics) {
- return p.parseTraversal(false)
-}
-
-// ParseTraversalPartial parses an absolute traversal that is permitted
-// to contain splat ([*]) expressions. Only splat expressions within square
-// brackets are permitted ([*]); splat expressions within attribute names are
-// not permitted (.*).
-//
-// The meaning of partial here is that the traversal may be incomplete, in that
-// any splat expression indicates reference to a potentially unknown number of
-// elements.
-//
-// Traversals that include splats cannot be automatically traversed by HCL using
-// the TraversalAbs or TraversalRel methods. Instead, the caller must handle
-// the traversals manually.
-func (p *parser) ParseTraversalPartial() (hcl.Traversal, hcl.Diagnostics) {
- return p.parseTraversal(true)
-}
-
-func (p *parser) parseTraversal(allowSplats bool) (hcl.Traversal, hcl.Diagnostics) {
- var ret hcl.Traversal
- var diags hcl.Diagnostics
-
- // Absolute traversal must always begin with a variable name
- varTok := p.Read()
- if varTok.Type != TokenIdent {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Variable name required",
- Detail: "Must begin with a variable name.",
- Subject: &varTok.Range,
- })
- return ret, diags
- }
-
- varName := string(varTok.Bytes)
- ret = append(ret, hcl.TraverseRoot{
- Name: varName,
- SrcRange: varTok.Range,
- })
-
- for {
- next := p.Peek()
-
- if next.Type == TokenEOF {
- return ret, diags
- }
-
- switch next.Type {
- case TokenDot:
- // Attribute access
- dot := p.Read() // eat dot
- nameTok := p.Read()
- if nameTok.Type != TokenIdent {
- if nameTok.Type == TokenStar {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Attribute name required",
- Detail: "Splat expressions (.*) may not be used here.",
- Subject: &nameTok.Range,
- Context: hcl.RangeBetween(varTok.Range, nameTok.Range).Ptr(),
- })
- } else {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Attribute name required",
- Detail: "Dot must be followed by attribute name.",
- Subject: &nameTok.Range,
- Context: hcl.RangeBetween(varTok.Range, nameTok.Range).Ptr(),
- })
- }
- return ret, diags
- }
-
- attrName := string(nameTok.Bytes)
- ret = append(ret, hcl.TraverseAttr{
- Name: attrName,
- SrcRange: hcl.RangeBetween(dot.Range, nameTok.Range),
- })
- case TokenOBrack:
- // Index
- open := p.Read() // eat open bracket
- next := p.Peek()
-
- switch next.Type {
- case TokenNumberLit:
- tok := p.Read() // eat number
- numVal, numDiags := p.numberLitValue(tok)
- diags = append(diags, numDiags...)
-
- close := p.Read()
- if close.Type != TokenCBrack {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unclosed index brackets",
- Detail: "Index key must be followed by a closing bracket.",
- Subject: &close.Range,
- Context: hcl.RangeBetween(open.Range, close.Range).Ptr(),
- })
- }
-
- ret = append(ret, hcl.TraverseIndex{
- Key: numVal,
- SrcRange: hcl.RangeBetween(open.Range, close.Range),
- })
-
- if diags.HasErrors() {
- return ret, diags
- }
-
- case TokenOQuote:
- str, _, strDiags := p.parseQuotedStringLiteral()
- diags = append(diags, strDiags...)
-
- close := p.Read()
- if close.Type != TokenCBrack {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unclosed index brackets",
- Detail: "Index key must be followed by a closing bracket.",
- Subject: &close.Range,
- Context: hcl.RangeBetween(open.Range, close.Range).Ptr(),
- })
- }
-
- ret = append(ret, hcl.TraverseIndex{
- Key: cty.StringVal(str),
- SrcRange: hcl.RangeBetween(open.Range, close.Range),
- })
-
- if diags.HasErrors() {
- return ret, diags
- }
-
- case TokenStar:
- if allowSplats {
-
- p.Read() // Eat the star.
- close := p.Read()
- if close.Type != TokenCBrack {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unclosed index brackets",
- Detail: "Index key must be followed by a closing bracket.",
- Subject: &close.Range,
- Context: hcl.RangeBetween(open.Range, close.Range).Ptr(),
- })
- }
-
- ret = append(ret, hcl.TraverseSplat{
- SrcRange: hcl.RangeBetween(open.Range, close.Range),
- })
-
- if diags.HasErrors() {
- return ret, diags
- }
-
- continue
- }
-
- // Otherwise, return the error below for the star.
- fallthrough
- default:
- if next.Type == TokenStar {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Attribute name required",
- Detail: "Splat expressions ([*]) may not be used here.",
- Subject: &next.Range,
- Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(),
- })
- } else {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Index value required",
- Detail: "Index brackets must contain either a literal number or a literal string.",
- Subject: &next.Range,
- Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(),
- })
- }
- return ret, diags
- }
-
- default:
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid character",
- Detail: "Expected an attribute access or an index operator.",
- Subject: &next.Range,
- Context: hcl.RangeBetween(varTok.Range, next.Range).Ptr(),
- })
- return ret, diags
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/peeker.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/peeker.go
deleted file mode 100644
index 74fa3fb3..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/peeker.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-import (
- "bytes"
- "fmt"
- "path/filepath"
- "runtime"
- "strings"
-
- "github.com/hashicorp/hcl/v2"
-)
-
-// This is set to true at init() time in tests, to enable more useful output
-// if a stack discipline error is detected. It should not be enabled in
-// normal mode since there is a performance penalty from accessing the
-// runtime stack to produce the traces, but could be temporarily set to
-// true for debugging if desired.
-var tracePeekerNewlinesStack = false
-
-type peeker struct {
- Tokens Tokens
- NextIndex int
-
- IncludeComments bool
- IncludeNewlinesStack []bool
-
- // used only when tracePeekerNewlinesStack is set
- newlineStackChanges []peekerNewlineStackChange
-}
-
-// for use in debugging the stack usage only
-type peekerNewlineStackChange struct {
- Pushing bool // if false, then popping
- Frame runtime.Frame
- Include bool
-}
-
-func newPeeker(tokens Tokens, includeComments bool) *peeker {
- return &peeker{
- Tokens: tokens,
- IncludeComments: includeComments,
-
- IncludeNewlinesStack: []bool{true},
- }
-}
-
-func (p *peeker) Peek() Token {
- ret, _ := p.nextToken()
- return ret
-}
-
-func (p *peeker) Read() Token {
- ret, nextIdx := p.nextToken()
- p.NextIndex = nextIdx
- return ret
-}
-
-func (p *peeker) NextRange() hcl.Range {
- return p.Peek().Range
-}
-
-func (p *peeker) PrevRange() hcl.Range {
- if p.NextIndex == 0 {
- return p.NextRange()
- }
-
- return p.Tokens[p.NextIndex-1].Range
-}
-
-func (p *peeker) nextToken() (Token, int) {
- for i := p.NextIndex; i < len(p.Tokens); i++ {
- tok := p.Tokens[i]
- switch tok.Type {
- case TokenComment:
- if !p.IncludeComments {
- // Single-line comment tokens, starting with # or //, absorb
- // the trailing newline that terminates them as part of their
- // bytes. When we're filtering out comments, we must as a
- // special case transform these to newline tokens in order
- // to properly parse newline-terminated block items.
-
- if p.includingNewlines() {
- if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' {
- fakeNewline := Token{
- Type: TokenNewline,
- Bytes: tok.Bytes[len(tok.Bytes)-1 : len(tok.Bytes)],
-
- // We use the whole token range as the newline
- // range, even though that's a little... weird,
- // because otherwise we'd need to go count
- // characters again in order to figure out the
- // column of the newline, and that complexity
- // isn't justified when ranges of newlines are
- // so rarely printed anyway.
- Range: tok.Range,
- }
- return fakeNewline, i + 1
- }
- }
-
- continue
- }
- case TokenNewline:
- if !p.includingNewlines() {
- continue
- }
- }
-
- return tok, i + 1
- }
-
- // if we fall out here then we'll return the EOF token, and leave
- // our index pointed off the end of the array so we'll keep
- // returning EOF in future too.
- return p.Tokens[len(p.Tokens)-1], len(p.Tokens)
-}
-
-func (p *peeker) includingNewlines() bool {
- return p.IncludeNewlinesStack[len(p.IncludeNewlinesStack)-1]
-}
-
-func (p *peeker) PushIncludeNewlines(include bool) {
- if tracePeekerNewlinesStack {
- // Record who called us so that we can more easily track down any
- // mismanagement of the stack in the parser.
- callers := []uintptr{0}
- runtime.Callers(2, callers)
- frames := runtime.CallersFrames(callers)
- frame, _ := frames.Next()
- p.newlineStackChanges = append(p.newlineStackChanges, peekerNewlineStackChange{
- true, frame, include,
- })
- }
-
- p.IncludeNewlinesStack = append(p.IncludeNewlinesStack, include)
-}
-
-func (p *peeker) PopIncludeNewlines() bool {
- stack := p.IncludeNewlinesStack
- remain, ret := stack[:len(stack)-1], stack[len(stack)-1]
- p.IncludeNewlinesStack = remain
-
- if tracePeekerNewlinesStack {
- // Record who called us so that we can more easily track down any
- // mismanagement of the stack in the parser.
- callers := []uintptr{0}
- runtime.Callers(2, callers)
- frames := runtime.CallersFrames(callers)
- frame, _ := frames.Next()
- p.newlineStackChanges = append(p.newlineStackChanges, peekerNewlineStackChange{
- false, frame, ret,
- })
- }
-
- return ret
-}
-
-// AssertEmptyNewlinesStack checks if the IncludeNewlinesStack is empty, doing
-// panicking if it is not. This can be used to catch stack mismanagement that
-// might otherwise just cause confusing downstream errors.
-//
-// This function is a no-op if the stack is empty when called.
-//
-// If newlines stack tracing is enabled by setting the global variable
-// tracePeekerNewlinesStack at init time, a full log of all of the push/pop
-// calls will be produced to help identify which caller in the parser is
-// misbehaving.
-func (p *peeker) AssertEmptyIncludeNewlinesStack() {
- if len(p.IncludeNewlinesStack) != 1 {
- // Should never happen; indicates mismanagement of the stack inside
- // the parser.
- if p.newlineStackChanges != nil { // only if traceNewlinesStack is enabled above
- panic(fmt.Errorf(
- "non-empty IncludeNewlinesStack after parse with %d calls unaccounted for:\n%s",
- len(p.IncludeNewlinesStack)-1,
- formatPeekerNewlineStackChanges(p.newlineStackChanges),
- ))
- } else {
- panic(fmt.Errorf("non-empty IncludeNewlinesStack after parse: %#v", p.IncludeNewlinesStack))
- }
- }
-}
-
-func formatPeekerNewlineStackChanges(changes []peekerNewlineStackChange) string {
- indent := 0
- var buf bytes.Buffer
- for _, change := range changes {
- funcName := change.Frame.Function
- if idx := strings.LastIndexByte(funcName, '.'); idx != -1 {
- funcName = funcName[idx+1:]
- }
- filename := change.Frame.File
- if idx := strings.LastIndexByte(filename, filepath.Separator); idx != -1 {
- filename = filename[idx+1:]
- }
-
- switch change.Pushing {
-
- case true:
- buf.WriteString(strings.Repeat(" ", indent))
- fmt.Fprintf(&buf, "PUSH %#v (%s at %s:%d)\n", change.Include, funcName, filename, change.Frame.Line)
- indent++
-
- case false:
- indent--
- buf.WriteString(strings.Repeat(" ", indent))
- fmt.Fprintf(&buf, "POP %#v (%s at %s:%d)\n", change.Include, funcName, filename, change.Frame.Line)
-
- }
- }
- return buf.String()
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/public.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/public.go
deleted file mode 100644
index 17dc1ed4..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/public.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-import (
- "github.com/hashicorp/hcl/v2"
-)
-
-// ParseConfig parses the given buffer as a whole HCL config file, returning
-// a *hcl.File representing its contents. If HasErrors called on the returned
-// diagnostics returns true, the returned body is likely to be incomplete
-// and should therefore be used with care.
-//
-// The body in the returned file has dynamic type *hclsyntax.Body, so callers
-// may freely type-assert this to get access to the full hclsyntax API in
-// situations where detailed access is required. However, most common use-cases
-// should be served using the hcl.Body interface to ensure compatibility with
-// other configurationg syntaxes, such as JSON.
-func ParseConfig(src []byte, filename string, start hcl.Pos) (*hcl.File, hcl.Diagnostics) {
- tokens, diags := LexConfig(src, filename, start)
- peeker := newPeeker(tokens, false)
- parser := &parser{peeker: peeker}
- body, parseDiags := parser.ParseBody(TokenEOF)
- diags = append(diags, parseDiags...)
-
- // Panic if the parser uses incorrect stack discipline with the peeker's
- // newlines stack, since otherwise it will produce confusing downstream
- // errors.
- peeker.AssertEmptyIncludeNewlinesStack()
-
- return &hcl.File{
- Body: body,
- Bytes: src,
-
- Nav: navigation{
- root: body,
- },
- }, diags
-}
-
-// ParseExpression parses the given buffer as a standalone HCL expression,
-// returning it as an instance of Expression.
-func ParseExpression(src []byte, filename string, start hcl.Pos) (Expression, hcl.Diagnostics) {
- tokens, diags := LexExpression(src, filename, start)
- peeker := newPeeker(tokens, false)
- parser := &parser{peeker: peeker}
-
- // Bare expressions are always parsed in "ignore newlines" mode, as if
- // they were wrapped in parentheses.
- parser.PushIncludeNewlines(false)
-
- expr, parseDiags := parser.ParseExpression()
- diags = append(diags, parseDiags...)
-
- next := parser.Peek()
- if next.Type != TokenEOF && !parser.recovery {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Extra characters after expression",
- Detail: "An expression was successfully parsed, but extra characters were found after it.",
- Subject: &next.Range,
- })
- }
-
- parser.PopIncludeNewlines()
-
- // Panic if the parser uses incorrect stack discipline with the peeker's
- // newlines stack, since otherwise it will produce confusing downstream
- // errors.
- peeker.AssertEmptyIncludeNewlinesStack()
-
- return expr, diags
-}
-
-// ParseTemplate parses the given buffer as a standalone HCL template,
-// returning it as an instance of Expression.
-func ParseTemplate(src []byte, filename string, start hcl.Pos) (Expression, hcl.Diagnostics) {
- tokens, diags := LexTemplate(src, filename, start)
- peeker := newPeeker(tokens, false)
- parser := &parser{peeker: peeker}
- expr, parseDiags := parser.ParseTemplate()
- diags = append(diags, parseDiags...)
-
- // Panic if the parser uses incorrect stack discipline with the peeker's
- // newlines stack, since otherwise it will produce confusing downstream
- // errors.
- peeker.AssertEmptyIncludeNewlinesStack()
-
- return expr, diags
-}
-
-// ParseTraversalAbs parses the given buffer as a standalone absolute traversal.
-//
-// Parsing as a traversal is more limited than parsing as an expession since
-// it allows only attribute and indexing operations on variables. Traverals
-// are useful as a syntax for referring to objects without necessarily
-// evaluating them.
-func ParseTraversalAbs(src []byte, filename string, start hcl.Pos) (hcl.Traversal, hcl.Diagnostics) {
- tokens, diags := LexExpression(src, filename, start)
- peeker := newPeeker(tokens, false)
- parser := &parser{peeker: peeker}
-
- // Bare traverals are always parsed in "ignore newlines" mode, as if
- // they were wrapped in parentheses.
- parser.PushIncludeNewlines(false)
-
- expr, parseDiags := parser.ParseTraversalAbs()
- diags = append(diags, parseDiags...)
-
- parser.PopIncludeNewlines()
-
- // Panic if the parser uses incorrect stack discipline with the peeker's
- // newlines stack, since otherwise it will produce confusing downstream
- // errors.
- peeker.AssertEmptyIncludeNewlinesStack()
-
- return expr, diags
-}
-
-// ParseTraversalPartial matches the behavior of ParseTraversalAbs except
-// that it allows splat expressions ([*]) to appear in the traversal.
-//
-// The returned traversals are "partial" in that the splat expression indicates
-// an unknown value for the index.
-//
-// Traversals that include splats cannot be automatically traversed by HCL using
-// the TraversalAbs or TraversalRel methods. Instead, the caller must handle
-// the traversals manually.
-func ParseTraversalPartial(src []byte, filename string, start hcl.Pos) (hcl.Traversal, hcl.Diagnostics) {
- tokens, diags := LexExpression(src, filename, start)
- peeker := newPeeker(tokens, false)
- parser := &parser{peeker: peeker}
-
- // Bare traverals are always parsed in "ignore newlines" mode, as if
- // they were wrapped in parentheses.
- parser.PushIncludeNewlines(false)
-
- expr, parseDiags := parser.ParseTraversalPartial()
- diags = append(diags, parseDiags...)
-
- parser.PopIncludeNewlines()
-
- // Panic if the parser uses incorrect stack discipline with the peeker's
- // newlines stack, since otherwise it will produce confusing downstream
- // errors.
- peeker.AssertEmptyIncludeNewlinesStack()
-
- return expr, diags
-}
-
-// LexConfig performs lexical analysis on the given buffer, treating it as a
-// whole HCL config file, and returns the resulting tokens.
-//
-// Only minimal validation is done during lexical analysis, so the returned
-// diagnostics may include errors about lexical issues such as bad character
-// encodings or unrecognized characters, but full parsing is required to
-// detect _all_ syntax errors.
-func LexConfig(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) {
- tokens := scanTokens(src, filename, start, scanNormal)
- diags := checkInvalidTokens(tokens)
- return tokens, diags
-}
-
-// LexExpression performs lexical analysis on the given buffer, treating it as
-// a standalone HCL expression, and returns the resulting tokens.
-//
-// Only minimal validation is done during lexical analysis, so the returned
-// diagnostics may include errors about lexical issues such as bad character
-// encodings or unrecognized characters, but full parsing is required to
-// detect _all_ syntax errors.
-func LexExpression(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) {
- // This is actually just the same thing as LexConfig, since configs
- // and expressions lex in the same way.
- tokens := scanTokens(src, filename, start, scanNormal)
- diags := checkInvalidTokens(tokens)
- return tokens, diags
-}
-
-// LexTemplate performs lexical analysis on the given buffer, treating it as a
-// standalone HCL template, and returns the resulting tokens.
-//
-// Only minimal validation is done during lexical analysis, so the returned
-// diagnostics may include errors about lexical issues such as bad character
-// encodings or unrecognized characters, but full parsing is required to
-// detect _all_ syntax errors.
-func LexTemplate(src []byte, filename string, start hcl.Pos) (Tokens, hcl.Diagnostics) {
- tokens := scanTokens(src, filename, start, scanTemplate)
- diags := checkInvalidTokens(tokens)
- return tokens, diags
-}
-
-// ValidIdentifier tests if the given string could be a valid identifier in
-// a native syntax expression.
-//
-// This is useful when accepting names from the user that will be used as
-// variable or attribute names in the scope, to ensure that any name chosen
-// will be traversable using the variable or attribute traversal syntax.
-func ValidIdentifier(s string) bool {
- // This is a kinda-expensive way to do something pretty simple, but it
- // is easiest to do with our existing scanner-related infrastructure here
- // and nobody should be validating identifiers in a tight loop.
- tokens := scanTokens([]byte(s), "", hcl.Pos{}, scanIdentOnly)
- return len(tokens) == 2 && tokens[0].Type == TokenIdent && tokens[1].Type == TokenEOF
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.go
deleted file mode 100644
index 6b44d992..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.go
+++ /dev/null
@@ -1,303 +0,0 @@
-//line scan_string_lit.rl:1
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-// This file is generated from scan_string_lit.rl. DO NOT EDIT.
-
-//line scan_string_lit.go:11
-var _hclstrtok_actions []byte = []byte{
- 0, 1, 0, 1, 1, 2, 1, 0,
-}
-
-var _hclstrtok_key_offsets []byte = []byte{
- 0, 0, 2, 4, 6, 10, 14, 18,
- 22, 27, 31, 36, 41, 46, 51, 57,
- 62, 74, 85, 96, 107, 118, 129, 140,
- 151,
-}
-
-var _hclstrtok_trans_keys []byte = []byte{
- 128, 191, 128, 191, 128, 191, 10, 13,
- 36, 37, 10, 13, 36, 37, 10, 13,
- 36, 37, 10, 13, 36, 37, 10, 13,
- 36, 37, 123, 10, 13, 36, 37, 10,
- 13, 36, 37, 92, 10, 13, 36, 37,
- 92, 10, 13, 36, 37, 92, 10, 13,
- 36, 37, 92, 10, 13, 36, 37, 92,
- 123, 10, 13, 36, 37, 92, 85, 117,
- 128, 191, 192, 223, 224, 239, 240, 247,
- 248, 255, 10, 13, 36, 37, 92, 48,
- 57, 65, 70, 97, 102, 10, 13, 36,
- 37, 92, 48, 57, 65, 70, 97, 102,
- 10, 13, 36, 37, 92, 48, 57, 65,
- 70, 97, 102, 10, 13, 36, 37, 92,
- 48, 57, 65, 70, 97, 102, 10, 13,
- 36, 37, 92, 48, 57, 65, 70, 97,
- 102, 10, 13, 36, 37, 92, 48, 57,
- 65, 70, 97, 102, 10, 13, 36, 37,
- 92, 48, 57, 65, 70, 97, 102, 10,
- 13, 36, 37, 92, 48, 57, 65, 70,
- 97, 102,
-}
-
-var _hclstrtok_single_lengths []byte = []byte{
- 0, 0, 0, 0, 4, 4, 4, 4,
- 5, 4, 5, 5, 5, 5, 6, 5,
- 2, 5, 5, 5, 5, 5, 5, 5,
- 5,
-}
-
-var _hclstrtok_range_lengths []byte = []byte{
- 0, 1, 1, 1, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 5, 3, 3, 3, 3, 3, 3, 3,
- 3,
-}
-
-var _hclstrtok_index_offsets []byte = []byte{
- 0, 0, 2, 4, 6, 11, 16, 21,
- 26, 32, 37, 43, 49, 55, 61, 68,
- 74, 82, 91, 100, 109, 118, 127, 136,
- 145,
-}
-
-var _hclstrtok_indicies []byte = []byte{
- 0, 1, 2, 1, 3, 1, 5, 6,
- 7, 8, 4, 10, 11, 12, 13, 9,
- 14, 11, 12, 13, 9, 10, 11, 15,
- 13, 9, 10, 11, 12, 13, 14, 9,
- 10, 11, 12, 15, 9, 17, 18, 19,
- 20, 21, 16, 23, 24, 25, 26, 27,
- 22, 0, 24, 25, 26, 27, 22, 23,
- 24, 28, 26, 27, 22, 23, 24, 25,
- 26, 27, 0, 22, 23, 24, 25, 28,
- 27, 22, 29, 30, 22, 2, 3, 31,
- 22, 0, 23, 24, 25, 26, 27, 32,
- 32, 32, 22, 23, 24, 25, 26, 27,
- 33, 33, 33, 22, 23, 24, 25, 26,
- 27, 34, 34, 34, 22, 23, 24, 25,
- 26, 27, 30, 30, 30, 22, 23, 24,
- 25, 26, 27, 35, 35, 35, 22, 23,
- 24, 25, 26, 27, 36, 36, 36, 22,
- 23, 24, 25, 26, 27, 37, 37, 37,
- 22, 23, 24, 25, 26, 27, 0, 0,
- 0, 22,
-}
-
-var _hclstrtok_trans_targs []byte = []byte{
- 11, 0, 1, 2, 4, 5, 6, 7,
- 9, 4, 5, 6, 7, 9, 5, 8,
- 10, 11, 12, 13, 15, 16, 10, 11,
- 12, 13, 15, 16, 14, 17, 21, 3,
- 18, 19, 20, 22, 23, 24,
-}
-
-var _hclstrtok_trans_actions []byte = []byte{
- 0, 0, 0, 0, 0, 1, 1, 1,
- 1, 3, 5, 5, 5, 5, 0, 0,
- 0, 1, 1, 1, 1, 1, 3, 5,
- 5, 5, 5, 5, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0,
-}
-
-var _hclstrtok_eof_actions []byte = []byte{
- 0, 0, 0, 0, 0, 3, 3, 3,
- 3, 3, 0, 3, 3, 3, 3, 3,
- 3, 3, 3, 3, 3, 3, 3, 3,
- 3,
-}
-
-const hclstrtok_start int = 4
-const hclstrtok_first_final int = 4
-const hclstrtok_error int = 0
-
-const hclstrtok_en_quoted int = 10
-const hclstrtok_en_unquoted int = 4
-
-//line scan_string_lit.rl:12
-
-func scanStringLit(data []byte, quoted bool) [][]byte {
- var ret [][]byte
-
-//line scan_string_lit.rl:63
-
- // Ragel state
- p := 0 // "Pointer" into data
- pe := len(data) // End-of-data "pointer"
- ts := 0
- te := 0
- eof := pe
-
- var cs int // current state
- switch {
- case quoted:
- cs = hclstrtok_en_quoted
- default:
- cs = hclstrtok_en_unquoted
- }
-
- // Make Go compiler happy
- _ = ts
- _ = eof
-
- /*token := func () {
- ret = append(ret, data[ts:te])
- }*/
-
-//line scan_string_lit.go:156
- {
- }
-
-//line scan_string_lit.go:160
- {
- var _klen int
- var _trans int
- var _acts int
- var _nacts uint
- var _keys int
- if p == pe {
- goto _test_eof
- }
- if cs == 0 {
- goto _out
- }
- _resume:
- _keys = int(_hclstrtok_key_offsets[cs])
- _trans = int(_hclstrtok_index_offsets[cs])
-
- _klen = int(_hclstrtok_single_lengths[cs])
- if _klen > 0 {
- _lower := int(_keys)
- var _mid int
- _upper := int(_keys + _klen - 1)
- for {
- if _upper < _lower {
- break
- }
-
- _mid = _lower + ((_upper - _lower) >> 1)
- switch {
- case data[p] < _hclstrtok_trans_keys[_mid]:
- _upper = _mid - 1
- case data[p] > _hclstrtok_trans_keys[_mid]:
- _lower = _mid + 1
- default:
- _trans += int(_mid - int(_keys))
- goto _match
- }
- }
- _keys += _klen
- _trans += _klen
- }
-
- _klen = int(_hclstrtok_range_lengths[cs])
- if _klen > 0 {
- _lower := int(_keys)
- var _mid int
- _upper := int(_keys + (_klen << 1) - 2)
- for {
- if _upper < _lower {
- break
- }
-
- _mid = _lower + (((_upper - _lower) >> 1) & ^1)
- switch {
- case data[p] < _hclstrtok_trans_keys[_mid]:
- _upper = _mid - 2
- case data[p] > _hclstrtok_trans_keys[_mid+1]:
- _lower = _mid + 2
- default:
- _trans += int((_mid - int(_keys)) >> 1)
- goto _match
- }
- }
- _trans += _klen
- }
-
- _match:
- _trans = int(_hclstrtok_indicies[_trans])
- cs = int(_hclstrtok_trans_targs[_trans])
-
- if _hclstrtok_trans_actions[_trans] == 0 {
- goto _again
- }
-
- _acts = int(_hclstrtok_trans_actions[_trans])
- _nacts = uint(_hclstrtok_actions[_acts])
- _acts++
- for ; _nacts > 0; _nacts-- {
- _acts++
- switch _hclstrtok_actions[_acts-1] {
- case 0:
-//line scan_string_lit.rl:42
-
- // If te is behind p then we've skipped over some literal
- // characters which we must now return.
- if te < p {
- ret = append(ret, data[te:p])
- }
- ts = p
-
- case 1:
-//line scan_string_lit.rl:50
-
- te = p
- ret = append(ret, data[ts:te])
-
-//line scan_string_lit.go:255
- }
- }
-
- _again:
- if cs == 0 {
- goto _out
- }
- p++
- if p != pe {
- goto _resume
- }
- _test_eof:
- {
- }
- if p == eof {
- __acts := _hclstrtok_eof_actions[cs]
- __nacts := uint(_hclstrtok_actions[__acts])
- __acts++
- for ; __nacts > 0; __nacts-- {
- __acts++
- switch _hclstrtok_actions[__acts-1] {
- case 1:
-//line scan_string_lit.rl:50
-
- te = p
- ret = append(ret, data[ts:te])
-
-//line scan_string_lit.go:280
- }
- }
- }
-
- _out:
- {
- }
- }
-
-//line scan_string_lit.rl:91
-
- if te < p {
- // Collect any leftover literal characters at the end of the input
- ret = append(ret, data[te:p])
- }
-
- // If we fall out here without being in a final state then we've
- // encountered something that the scanner can't match, which should
- // be impossible (the scanner matches all bytes _somehow_) but we'll
- // tolerate it and let the caller deal with it.
- if cs < hclstrtok_first_final {
- ret = append(ret, data[p:len(data)])
- }
-
- return ret
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.rl b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.rl
deleted file mode 100644
index 21d2c8bc..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_string_lit.rl
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-// This file is generated from scan_string_lit.rl. DO NOT EDIT.
-%%{
- # (except you are actually in scan_string_lit.rl here, so edit away!)
-
- machine hclstrtok;
- write data;
-}%%
-
-func scanStringLit(data []byte, quoted bool) [][]byte {
- var ret [][]byte
-
- %%{
- include UnicodeDerived "unicode_derived.rl";
-
- UTF8Cont = 0x80 .. 0xBF;
- AnyUTF8 = (
- 0x00..0x7F |
- 0xC0..0xDF . UTF8Cont |
- 0xE0..0xEF . UTF8Cont . UTF8Cont |
- 0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont
- );
- BadUTF8 = any - AnyUTF8;
-
- Hex = ('0'..'9' | 'a'..'f' | 'A'..'F');
-
- # Our goal with this patterns is to capture user intent as best as
- # possible, even if the input is invalid. The caller will then verify
- # whether each token is valid and generate suitable error messages
- # if not.
- UnicodeEscapeShort = "\\u" . Hex{0,4};
- UnicodeEscapeLong = "\\U" . Hex{0,8};
- UnicodeEscape = (UnicodeEscapeShort | UnicodeEscapeLong);
- SimpleEscape = "\\" . (AnyUTF8 - ('U'|'u'))?;
- TemplateEscape = ("$" . ("$" . ("{"?))?) | ("%" . ("%" . ("{"?))?);
- Newline = ("\r\n" | "\r" | "\n");
-
- action Begin {
- // If te is behind p then we've skipped over some literal
- // characters which we must now return.
- if te < p {
- ret = append(ret, data[te:p])
- }
- ts = p;
- }
- action End {
- te = p;
- ret = append(ret, data[ts:te]);
- }
-
- QuotedToken = (UnicodeEscape | SimpleEscape | TemplateEscape | Newline) >Begin %End;
- UnquotedToken = (TemplateEscape | Newline) >Begin %End;
- QuotedLiteral = (any - ("\\" | "$" | "%" | "\r" | "\n"));
- UnquotedLiteral = (any - ("$" | "%" | "\r" | "\n"));
-
- quoted := (QuotedToken | QuotedLiteral)**;
- unquoted := (UnquotedToken | UnquotedLiteral)**;
-
- }%%
-
- // Ragel state
- p := 0 // "Pointer" into data
- pe := len(data) // End-of-data "pointer"
- ts := 0
- te := 0
- eof := pe
-
- var cs int // current state
- switch {
- case quoted:
- cs = hclstrtok_en_quoted
- default:
- cs = hclstrtok_en_unquoted
- }
-
- // Make Go compiler happy
- _ = ts
- _ = eof
-
- /*token := func () {
- ret = append(ret, data[ts:te])
- }*/
-
- %%{
- write init nocs;
- write exec;
- }%%
-
- if te < p {
- // Collect any leftover literal characters at the end of the input
- ret = append(ret, data[te:p])
- }
-
- // If we fall out here without being in a final state then we've
- // encountered something that the scanner can't match, which should
- // be impossible (the scanner matches all bytes _somehow_) but we'll
- // tolerate it and let the caller deal with it.
- if cs < hclstrtok_first_final {
- ret = append(ret, data[p:len(data)])
- }
-
- return ret
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.go
deleted file mode 100644
index 3ed8455f..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.go
+++ /dev/null
@@ -1,5273 +0,0 @@
-//line scan_tokens.rl:1
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-import (
- "bytes"
-
- "github.com/hashicorp/hcl/v2"
-)
-
-// This file is generated from scan_tokens.rl. DO NOT EDIT.
-
-//line scan_tokens.go:17
-var _hcltok_actions []byte = []byte{
- 0, 1, 0, 1, 1, 1, 3, 1, 4,
- 1, 7, 1, 8, 1, 9, 1, 10,
- 1, 11, 1, 12, 1, 13, 1, 14,
- 1, 15, 1, 16, 1, 17, 1, 18,
- 1, 19, 1, 20, 1, 23, 1, 24,
- 1, 25, 1, 26, 1, 27, 1, 28,
- 1, 29, 1, 30, 1, 31, 1, 32,
- 1, 35, 1, 36, 1, 37, 1, 38,
- 1, 39, 1, 40, 1, 41, 1, 42,
- 1, 43, 1, 44, 1, 47, 1, 48,
- 1, 49, 1, 50, 1, 51, 1, 52,
- 1, 53, 1, 56, 1, 57, 1, 58,
- 1, 59, 1, 60, 1, 61, 1, 62,
- 1, 63, 1, 64, 1, 65, 1, 66,
- 1, 67, 1, 68, 1, 69, 1, 70,
- 1, 71, 1, 72, 1, 73, 1, 74,
- 1, 75, 1, 76, 1, 77, 1, 78,
- 1, 79, 1, 80, 1, 81, 1, 82,
- 1, 83, 1, 84, 1, 85, 1, 86,
- 2, 0, 14, 2, 0, 25, 2, 0,
- 29, 2, 0, 37, 2, 0, 41, 2,
- 1, 2, 2, 4, 5, 2, 4, 6,
- 2, 4, 21, 2, 4, 22, 2, 4,
- 33, 2, 4, 34, 2, 4, 45, 2,
- 4, 46, 2, 4, 54, 2, 4, 55,
-}
-
-var _hcltok_key_offsets []int16 = []int16{
- 0, 0, 1, 2, 4, 9, 13, 15,
- 57, 98, 144, 145, 149, 155, 155, 157,
- 159, 168, 174, 181, 182, 185, 186, 190,
- 195, 204, 208, 212, 220, 222, 224, 226,
- 229, 261, 263, 265, 269, 273, 276, 287,
- 300, 319, 332, 348, 360, 376, 391, 412,
- 422, 434, 445, 459, 474, 484, 496, 505,
- 517, 519, 523, 544, 553, 563, 569, 575,
- 576, 625, 627, 631, 633, 639, 646, 654,
- 661, 664, 670, 674, 678, 680, 684, 688,
- 692, 698, 706, 714, 720, 722, 726, 728,
- 734, 738, 742, 746, 750, 755, 762, 768,
- 770, 772, 776, 778, 784, 788, 792, 802,
- 807, 821, 836, 838, 846, 848, 853, 867,
- 872, 874, 878, 879, 883, 889, 895, 905,
- 915, 926, 934, 937, 940, 944, 948, 950,
- 953, 953, 956, 958, 988, 990, 992, 996,
- 1001, 1005, 1010, 1012, 1014, 1016, 1025, 1029,
- 1033, 1039, 1041, 1049, 1057, 1069, 1072, 1078,
- 1082, 1084, 1088, 1108, 1110, 1112, 1123, 1129,
- 1131, 1133, 1135, 1139, 1145, 1151, 1153, 1158,
- 1162, 1164, 1172, 1190, 1230, 1240, 1244, 1246,
- 1248, 1249, 1253, 1257, 1261, 1265, 1269, 1274,
- 1278, 1282, 1286, 1288, 1290, 1294, 1304, 1308,
- 1310, 1314, 1318, 1322, 1335, 1337, 1339, 1343,
- 1345, 1349, 1351, 1353, 1383, 1387, 1391, 1395,
- 1398, 1405, 1410, 1421, 1425, 1441, 1455, 1459,
- 1464, 1468, 1472, 1478, 1480, 1486, 1488, 1492,
- 1494, 1500, 1505, 1510, 1520, 1522, 1524, 1528,
- 1532, 1534, 1547, 1549, 1553, 1557, 1565, 1567,
- 1571, 1573, 1574, 1577, 1582, 1584, 1586, 1590,
- 1592, 1596, 1602, 1622, 1628, 1634, 1636, 1637,
- 1647, 1648, 1656, 1663, 1665, 1668, 1670, 1672,
- 1674, 1679, 1683, 1687, 1692, 1702, 1712, 1716,
- 1720, 1734, 1760, 1770, 1772, 1774, 1777, 1779,
- 1782, 1784, 1788, 1790, 1791, 1795, 1797, 1800,
- 1807, 1815, 1817, 1819, 1823, 1825, 1831, 1842,
- 1845, 1847, 1851, 1856, 1886, 1891, 1893, 1896,
- 1901, 1915, 1922, 1936, 1941, 1954, 1958, 1971,
- 1976, 1994, 1995, 2004, 2008, 2020, 2025, 2032,
- 2039, 2046, 2048, 2052, 2074, 2079, 2080, 2084,
- 2086, 2136, 2139, 2150, 2154, 2156, 2162, 2168,
- 2170, 2175, 2177, 2181, 2183, 2184, 2186, 2188,
- 2194, 2196, 2198, 2202, 2208, 2221, 2223, 2229,
- 2233, 2241, 2252, 2260, 2263, 2293, 2299, 2302,
- 2307, 2309, 2313, 2317, 2321, 2323, 2330, 2332,
- 2341, 2348, 2356, 2358, 2378, 2390, 2394, 2396,
- 2414, 2453, 2455, 2459, 2461, 2468, 2472, 2500,
- 2502, 2504, 2506, 2508, 2511, 2513, 2517, 2521,
- 2523, 2526, 2528, 2530, 2533, 2535, 2537, 2538,
- 2540, 2542, 2546, 2550, 2553, 2566, 2568, 2574,
- 2578, 2580, 2584, 2588, 2602, 2605, 2614, 2616,
- 2620, 2626, 2626, 2628, 2630, 2639, 2645, 2652,
- 2653, 2656, 2657, 2661, 2666, 2675, 2679, 2683,
- 2691, 2693, 2695, 2697, 2700, 2732, 2734, 2736,
- 2740, 2744, 2747, 2758, 2771, 2790, 2803, 2819,
- 2831, 2847, 2862, 2883, 2893, 2905, 2916, 2930,
- 2945, 2955, 2967, 2976, 2988, 2990, 2994, 3015,
- 3024, 3034, 3040, 3046, 3047, 3096, 3098, 3102,
- 3104, 3110, 3117, 3125, 3132, 3135, 3141, 3145,
- 3149, 3151, 3155, 3159, 3163, 3169, 3177, 3185,
- 3191, 3193, 3197, 3199, 3205, 3209, 3213, 3217,
- 3221, 3226, 3233, 3239, 3241, 3243, 3247, 3249,
- 3255, 3259, 3263, 3273, 3278, 3292, 3307, 3309,
- 3317, 3319, 3324, 3338, 3343, 3345, 3349, 3350,
- 3354, 3360, 3366, 3376, 3386, 3397, 3405, 3408,
- 3411, 3415, 3419, 3421, 3424, 3424, 3427, 3429,
- 3459, 3461, 3463, 3467, 3472, 3476, 3481, 3483,
- 3485, 3487, 3496, 3500, 3504, 3510, 3512, 3520,
- 3528, 3540, 3543, 3549, 3553, 3555, 3559, 3579,
- 3581, 3583, 3594, 3600, 3602, 3604, 3606, 3610,
- 3616, 3622, 3624, 3629, 3633, 3635, 3643, 3661,
- 3701, 3711, 3715, 3717, 3719, 3720, 3724, 3728,
- 3732, 3736, 3740, 3745, 3749, 3753, 3757, 3759,
- 3761, 3765, 3775, 3779, 3781, 3785, 3789, 3793,
- 3806, 3808, 3810, 3814, 3816, 3820, 3822, 3824,
- 3854, 3858, 3862, 3866, 3869, 3876, 3881, 3892,
- 3896, 3912, 3926, 3930, 3935, 3939, 3943, 3949,
- 3951, 3957, 3959, 3963, 3965, 3971, 3976, 3981,
- 3991, 3993, 3995, 3999, 4003, 4005, 4018, 4020,
- 4024, 4028, 4036, 4038, 4042, 4044, 4045, 4048,
- 4053, 4055, 4057, 4061, 4063, 4067, 4073, 4093,
- 4099, 4105, 4107, 4108, 4118, 4119, 4127, 4134,
- 4136, 4139, 4141, 4143, 4145, 4150, 4154, 4158,
- 4163, 4173, 4183, 4187, 4191, 4205, 4231, 4241,
- 4243, 4245, 4248, 4250, 4253, 4255, 4259, 4261,
- 4262, 4266, 4268, 4270, 4277, 4281, 4288, 4295,
- 4304, 4320, 4332, 4350, 4361, 4373, 4381, 4399,
- 4407, 4437, 4440, 4450, 4460, 4472, 4483, 4492,
- 4505, 4517, 4521, 4527, 4554, 4563, 4566, 4571,
- 4577, 4582, 4603, 4607, 4613, 4613, 4620, 4629,
- 4637, 4640, 4644, 4650, 4656, 4659, 4663, 4670,
- 4676, 4685, 4694, 4698, 4702, 4706, 4710, 4717,
- 4721, 4725, 4735, 4741, 4745, 4751, 4755, 4758,
- 4764, 4770, 4782, 4786, 4790, 4800, 4804, 4815,
- 4817, 4819, 4823, 4835, 4840, 4864, 4868, 4874,
- 4896, 4905, 4909, 4912, 4913, 4921, 4929, 4935,
- 4945, 4952, 4970, 4973, 4976, 4984, 4990, 4994,
- 4998, 5002, 5008, 5016, 5021, 5027, 5031, 5039,
- 5046, 5050, 5057, 5063, 5071, 5079, 5085, 5091,
- 5102, 5106, 5118, 5127, 5144, 5161, 5164, 5168,
- 5170, 5176, 5178, 5182, 5197, 5201, 5205, 5209,
- 5213, 5217, 5219, 5225, 5230, 5234, 5240, 5247,
- 5250, 5268, 5270, 5315, 5321, 5327, 5331, 5335,
- 5341, 5345, 5351, 5357, 5364, 5366, 5372, 5378,
- 5382, 5386, 5394, 5407, 5413, 5420, 5428, 5434,
- 5443, 5449, 5453, 5458, 5462, 5470, 5474, 5478,
- 5508, 5514, 5520, 5526, 5532, 5539, 5545, 5552,
- 5557, 5567, 5571, 5578, 5584, 5588, 5595, 5599,
- 5605, 5608, 5612, 5616, 5620, 5624, 5629, 5634,
- 5638, 5649, 5653, 5657, 5663, 5671, 5675, 5692,
- 5696, 5702, 5712, 5718, 5724, 5727, 5732, 5741,
- 5745, 5749, 5755, 5759, 5765, 5773, 5791, 5792,
- 5802, 5803, 5812, 5820, 5822, 5825, 5827, 5829,
- 5831, 5836, 5849, 5853, 5868, 5897, 5908, 5910,
- 5914, 5918, 5923, 5927, 5929, 5936, 5940, 5948,
- 5952, 5964, 5966, 5968, 5970, 5972, 5974, 5975,
- 5977, 5979, 5981, 5983, 5985, 5986, 5988, 5990,
- 5992, 5994, 5996, 6000, 6006, 6006, 6008, 6010,
- 6019, 6025, 6032, 6033, 6036, 6037, 6041, 6046,
- 6055, 6059, 6063, 6071, 6073, 6075, 6077, 6080,
- 6112, 6114, 6116, 6120, 6124, 6127, 6138, 6151,
- 6170, 6183, 6199, 6211, 6227, 6242, 6263, 6273,
- 6285, 6296, 6310, 6325, 6335, 6347, 6356, 6368,
- 6370, 6374, 6395, 6404, 6414, 6420, 6426, 6427,
- 6476, 6478, 6482, 6484, 6490, 6497, 6505, 6512,
- 6515, 6521, 6525, 6529, 6531, 6535, 6539, 6543,
- 6549, 6557, 6565, 6571, 6573, 6577, 6579, 6585,
- 6589, 6593, 6597, 6601, 6606, 6613, 6619, 6621,
- 6623, 6627, 6629, 6635, 6639, 6643, 6653, 6658,
- 6672, 6687, 6689, 6697, 6699, 6704, 6718, 6723,
- 6725, 6729, 6730, 6734, 6740, 6746, 6756, 6766,
- 6777, 6785, 6788, 6791, 6795, 6799, 6801, 6804,
- 6804, 6807, 6809, 6839, 6841, 6843, 6847, 6852,
- 6856, 6861, 6863, 6865, 6867, 6876, 6880, 6884,
- 6890, 6892, 6900, 6908, 6920, 6923, 6929, 6933,
- 6935, 6939, 6959, 6961, 6963, 6974, 6980, 6982,
- 6984, 6986, 6990, 6996, 7002, 7004, 7009, 7013,
- 7015, 7023, 7041, 7081, 7091, 7095, 7097, 7099,
- 7100, 7104, 7108, 7112, 7116, 7120, 7125, 7129,
- 7133, 7137, 7139, 7141, 7145, 7155, 7159, 7161,
- 7165, 7169, 7173, 7186, 7188, 7190, 7194, 7196,
- 7200, 7202, 7204, 7234, 7238, 7242, 7246, 7249,
- 7256, 7261, 7272, 7276, 7292, 7306, 7310, 7315,
- 7319, 7323, 7329, 7331, 7337, 7339, 7343, 7345,
- 7351, 7356, 7361, 7371, 7373, 7375, 7379, 7383,
- 7385, 7398, 7400, 7404, 7408, 7416, 7418, 7422,
- 7424, 7425, 7428, 7433, 7435, 7437, 7441, 7443,
- 7447, 7453, 7473, 7479, 7485, 7487, 7488, 7498,
- 7499, 7507, 7514, 7516, 7519, 7521, 7523, 7525,
- 7530, 7534, 7538, 7543, 7553, 7563, 7567, 7571,
- 7585, 7611, 7621, 7623, 7625, 7628, 7630, 7633,
- 7635, 7639, 7641, 7642, 7646, 7648, 7650, 7657,
- 7661, 7668, 7675, 7684, 7700, 7712, 7730, 7741,
- 7753, 7761, 7779, 7787, 7817, 7820, 7830, 7840,
- 7852, 7863, 7872, 7885, 7897, 7901, 7907, 7934,
- 7943, 7946, 7951, 7957, 7962, 7983, 7987, 7993,
- 7993, 8000, 8009, 8017, 8020, 8024, 8030, 8036,
- 8039, 8043, 8050, 8056, 8065, 8074, 8078, 8082,
- 8086, 8090, 8097, 8101, 8105, 8115, 8121, 8125,
- 8131, 8135, 8138, 8144, 8150, 8162, 8166, 8170,
- 8180, 8184, 8195, 8197, 8199, 8203, 8215, 8220,
- 8244, 8248, 8254, 8276, 8285, 8289, 8292, 8293,
- 8301, 8309, 8315, 8325, 8332, 8350, 8353, 8356,
- 8364, 8370, 8374, 8378, 8382, 8388, 8396, 8401,
- 8407, 8411, 8419, 8426, 8430, 8437, 8443, 8451,
- 8459, 8465, 8471, 8482, 8486, 8498, 8507, 8524,
- 8541, 8544, 8548, 8550, 8556, 8558, 8562, 8577,
- 8581, 8585, 8589, 8593, 8597, 8599, 8605, 8610,
- 8614, 8620, 8627, 8630, 8648, 8650, 8695, 8701,
- 8707, 8711, 8715, 8721, 8725, 8731, 8737, 8744,
- 8746, 8752, 8758, 8762, 8766, 8774, 8787, 8793,
- 8800, 8808, 8814, 8823, 8829, 8833, 8838, 8842,
- 8850, 8854, 8858, 8888, 8894, 8900, 8906, 8912,
- 8919, 8925, 8932, 8937, 8947, 8951, 8958, 8964,
- 8968, 8975, 8979, 8985, 8988, 8992, 8996, 9000,
- 9004, 9009, 9014, 9018, 9029, 9033, 9037, 9043,
- 9051, 9055, 9072, 9076, 9082, 9092, 9098, 9104,
- 9107, 9112, 9121, 9125, 9129, 9135, 9139, 9145,
- 9153, 9171, 9172, 9182, 9183, 9192, 9200, 9202,
- 9205, 9207, 9209, 9211, 9216, 9229, 9233, 9248,
- 9277, 9288, 9290, 9294, 9298, 9303, 9307, 9309,
- 9316, 9320, 9328, 9332, 9408, 9410, 9411, 9412,
- 9413, 9414, 9415, 9417, 9422, 9423, 9425, 9427,
- 9428, 9472, 9473, 9474, 9476, 9481, 9485, 9485,
- 9487, 9489, 9500, 9510, 9518, 9519, 9521, 9522,
- 9526, 9530, 9540, 9544, 9551, 9562, 9569, 9573,
- 9579, 9590, 9622, 9671, 9686, 9701, 9706, 9708,
- 9713, 9745, 9753, 9755, 9777, 9799, 9801, 9817,
- 9833, 9835, 9837, 9837, 9838, 9839, 9840, 9842,
- 9843, 9855, 9857, 9859, 9861, 9875, 9889, 9891,
- 9894, 9897, 9899, 9900, 9901, 9903, 9905, 9907,
- 9921, 9935, 9937, 9940, 9943, 9945, 9946, 9947,
- 9949, 9951, 9953, 10002, 10046, 10048, 10053, 10057,
- 10057, 10059, 10061, 10072, 10082, 10090, 10091, 10093,
- 10094, 10098, 10102, 10112, 10116, 10123, 10134, 10141,
- 10145, 10151, 10162, 10194, 10243, 10258, 10273, 10278,
- 10280, 10285, 10317, 10325, 10327, 10349, 10371,
-}
-
-var _hcltok_trans_keys []byte = []byte{
- 46, 42, 42, 47, 46, 69, 101, 48,
- 57, 43, 45, 48, 57, 48, 57, 45,
- 95, 194, 195, 198, 199, 203, 205, 206,
- 207, 210, 212, 213, 214, 215, 216, 217,
- 219, 220, 221, 222, 223, 224, 225, 226,
- 227, 228, 233, 234, 237, 239, 240, 65,
- 90, 97, 122, 196, 202, 208, 218, 229,
- 236, 95, 194, 195, 198, 199, 203, 205,
- 206, 207, 210, 212, 213, 214, 215, 216,
- 217, 219, 220, 221, 222, 223, 224, 225,
- 226, 227, 228, 233, 234, 237, 239, 240,
- 65, 90, 97, 122, 196, 202, 208, 218,
- 229, 236, 10, 13, 45, 95, 194, 195,
- 198, 199, 203, 204, 205, 206, 207, 210,
- 212, 213, 214, 215, 216, 217, 219, 220,
- 221, 222, 223, 224, 225, 226, 227, 228,
- 233, 234, 237, 239, 240, 243, 48, 57,
- 65, 90, 97, 122, 196, 218, 229, 236,
- 10, 170, 181, 183, 186, 128, 150, 152,
- 182, 184, 255, 192, 255, 0, 127, 173,
- 130, 133, 146, 159, 165, 171, 175, 255,
- 181, 190, 184, 185, 192, 255, 140, 134,
- 138, 142, 161, 163, 255, 182, 130, 136,
- 137, 176, 151, 152, 154, 160, 190, 136,
- 144, 192, 255, 135, 129, 130, 132, 133,
- 144, 170, 176, 178, 144, 154, 160, 191,
- 128, 169, 174, 255, 148, 169, 157, 158,
- 189, 190, 192, 255, 144, 255, 139, 140,
- 178, 255, 186, 128, 181, 160, 161, 162,
- 163, 164, 165, 166, 167, 168, 169, 170,
- 171, 172, 173, 174, 175, 176, 177, 178,
- 179, 180, 181, 182, 183, 184, 185, 186,
- 187, 188, 189, 190, 191, 128, 173, 128,
- 155, 160, 180, 182, 189, 148, 161, 163,
- 255, 176, 164, 165, 132, 169, 177, 141,
- 142, 145, 146, 179, 181, 186, 187, 158,
- 133, 134, 137, 138, 143, 150, 152, 155,
- 164, 165, 178, 255, 188, 129, 131, 133,
- 138, 143, 144, 147, 168, 170, 176, 178,
- 179, 181, 182, 184, 185, 190, 255, 157,
- 131, 134, 137, 138, 142, 144, 146, 152,
- 159, 165, 182, 255, 129, 131, 133, 141,
- 143, 145, 147, 168, 170, 176, 178, 179,
- 181, 185, 188, 255, 134, 138, 142, 143,
- 145, 159, 164, 165, 176, 184, 186, 255,
- 129, 131, 133, 140, 143, 144, 147, 168,
- 170, 176, 178, 179, 181, 185, 188, 191,
- 177, 128, 132, 135, 136, 139, 141, 150,
- 151, 156, 157, 159, 163, 166, 175, 156,
- 130, 131, 133, 138, 142, 144, 146, 149,
- 153, 154, 158, 159, 163, 164, 168, 170,
- 174, 185, 190, 191, 144, 151, 128, 130,
- 134, 136, 138, 141, 166, 175, 128, 131,
- 133, 140, 142, 144, 146, 168, 170, 185,
- 189, 255, 133, 137, 151, 142, 148, 155,
- 159, 164, 165, 176, 255, 128, 131, 133,
- 140, 142, 144, 146, 168, 170, 179, 181,
- 185, 188, 191, 158, 128, 132, 134, 136,
- 138, 141, 149, 150, 160, 163, 166, 175,
- 177, 178, 129, 131, 133, 140, 142, 144,
- 146, 186, 189, 255, 133, 137, 143, 147,
- 152, 158, 164, 165, 176, 185, 192, 255,
- 189, 130, 131, 133, 150, 154, 177, 179,
- 187, 138, 150, 128, 134, 143, 148, 152,
- 159, 166, 175, 178, 179, 129, 186, 128,
- 142, 144, 153, 132, 138, 141, 165, 167,
- 129, 130, 135, 136, 148, 151, 153, 159,
- 161, 163, 170, 171, 173, 185, 187, 189,
- 134, 128, 132, 136, 141, 144, 153, 156,
- 159, 128, 181, 183, 185, 152, 153, 160,
- 169, 190, 191, 128, 135, 137, 172, 177,
- 191, 128, 132, 134, 151, 153, 188, 134,
- 128, 129, 130, 131, 137, 138, 139, 140,
- 141, 142, 143, 144, 153, 154, 155, 156,
- 157, 158, 159, 160, 161, 162, 163, 164,
- 165, 166, 167, 168, 169, 170, 173, 175,
- 176, 177, 178, 179, 181, 182, 183, 188,
- 189, 190, 191, 132, 152, 172, 184, 185,
- 187, 128, 191, 128, 137, 144, 255, 158,
- 159, 134, 187, 136, 140, 142, 143, 137,
- 151, 153, 142, 143, 158, 159, 137, 177,
- 142, 143, 182, 183, 191, 255, 128, 130,
- 133, 136, 150, 152, 255, 145, 150, 151,
- 155, 156, 160, 168, 178, 255, 128, 143,
- 160, 255, 182, 183, 190, 255, 129, 255,
- 173, 174, 192, 255, 129, 154, 160, 255,
- 171, 173, 185, 255, 128, 140, 142, 148,
- 160, 180, 128, 147, 160, 172, 174, 176,
- 178, 179, 148, 150, 152, 155, 158, 159,
- 170, 255, 139, 141, 144, 153, 160, 255,
- 184, 255, 128, 170, 176, 255, 182, 255,
- 128, 158, 160, 171, 176, 187, 134, 173,
- 176, 180, 128, 171, 176, 255, 138, 143,
- 155, 255, 128, 155, 160, 255, 159, 189,
- 190, 192, 255, 167, 128, 137, 144, 153,
- 176, 189, 140, 143, 154, 170, 180, 255,
- 180, 255, 128, 183, 128, 137, 141, 189,
- 128, 136, 144, 146, 148, 182, 184, 185,
- 128, 181, 187, 191, 150, 151, 158, 159,
- 152, 154, 156, 158, 134, 135, 142, 143,
- 190, 255, 190, 128, 180, 182, 188, 130,
- 132, 134, 140, 144, 147, 150, 155, 160,
- 172, 178, 180, 182, 188, 128, 129, 130,
- 131, 132, 133, 134, 176, 177, 178, 179,
- 180, 181, 182, 183, 191, 255, 129, 147,
- 149, 176, 178, 190, 192, 255, 144, 156,
- 161, 144, 156, 165, 176, 130, 135, 149,
- 164, 166, 168, 138, 147, 152, 157, 170,
- 185, 188, 191, 142, 133, 137, 160, 255,
- 137, 255, 128, 174, 176, 255, 159, 165,
- 170, 180, 255, 167, 173, 128, 165, 176,
- 255, 168, 174, 176, 190, 192, 255, 128,
- 150, 160, 166, 168, 174, 176, 182, 184,
- 190, 128, 134, 136, 142, 144, 150, 152,
- 158, 160, 191, 128, 129, 130, 131, 132,
- 133, 134, 135, 144, 145, 255, 133, 135,
- 161, 175, 177, 181, 184, 188, 160, 151,
- 152, 187, 192, 255, 133, 173, 177, 255,
- 143, 159, 187, 255, 176, 191, 182, 183,
- 184, 191, 192, 255, 150, 255, 128, 146,
- 147, 148, 152, 153, 154, 155, 156, 158,
- 159, 160, 161, 162, 163, 164, 165, 166,
- 167, 168, 169, 170, 171, 172, 173, 174,
- 175, 176, 129, 255, 141, 255, 144, 189,
- 141, 143, 172, 255, 191, 128, 175, 180,
- 189, 151, 159, 162, 255, 175, 137, 138,
- 184, 255, 183, 255, 168, 255, 128, 179,
- 188, 134, 143, 154, 159, 184, 186, 190,
- 255, 128, 173, 176, 255, 148, 159, 189,
- 255, 129, 142, 154, 159, 191, 255, 128,
- 182, 128, 141, 144, 153, 160, 182, 186,
- 255, 128, 130, 155, 157, 160, 175, 178,
- 182, 129, 134, 137, 142, 145, 150, 160,
- 166, 168, 174, 176, 255, 155, 166, 175,
- 128, 170, 172, 173, 176, 185, 158, 159,
- 160, 255, 164, 175, 135, 138, 188, 255,
- 164, 169, 171, 172, 173, 174, 175, 180,
- 181, 182, 183, 184, 185, 187, 188, 189,
- 190, 191, 165, 186, 174, 175, 154, 255,
- 190, 128, 134, 147, 151, 157, 168, 170,
- 182, 184, 188, 128, 129, 131, 132, 134,
- 255, 147, 255, 190, 255, 144, 145, 136,
- 175, 188, 255, 128, 143, 160, 175, 179,
- 180, 141, 143, 176, 180, 182, 255, 189,
- 255, 191, 144, 153, 161, 186, 129, 154,
- 166, 255, 191, 255, 130, 135, 138, 143,
- 146, 151, 154, 156, 144, 145, 146, 147,
- 148, 150, 151, 152, 155, 157, 158, 160,
- 170, 171, 172, 175, 161, 169, 128, 129,
- 130, 131, 133, 135, 138, 139, 140, 141,
- 142, 143, 144, 145, 146, 147, 148, 149,
- 152, 156, 157, 160, 161, 162, 163, 164,
- 166, 168, 169, 170, 171, 172, 173, 174,
- 176, 177, 153, 155, 178, 179, 128, 139,
- 141, 166, 168, 186, 188, 189, 191, 255,
- 142, 143, 158, 255, 187, 255, 128, 180,
- 189, 128, 156, 160, 255, 145, 159, 161,
- 255, 128, 159, 176, 255, 139, 143, 187,
- 255, 128, 157, 160, 255, 144, 132, 135,
- 150, 255, 158, 159, 170, 175, 148, 151,
- 188, 255, 128, 167, 176, 255, 164, 255,
- 183, 255, 128, 149, 160, 167, 136, 188,
- 128, 133, 138, 181, 183, 184, 191, 255,
- 150, 159, 183, 255, 128, 158, 160, 178,
- 180, 181, 128, 149, 160, 185, 128, 183,
- 190, 191, 191, 128, 131, 133, 134, 140,
- 147, 149, 151, 153, 179, 184, 186, 160,
- 188, 128, 156, 128, 135, 137, 166, 128,
- 181, 128, 149, 160, 178, 128, 145, 128,
- 178, 129, 130, 131, 132, 133, 135, 136,
- 138, 139, 140, 141, 144, 145, 146, 147,
- 150, 151, 152, 153, 154, 155, 156, 162,
- 163, 171, 176, 177, 178, 128, 134, 135,
- 165, 176, 190, 144, 168, 176, 185, 128,
- 180, 182, 191, 182, 144, 179, 155, 133,
- 137, 141, 143, 157, 255, 190, 128, 145,
- 147, 183, 136, 128, 134, 138, 141, 143,
- 157, 159, 168, 176, 255, 171, 175, 186,
- 255, 128, 131, 133, 140, 143, 144, 147,
- 168, 170, 176, 178, 179, 181, 185, 188,
- 191, 144, 151, 128, 132, 135, 136, 139,
- 141, 157, 163, 166, 172, 176, 180, 128,
- 138, 144, 153, 134, 136, 143, 154, 255,
- 128, 181, 184, 255, 129, 151, 158, 255,
- 129, 131, 133, 143, 154, 255, 128, 137,
- 128, 153, 157, 171, 176, 185, 160, 255,
- 170, 190, 192, 255, 128, 184, 128, 136,
- 138, 182, 184, 191, 128, 144, 153, 178,
- 255, 168, 144, 145, 183, 255, 128, 142,
- 145, 149, 129, 141, 144, 146, 147, 148,
- 175, 255, 132, 255, 128, 144, 129, 143,
- 144, 153, 145, 152, 135, 255, 160, 168,
- 169, 171, 172, 173, 174, 188, 189, 190,
- 191, 161, 167, 185, 255, 128, 158, 160,
- 169, 144, 173, 176, 180, 128, 131, 144,
- 153, 163, 183, 189, 255, 144, 255, 133,
- 143, 191, 255, 143, 159, 160, 128, 129,
- 255, 159, 160, 171, 172, 255, 173, 255,
- 179, 255, 128, 176, 177, 178, 128, 129,
- 171, 175, 189, 255, 128, 136, 144, 153,
- 157, 158, 133, 134, 137, 144, 145, 146,
- 147, 148, 149, 154, 155, 156, 157, 158,
- 159, 168, 169, 170, 150, 153, 165, 169,
- 173, 178, 187, 255, 131, 132, 140, 169,
- 174, 255, 130, 132, 149, 157, 173, 186,
- 188, 160, 161, 163, 164, 167, 168, 132,
- 134, 149, 157, 186, 139, 140, 191, 255,
- 134, 128, 132, 138, 144, 146, 255, 166,
- 167, 129, 155, 187, 149, 181, 143, 175,
- 137, 169, 131, 140, 141, 192, 255, 128,
- 182, 187, 255, 173, 180, 182, 255, 132,
- 155, 159, 161, 175, 128, 160, 163, 164,
- 165, 184, 185, 186, 161, 162, 128, 134,
- 136, 152, 155, 161, 163, 164, 166, 170,
- 133, 143, 151, 255, 139, 143, 154, 255,
- 164, 167, 185, 187, 128, 131, 133, 159,
- 161, 162, 169, 178, 180, 183, 130, 135,
- 137, 139, 148, 151, 153, 155, 157, 159,
- 164, 190, 141, 143, 145, 146, 161, 162,
- 167, 170, 172, 178, 180, 183, 185, 188,
- 128, 137, 139, 155, 161, 163, 165, 169,
- 171, 187, 155, 156, 151, 255, 156, 157,
- 160, 181, 255, 186, 187, 255, 162, 255,
- 160, 168, 161, 167, 158, 255, 160, 132,
- 135, 133, 134, 176, 255, 170, 181, 186,
- 191, 176, 180, 182, 183, 186, 189, 134,
- 140, 136, 138, 142, 161, 163, 255, 130,
- 137, 136, 255, 144, 170, 176, 178, 160,
- 191, 128, 138, 174, 175, 177, 255, 148,
- 150, 164, 167, 173, 176, 185, 189, 190,
- 192, 255, 144, 146, 175, 141, 255, 166,
- 176, 178, 255, 186, 138, 170, 180, 181,
- 160, 161, 162, 164, 165, 166, 167, 168,
- 169, 170, 171, 172, 173, 174, 175, 176,
- 177, 178, 179, 180, 181, 182, 184, 186,
- 187, 188, 189, 190, 183, 185, 154, 164,
- 168, 128, 149, 128, 152, 189, 132, 185,
- 144, 152, 161, 177, 255, 169, 177, 129,
- 132, 141, 142, 145, 146, 179, 181, 186,
- 188, 190, 255, 142, 156, 157, 159, 161,
- 176, 177, 133, 138, 143, 144, 147, 168,
- 170, 176, 178, 179, 181, 182, 184, 185,
- 158, 153, 156, 178, 180, 189, 133, 141,
- 143, 145, 147, 168, 170, 176, 178, 179,
- 181, 185, 144, 185, 160, 161, 189, 133,
- 140, 143, 144, 147, 168, 170, 176, 178,
- 179, 181, 185, 177, 156, 157, 159, 161,
- 131, 156, 133, 138, 142, 144, 146, 149,
- 153, 154, 158, 159, 163, 164, 168, 170,
- 174, 185, 144, 189, 133, 140, 142, 144,
- 146, 168, 170, 185, 152, 154, 160, 161,
- 128, 189, 133, 140, 142, 144, 146, 168,
- 170, 179, 181, 185, 158, 160, 161, 177,
- 178, 189, 133, 140, 142, 144, 146, 186,
- 142, 148, 150, 159, 161, 186, 191, 189,
- 133, 150, 154, 177, 179, 187, 128, 134,
- 129, 176, 178, 179, 132, 138, 141, 165,
- 167, 189, 129, 130, 135, 136, 148, 151,
- 153, 159, 161, 163, 170, 171, 173, 176,
- 178, 179, 134, 128, 132, 156, 159, 128,
- 128, 135, 137, 172, 136, 140, 128, 129,
- 130, 131, 137, 138, 139, 140, 141, 142,
- 143, 144, 153, 154, 155, 156, 157, 158,
- 159, 160, 161, 162, 163, 164, 165, 166,
- 167, 168, 169, 170, 172, 173, 174, 175,
- 176, 177, 178, 179, 180, 181, 182, 184,
- 188, 189, 190, 191, 132, 152, 185, 187,
- 191, 128, 170, 161, 144, 149, 154, 157,
- 165, 166, 174, 176, 181, 255, 130, 141,
- 143, 159, 155, 255, 128, 140, 142, 145,
- 160, 177, 128, 145, 160, 172, 174, 176,
- 151, 156, 170, 128, 168, 176, 255, 138,
- 255, 128, 150, 160, 255, 149, 255, 167,
- 133, 179, 133, 139, 131, 160, 174, 175,
- 186, 255, 166, 255, 128, 163, 141, 143,
- 154, 189, 169, 172, 174, 177, 181, 182,
- 129, 130, 132, 133, 134, 176, 177, 178,
- 179, 180, 181, 182, 183, 177, 191, 165,
- 170, 175, 177, 180, 255, 168, 174, 176,
- 255, 128, 134, 136, 142, 144, 150, 152,
- 158, 128, 129, 130, 131, 132, 133, 134,
- 135, 144, 145, 255, 133, 135, 161, 169,
- 177, 181, 184, 188, 160, 151, 154, 128,
- 146, 147, 148, 152, 153, 154, 155, 156,
- 158, 159, 160, 161, 162, 163, 164, 165,
- 166, 167, 168, 169, 170, 171, 172, 173,
- 174, 175, 176, 129, 255, 141, 143, 160,
- 169, 172, 255, 191, 128, 174, 130, 134,
- 139, 163, 255, 130, 179, 187, 189, 178,
- 183, 138, 165, 176, 255, 135, 159, 189,
- 255, 132, 178, 143, 160, 164, 166, 175,
- 186, 190, 128, 168, 186, 128, 130, 132,
- 139, 160, 182, 190, 255, 176, 178, 180,
- 183, 184, 190, 255, 128, 130, 155, 157,
- 160, 170, 178, 180, 128, 162, 164, 169,
- 171, 172, 173, 174, 175, 180, 181, 182,
- 183, 185, 186, 187, 188, 189, 190, 191,
- 165, 179, 157, 190, 128, 134, 147, 151,
- 159, 168, 170, 182, 184, 188, 176, 180,
- 182, 255, 161, 186, 144, 145, 146, 147,
- 148, 150, 151, 152, 155, 157, 158, 160,
- 170, 171, 172, 175, 161, 169, 128, 129,
- 130, 131, 133, 138, 139, 140, 141, 142,
- 143, 144, 145, 146, 147, 148, 149, 152,
- 156, 157, 160, 161, 162, 163, 164, 166,
- 168, 169, 170, 171, 172, 173, 174, 176,
- 177, 153, 155, 178, 179, 145, 255, 139,
- 143, 182, 255, 158, 175, 128, 144, 147,
- 149, 151, 153, 179, 128, 135, 137, 164,
- 128, 130, 131, 132, 133, 134, 135, 136,
- 138, 139, 140, 141, 144, 145, 146, 147,
- 150, 151, 152, 153, 154, 156, 162, 163,
- 171, 176, 177, 178, 131, 183, 131, 175,
- 144, 168, 131, 166, 182, 144, 178, 131,
- 178, 154, 156, 129, 132, 128, 145, 147,
- 171, 159, 255, 144, 157, 161, 135, 138,
- 128, 175, 135, 132, 133, 128, 174, 152,
- 155, 132, 128, 170, 128, 153, 160, 190,
- 192, 255, 128, 136, 138, 174, 128, 178,
- 255, 160, 168, 169, 171, 172, 173, 174,
- 188, 189, 190, 191, 161, 167, 144, 173,
- 128, 131, 163, 183, 189, 255, 133, 143,
- 145, 255, 147, 159, 128, 176, 177, 178,
- 128, 136, 144, 153, 144, 145, 146, 147,
- 148, 149, 154, 155, 156, 157, 158, 159,
- 150, 153, 131, 140, 255, 160, 163, 164,
- 165, 184, 185, 186, 161, 162, 133, 255,
- 170, 181, 183, 186, 128, 150, 152, 182,
- 184, 255, 192, 255, 0, 127, 173, 130,
- 133, 146, 159, 165, 171, 175, 255, 181,
- 190, 184, 185, 192, 255, 140, 134, 138,
- 142, 161, 163, 255, 182, 130, 136, 137,
- 176, 151, 152, 154, 160, 190, 136, 144,
- 192, 255, 135, 129, 130, 132, 133, 144,
- 170, 176, 178, 144, 154, 160, 191, 128,
- 169, 174, 255, 148, 169, 157, 158, 189,
- 190, 192, 255, 144, 255, 139, 140, 178,
- 255, 186, 128, 181, 160, 161, 162, 163,
- 164, 165, 166, 167, 168, 169, 170, 171,
- 172, 173, 174, 175, 176, 177, 178, 179,
- 180, 181, 182, 183, 184, 185, 186, 187,
- 188, 189, 190, 191, 128, 173, 128, 155,
- 160, 180, 182, 189, 148, 161, 163, 255,
- 176, 164, 165, 132, 169, 177, 141, 142,
- 145, 146, 179, 181, 186, 187, 158, 133,
- 134, 137, 138, 143, 150, 152, 155, 164,
- 165, 178, 255, 188, 129, 131, 133, 138,
- 143, 144, 147, 168, 170, 176, 178, 179,
- 181, 182, 184, 185, 190, 255, 157, 131,
- 134, 137, 138, 142, 144, 146, 152, 159,
- 165, 182, 255, 129, 131, 133, 141, 143,
- 145, 147, 168, 170, 176, 178, 179, 181,
- 185, 188, 255, 134, 138, 142, 143, 145,
- 159, 164, 165, 176, 184, 186, 255, 129,
- 131, 133, 140, 143, 144, 147, 168, 170,
- 176, 178, 179, 181, 185, 188, 191, 177,
- 128, 132, 135, 136, 139, 141, 150, 151,
- 156, 157, 159, 163, 166, 175, 156, 130,
- 131, 133, 138, 142, 144, 146, 149, 153,
- 154, 158, 159, 163, 164, 168, 170, 174,
- 185, 190, 191, 144, 151, 128, 130, 134,
- 136, 138, 141, 166, 175, 128, 131, 133,
- 140, 142, 144, 146, 168, 170, 185, 189,
- 255, 133, 137, 151, 142, 148, 155, 159,
- 164, 165, 176, 255, 128, 131, 133, 140,
- 142, 144, 146, 168, 170, 179, 181, 185,
- 188, 191, 158, 128, 132, 134, 136, 138,
- 141, 149, 150, 160, 163, 166, 175, 177,
- 178, 129, 131, 133, 140, 142, 144, 146,
- 186, 189, 255, 133, 137, 143, 147, 152,
- 158, 164, 165, 176, 185, 192, 255, 189,
- 130, 131, 133, 150, 154, 177, 179, 187,
- 138, 150, 128, 134, 143, 148, 152, 159,
- 166, 175, 178, 179, 129, 186, 128, 142,
- 144, 153, 132, 138, 141, 165, 167, 129,
- 130, 135, 136, 148, 151, 153, 159, 161,
- 163, 170, 171, 173, 185, 187, 189, 134,
- 128, 132, 136, 141, 144, 153, 156, 159,
- 128, 181, 183, 185, 152, 153, 160, 169,
- 190, 191, 128, 135, 137, 172, 177, 191,
- 128, 132, 134, 151, 153, 188, 134, 128,
- 129, 130, 131, 137, 138, 139, 140, 141,
- 142, 143, 144, 153, 154, 155, 156, 157,
- 158, 159, 160, 161, 162, 163, 164, 165,
- 166, 167, 168, 169, 170, 173, 175, 176,
- 177, 178, 179, 181, 182, 183, 188, 189,
- 190, 191, 132, 152, 172, 184, 185, 187,
- 128, 191, 128, 137, 144, 255, 158, 159,
- 134, 187, 136, 140, 142, 143, 137, 151,
- 153, 142, 143, 158, 159, 137, 177, 142,
- 143, 182, 183, 191, 255, 128, 130, 133,
- 136, 150, 152, 255, 145, 150, 151, 155,
- 156, 160, 168, 178, 255, 128, 143, 160,
- 255, 182, 183, 190, 255, 129, 255, 173,
- 174, 192, 255, 129, 154, 160, 255, 171,
- 173, 185, 255, 128, 140, 142, 148, 160,
- 180, 128, 147, 160, 172, 174, 176, 178,
- 179, 148, 150, 152, 155, 158, 159, 170,
- 255, 139, 141, 144, 153, 160, 255, 184,
- 255, 128, 170, 176, 255, 182, 255, 128,
- 158, 160, 171, 176, 187, 134, 173, 176,
- 180, 128, 171, 176, 255, 138, 143, 155,
- 255, 128, 155, 160, 255, 159, 189, 190,
- 192, 255, 167, 128, 137, 144, 153, 176,
- 189, 140, 143, 154, 170, 180, 255, 180,
- 255, 128, 183, 128, 137, 141, 189, 128,
- 136, 144, 146, 148, 182, 184, 185, 128,
- 181, 187, 191, 150, 151, 158, 159, 152,
- 154, 156, 158, 134, 135, 142, 143, 190,
- 255, 190, 128, 180, 182, 188, 130, 132,
- 134, 140, 144, 147, 150, 155, 160, 172,
- 178, 180, 182, 188, 128, 129, 130, 131,
- 132, 133, 134, 176, 177, 178, 179, 180,
- 181, 182, 183, 191, 255, 129, 147, 149,
- 176, 178, 190, 192, 255, 144, 156, 161,
- 144, 156, 165, 176, 130, 135, 149, 164,
- 166, 168, 138, 147, 152, 157, 170, 185,
- 188, 191, 142, 133, 137, 160, 255, 137,
- 255, 128, 174, 176, 255, 159, 165, 170,
- 180, 255, 167, 173, 128, 165, 176, 255,
- 168, 174, 176, 190, 192, 255, 128, 150,
- 160, 166, 168, 174, 176, 182, 184, 190,
- 128, 134, 136, 142, 144, 150, 152, 158,
- 160, 191, 128, 129, 130, 131, 132, 133,
- 134, 135, 144, 145, 255, 133, 135, 161,
- 175, 177, 181, 184, 188, 160, 151, 152,
- 187, 192, 255, 133, 173, 177, 255, 143,
- 159, 187, 255, 176, 191, 182, 183, 184,
- 191, 192, 255, 150, 255, 128, 146, 147,
- 148, 152, 153, 154, 155, 156, 158, 159,
- 160, 161, 162, 163, 164, 165, 166, 167,
- 168, 169, 170, 171, 172, 173, 174, 175,
- 176, 129, 255, 141, 255, 144, 189, 141,
- 143, 172, 255, 191, 128, 175, 180, 189,
- 151, 159, 162, 255, 175, 137, 138, 184,
- 255, 183, 255, 168, 255, 128, 179, 188,
- 134, 143, 154, 159, 184, 186, 190, 255,
- 128, 173, 176, 255, 148, 159, 189, 255,
- 129, 142, 154, 159, 191, 255, 128, 182,
- 128, 141, 144, 153, 160, 182, 186, 255,
- 128, 130, 155, 157, 160, 175, 178, 182,
- 129, 134, 137, 142, 145, 150, 160, 166,
- 168, 174, 176, 255, 155, 166, 175, 128,
- 170, 172, 173, 176, 185, 158, 159, 160,
- 255, 164, 175, 135, 138, 188, 255, 164,
- 169, 171, 172, 173, 174, 175, 180, 181,
- 182, 183, 184, 185, 187, 188, 189, 190,
- 191, 165, 186, 174, 175, 154, 255, 190,
- 128, 134, 147, 151, 157, 168, 170, 182,
- 184, 188, 128, 129, 131, 132, 134, 255,
- 147, 255, 190, 255, 144, 145, 136, 175,
- 188, 255, 128, 143, 160, 175, 179, 180,
- 141, 143, 176, 180, 182, 255, 189, 255,
- 191, 144, 153, 161, 186, 129, 154, 166,
- 255, 191, 255, 130, 135, 138, 143, 146,
- 151, 154, 156, 144, 145, 146, 147, 148,
- 150, 151, 152, 155, 157, 158, 160, 170,
- 171, 172, 175, 161, 169, 128, 129, 130,
- 131, 133, 135, 138, 139, 140, 141, 142,
- 143, 144, 145, 146, 147, 148, 149, 152,
- 156, 157, 160, 161, 162, 163, 164, 166,
- 168, 169, 170, 171, 172, 173, 174, 176,
- 177, 153, 155, 178, 179, 128, 139, 141,
- 166, 168, 186, 188, 189, 191, 255, 142,
- 143, 158, 255, 187, 255, 128, 180, 189,
- 128, 156, 160, 255, 145, 159, 161, 255,
- 128, 159, 176, 255, 139, 143, 187, 255,
- 128, 157, 160, 255, 144, 132, 135, 150,
- 255, 158, 159, 170, 175, 148, 151, 188,
- 255, 128, 167, 176, 255, 164, 255, 183,
- 255, 128, 149, 160, 167, 136, 188, 128,
- 133, 138, 181, 183, 184, 191, 255, 150,
- 159, 183, 255, 128, 158, 160, 178, 180,
- 181, 128, 149, 160, 185, 128, 183, 190,
- 191, 191, 128, 131, 133, 134, 140, 147,
- 149, 151, 153, 179, 184, 186, 160, 188,
- 128, 156, 128, 135, 137, 166, 128, 181,
- 128, 149, 160, 178, 128, 145, 128, 178,
- 129, 130, 131, 132, 133, 135, 136, 138,
- 139, 140, 141, 144, 145, 146, 147, 150,
- 151, 152, 153, 154, 155, 156, 162, 163,
- 171, 176, 177, 178, 128, 134, 135, 165,
- 176, 190, 144, 168, 176, 185, 128, 180,
- 182, 191, 182, 144, 179, 155, 133, 137,
- 141, 143, 157, 255, 190, 128, 145, 147,
- 183, 136, 128, 134, 138, 141, 143, 157,
- 159, 168, 176, 255, 171, 175, 186, 255,
- 128, 131, 133, 140, 143, 144, 147, 168,
- 170, 176, 178, 179, 181, 185, 188, 191,
- 144, 151, 128, 132, 135, 136, 139, 141,
- 157, 163, 166, 172, 176, 180, 128, 138,
- 144, 153, 134, 136, 143, 154, 255, 128,
- 181, 184, 255, 129, 151, 158, 255, 129,
- 131, 133, 143, 154, 255, 128, 137, 128,
- 153, 157, 171, 176, 185, 160, 255, 170,
- 190, 192, 255, 128, 184, 128, 136, 138,
- 182, 184, 191, 128, 144, 153, 178, 255,
- 168, 144, 145, 183, 255, 128, 142, 145,
- 149, 129, 141, 144, 146, 147, 148, 175,
- 255, 132, 255, 128, 144, 129, 143, 144,
- 153, 145, 152, 135, 255, 160, 168, 169,
- 171, 172, 173, 174, 188, 189, 190, 191,
- 161, 167, 185, 255, 128, 158, 160, 169,
- 144, 173, 176, 180, 128, 131, 144, 153,
- 163, 183, 189, 255, 144, 255, 133, 143,
- 191, 255, 143, 159, 160, 128, 129, 255,
- 159, 160, 171, 172, 255, 173, 255, 179,
- 255, 128, 176, 177, 178, 128, 129, 171,
- 175, 189, 255, 128, 136, 144, 153, 157,
- 158, 133, 134, 137, 144, 145, 146, 147,
- 148, 149, 154, 155, 156, 157, 158, 159,
- 168, 169, 170, 150, 153, 165, 169, 173,
- 178, 187, 255, 131, 132, 140, 169, 174,
- 255, 130, 132, 149, 157, 173, 186, 188,
- 160, 161, 163, 164, 167, 168, 132, 134,
- 149, 157, 186, 139, 140, 191, 255, 134,
- 128, 132, 138, 144, 146, 255, 166, 167,
- 129, 155, 187, 149, 181, 143, 175, 137,
- 169, 131, 140, 141, 192, 255, 128, 182,
- 187, 255, 173, 180, 182, 255, 132, 155,
- 159, 161, 175, 128, 160, 163, 164, 165,
- 184, 185, 186, 161, 162, 128, 134, 136,
- 152, 155, 161, 163, 164, 166, 170, 133,
- 143, 151, 255, 139, 143, 154, 255, 164,
- 167, 185, 187, 128, 131, 133, 159, 161,
- 162, 169, 178, 180, 183, 130, 135, 137,
- 139, 148, 151, 153, 155, 157, 159, 164,
- 190, 141, 143, 145, 146, 161, 162, 167,
- 170, 172, 178, 180, 183, 185, 188, 128,
- 137, 139, 155, 161, 163, 165, 169, 171,
- 187, 155, 156, 151, 255, 156, 157, 160,
- 181, 255, 186, 187, 255, 162, 255, 160,
- 168, 161, 167, 158, 255, 160, 132, 135,
- 133, 134, 176, 255, 128, 191, 154, 164,
- 168, 128, 149, 150, 191, 128, 152, 153,
- 191, 181, 128, 159, 160, 189, 190, 191,
- 189, 128, 131, 132, 185, 186, 191, 144,
- 128, 151, 152, 161, 162, 176, 177, 255,
- 169, 177, 129, 132, 141, 142, 145, 146,
- 179, 181, 186, 188, 190, 191, 192, 255,
- 142, 158, 128, 155, 156, 161, 162, 175,
- 176, 177, 178, 191, 169, 177, 180, 183,
- 128, 132, 133, 138, 139, 142, 143, 144,
- 145, 146, 147, 185, 186, 191, 157, 128,
- 152, 153, 158, 159, 177, 178, 180, 181,
- 191, 142, 146, 169, 177, 180, 189, 128,
- 132, 133, 185, 186, 191, 144, 185, 128,
- 159, 160, 161, 162, 191, 169, 177, 180,
- 189, 128, 132, 133, 140, 141, 142, 143,
- 144, 145, 146, 147, 185, 186, 191, 158,
- 177, 128, 155, 156, 161, 162, 191, 131,
- 145, 155, 157, 128, 132, 133, 138, 139,
- 141, 142, 149, 150, 152, 153, 159, 160,
- 162, 163, 164, 165, 167, 168, 170, 171,
- 173, 174, 185, 186, 191, 144, 128, 191,
- 141, 145, 169, 189, 128, 132, 133, 185,
- 186, 191, 128, 151, 152, 154, 155, 159,
- 160, 161, 162, 191, 128, 141, 145, 169,
- 180, 189, 129, 132, 133, 185, 186, 191,
- 158, 128, 159, 160, 161, 162, 176, 177,
- 178, 179, 191, 141, 145, 189, 128, 132,
- 133, 186, 187, 191, 142, 128, 147, 148,
- 150, 151, 158, 159, 161, 162, 185, 186,
- 191, 178, 188, 128, 132, 133, 150, 151,
- 153, 154, 189, 190, 191, 128, 134, 135,
- 191, 128, 177, 129, 179, 180, 191, 128,
- 131, 137, 141, 152, 160, 164, 166, 172,
- 177, 189, 129, 132, 133, 134, 135, 138,
- 139, 147, 148, 167, 168, 169, 170, 179,
- 180, 191, 133, 128, 134, 135, 155, 156,
- 159, 160, 191, 128, 129, 191, 136, 128,
- 172, 173, 191, 128, 135, 136, 140, 141,
- 191, 191, 128, 170, 171, 190, 161, 128,
- 143, 144, 149, 150, 153, 154, 157, 158,
- 164, 165, 166, 167, 173, 174, 176, 177,
- 180, 181, 255, 130, 141, 143, 159, 134,
- 187, 136, 140, 142, 143, 137, 151, 153,
- 142, 143, 158, 159, 137, 177, 191, 142,
- 143, 182, 183, 192, 255, 129, 151, 128,
- 133, 134, 135, 136, 255, 145, 150, 151,
- 155, 191, 192, 255, 128, 143, 144, 159,
- 160, 255, 182, 183, 190, 191, 192, 255,
- 128, 129, 255, 173, 174, 192, 255, 128,
- 129, 154, 155, 159, 160, 255, 171, 173,
- 185, 191, 192, 255, 141, 128, 145, 146,
- 159, 160, 177, 178, 191, 173, 128, 145,
- 146, 159, 160, 176, 177, 191, 128, 179,
- 180, 191, 151, 156, 128, 191, 128, 159,
- 160, 255, 184, 191, 192, 255, 169, 128,
- 170, 171, 175, 176, 255, 182, 191, 192,
- 255, 128, 158, 159, 191, 128, 143, 144,
- 173, 174, 175, 176, 180, 181, 191, 128,
- 171, 172, 175, 176, 255, 138, 191, 192,
- 255, 128, 150, 151, 159, 160, 255, 149,
- 191, 192, 255, 167, 128, 191, 128, 132,
- 133, 179, 180, 191, 128, 132, 133, 139,
- 140, 191, 128, 130, 131, 160, 161, 173,
- 174, 175, 176, 185, 186, 255, 166, 191,
- 192, 255, 128, 163, 164, 191, 128, 140,
- 141, 143, 144, 153, 154, 189, 190, 191,
- 128, 136, 137, 191, 173, 128, 168, 169,
- 177, 178, 180, 181, 182, 183, 191, 0,
- 127, 192, 255, 150, 151, 158, 159, 152,
- 154, 156, 158, 134, 135, 142, 143, 190,
- 191, 192, 255, 181, 189, 191, 128, 190,
- 133, 181, 128, 129, 130, 140, 141, 143,
- 144, 147, 148, 149, 150, 155, 156, 159,
- 160, 172, 173, 177, 178, 188, 189, 191,
- 177, 191, 128, 190, 128, 143, 144, 156,
- 157, 191, 130, 135, 148, 164, 166, 168,
- 128, 137, 138, 149, 150, 151, 152, 157,
- 158, 169, 170, 185, 186, 187, 188, 191,
- 142, 128, 132, 133, 137, 138, 159, 160,
- 255, 137, 191, 192, 255, 175, 128, 255,
- 159, 165, 170, 175, 177, 180, 191, 192,
- 255, 166, 173, 128, 167, 168, 175, 176,
- 255, 168, 174, 176, 191, 192, 255, 167,
- 175, 183, 191, 128, 150, 151, 159, 160,
- 190, 135, 143, 151, 128, 158, 159, 191,
- 128, 132, 133, 135, 136, 160, 161, 169,
- 170, 176, 177, 181, 182, 183, 184, 188,
- 189, 191, 160, 151, 154, 187, 192, 255,
- 128, 132, 133, 173, 174, 176, 177, 255,
- 143, 159, 187, 191, 192, 255, 128, 175,
- 176, 191, 150, 191, 192, 255, 141, 191,
- 192, 255, 128, 143, 144, 189, 190, 191,
- 141, 143, 160, 169, 172, 191, 192, 255,
- 191, 128, 174, 175, 190, 128, 157, 158,
- 159, 160, 255, 176, 191, 192, 255, 128,
- 150, 151, 159, 160, 161, 162, 255, 175,
- 137, 138, 184, 191, 192, 255, 128, 182,
- 183, 255, 130, 134, 139, 163, 191, 192,
- 255, 128, 129, 130, 179, 180, 191, 187,
- 189, 128, 177, 178, 183, 184, 191, 128,
- 137, 138, 165, 166, 175, 176, 255, 135,
- 159, 189, 191, 192, 255, 128, 131, 132,
- 178, 179, 191, 143, 165, 191, 128, 159,
- 160, 175, 176, 185, 186, 190, 128, 168,
- 169, 191, 131, 186, 128, 139, 140, 159,
- 160, 182, 183, 189, 190, 255, 176, 178,
- 180, 183, 184, 190, 191, 192, 255, 129,
- 128, 130, 131, 154, 155, 157, 158, 159,
- 160, 170, 171, 177, 178, 180, 181, 191,
- 128, 167, 175, 129, 134, 135, 136, 137,
- 142, 143, 144, 145, 150, 151, 159, 160,
- 255, 155, 166, 175, 128, 162, 163, 191,
- 164, 175, 135, 138, 188, 191, 192, 255,
- 174, 175, 154, 191, 192, 255, 157, 169,
- 183, 189, 191, 128, 134, 135, 146, 147,
- 151, 152, 158, 159, 190, 130, 133, 128,
- 255, 178, 191, 192, 255, 128, 146, 147,
- 255, 190, 191, 192, 255, 128, 143, 144,
- 255, 144, 145, 136, 175, 188, 191, 192,
- 255, 181, 128, 175, 176, 255, 189, 191,
- 192, 255, 128, 160, 161, 186, 187, 191,
- 128, 129, 154, 155, 165, 166, 255, 191,
- 192, 255, 128, 129, 130, 135, 136, 137,
- 138, 143, 144, 145, 146, 151, 152, 153,
- 154, 156, 157, 191, 128, 191, 128, 129,
- 130, 131, 133, 138, 139, 140, 141, 142,
- 143, 144, 145, 146, 147, 148, 149, 152,
- 156, 157, 160, 161, 162, 163, 164, 166,
- 168, 169, 170, 171, 172, 173, 174, 176,
- 177, 132, 151, 153, 155, 158, 175, 178,
- 179, 180, 191, 140, 167, 187, 190, 128,
- 255, 142, 143, 158, 191, 192, 255, 187,
- 191, 192, 255, 128, 180, 181, 191, 128,
- 156, 157, 159, 160, 255, 145, 191, 192,
- 255, 128, 159, 160, 175, 176, 255, 139,
- 143, 182, 191, 192, 255, 144, 132, 135,
- 150, 191, 192, 255, 158, 175, 148, 151,
- 188, 191, 192, 255, 128, 167, 168, 175,
- 176, 255, 164, 191, 192, 255, 183, 191,
- 192, 255, 128, 149, 150, 159, 160, 167,
- 168, 191, 136, 182, 188, 128, 133, 134,
- 137, 138, 184, 185, 190, 191, 255, 150,
- 159, 183, 191, 192, 255, 179, 128, 159,
- 160, 181, 182, 191, 128, 149, 150, 159,
- 160, 185, 186, 191, 128, 183, 184, 189,
- 190, 191, 128, 148, 152, 129, 143, 144,
- 179, 180, 191, 128, 159, 160, 188, 189,
- 191, 128, 156, 157, 191, 136, 128, 164,
- 165, 191, 128, 181, 182, 191, 128, 149,
- 150, 159, 160, 178, 179, 191, 128, 145,
- 146, 191, 128, 178, 179, 191, 128, 130,
- 131, 132, 133, 134, 135, 136, 138, 139,
- 140, 141, 144, 145, 146, 147, 150, 151,
- 152, 153, 154, 156, 162, 163, 171, 176,
- 177, 178, 129, 191, 128, 130, 131, 183,
- 184, 191, 128, 130, 131, 175, 176, 191,
- 128, 143, 144, 168, 169, 191, 128, 130,
- 131, 166, 167, 191, 182, 128, 143, 144,
- 178, 179, 191, 128, 130, 131, 178, 179,
- 191, 128, 154, 156, 129, 132, 133, 191,
- 146, 128, 171, 172, 191, 135, 137, 142,
- 158, 128, 168, 169, 175, 176, 255, 159,
- 191, 192, 255, 144, 128, 156, 157, 161,
- 162, 191, 128, 134, 135, 138, 139, 191,
- 128, 175, 176, 191, 134, 128, 131, 132,
- 135, 136, 191, 128, 174, 175, 191, 128,
- 151, 152, 155, 156, 191, 132, 128, 191,
- 128, 170, 171, 191, 128, 153, 154, 191,
- 160, 190, 192, 255, 128, 184, 185, 191,
- 137, 128, 174, 175, 191, 128, 129, 177,
- 178, 255, 144, 191, 192, 255, 128, 142,
- 143, 144, 145, 146, 149, 129, 148, 150,
- 191, 175, 191, 192, 255, 132, 191, 192,
- 255, 128, 144, 129, 143, 145, 191, 144,
- 153, 128, 143, 145, 152, 154, 191, 135,
- 191, 192, 255, 160, 168, 169, 171, 172,
- 173, 174, 188, 189, 190, 191, 128, 159,
- 161, 167, 170, 187, 185, 191, 192, 255,
- 128, 143, 144, 173, 174, 191, 128, 131,
- 132, 162, 163, 183, 184, 188, 189, 255,
- 133, 143, 145, 191, 192, 255, 128, 146,
- 147, 159, 160, 191, 160, 128, 191, 128,
- 129, 191, 192, 255, 159, 160, 171, 128,
- 170, 172, 191, 192, 255, 173, 191, 192,
- 255, 179, 191, 192, 255, 128, 176, 177,
- 178, 129, 191, 128, 129, 130, 191, 171,
- 175, 189, 191, 192, 255, 128, 136, 137,
- 143, 144, 153, 154, 191, 144, 145, 146,
- 147, 148, 149, 154, 155, 156, 157, 158,
- 159, 128, 143, 150, 153, 160, 191, 149,
- 157, 173, 186, 188, 160, 161, 163, 164,
- 167, 168, 132, 134, 149, 157, 186, 191,
- 139, 140, 192, 255, 133, 145, 128, 134,
- 135, 137, 138, 255, 166, 167, 129, 155,
- 187, 149, 181, 143, 175, 137, 169, 131,
- 140, 191, 192, 255, 160, 163, 164, 165,
- 184, 185, 186, 128, 159, 161, 162, 166,
- 191, 133, 191, 192, 255, 132, 160, 163,
- 167, 179, 184, 186, 128, 164, 165, 168,
- 169, 187, 188, 191, 130, 135, 137, 139,
- 144, 147, 151, 153, 155, 157, 159, 163,
- 171, 179, 184, 189, 191, 128, 140, 141,
- 148, 149, 160, 161, 164, 165, 166, 167,
- 190, 138, 164, 170, 128, 155, 156, 160,
- 161, 187, 188, 191, 128, 191, 155, 156,
- 128, 191, 151, 191, 192, 255, 156, 157,
- 160, 128, 191, 181, 191, 192, 255, 158,
- 159, 186, 128, 185, 187, 191, 192, 255,
- 162, 191, 192, 255, 160, 168, 128, 159,
- 161, 167, 169, 191, 158, 191, 192, 255,
- 10, 13, 128, 191, 192, 223, 224, 239,
- 240, 247, 248, 255, 128, 191, 128, 191,
- 128, 191, 128, 191, 128, 191, 10, 128,
- 191, 128, 191, 128, 191, 36, 123, 37,
- 123, 10, 128, 191, 128, 191, 128, 191,
- 36, 123, 37, 123, 170, 181, 183, 186,
- 128, 150, 152, 182, 184, 255, 192, 255,
- 128, 255, 173, 130, 133, 146, 159, 165,
- 171, 175, 255, 181, 190, 184, 185, 192,
- 255, 140, 134, 138, 142, 161, 163, 255,
- 182, 130, 136, 137, 176, 151, 152, 154,
- 160, 190, 136, 144, 192, 255, 135, 129,
- 130, 132, 133, 144, 170, 176, 178, 144,
- 154, 160, 191, 128, 169, 174, 255, 148,
- 169, 157, 158, 189, 190, 192, 255, 144,
- 255, 139, 140, 178, 255, 186, 128, 181,
- 160, 161, 162, 163, 164, 165, 166, 167,
- 168, 169, 170, 171, 172, 173, 174, 175,
- 176, 177, 178, 179, 180, 181, 182, 183,
- 184, 185, 186, 187, 188, 189, 190, 191,
- 128, 173, 128, 155, 160, 180, 182, 189,
- 148, 161, 163, 255, 176, 164, 165, 132,
- 169, 177, 141, 142, 145, 146, 179, 181,
- 186, 187, 158, 133, 134, 137, 138, 143,
- 150, 152, 155, 164, 165, 178, 255, 188,
- 129, 131, 133, 138, 143, 144, 147, 168,
- 170, 176, 178, 179, 181, 182, 184, 185,
- 190, 255, 157, 131, 134, 137, 138, 142,
- 144, 146, 152, 159, 165, 182, 255, 129,
- 131, 133, 141, 143, 145, 147, 168, 170,
- 176, 178, 179, 181, 185, 188, 255, 134,
- 138, 142, 143, 145, 159, 164, 165, 176,
- 184, 186, 255, 129, 131, 133, 140, 143,
- 144, 147, 168, 170, 176, 178, 179, 181,
- 185, 188, 191, 177, 128, 132, 135, 136,
- 139, 141, 150, 151, 156, 157, 159, 163,
- 166, 175, 156, 130, 131, 133, 138, 142,
- 144, 146, 149, 153, 154, 158, 159, 163,
- 164, 168, 170, 174, 185, 190, 191, 144,
- 151, 128, 130, 134, 136, 138, 141, 166,
- 175, 128, 131, 133, 140, 142, 144, 146,
- 168, 170, 185, 189, 255, 133, 137, 151,
- 142, 148, 155, 159, 164, 165, 176, 255,
- 128, 131, 133, 140, 142, 144, 146, 168,
- 170, 179, 181, 185, 188, 191, 158, 128,
- 132, 134, 136, 138, 141, 149, 150, 160,
- 163, 166, 175, 177, 178, 129, 131, 133,
- 140, 142, 144, 146, 186, 189, 255, 133,
- 137, 143, 147, 152, 158, 164, 165, 176,
- 185, 192, 255, 189, 130, 131, 133, 150,
- 154, 177, 179, 187, 138, 150, 128, 134,
- 143, 148, 152, 159, 166, 175, 178, 179,
- 129, 186, 128, 142, 144, 153, 132, 138,
- 141, 165, 167, 129, 130, 135, 136, 148,
- 151, 153, 159, 161, 163, 170, 171, 173,
- 185, 187, 189, 134, 128, 132, 136, 141,
- 144, 153, 156, 159, 128, 181, 183, 185,
- 152, 153, 160, 169, 190, 191, 128, 135,
- 137, 172, 177, 191, 128, 132, 134, 151,
- 153, 188, 134, 128, 129, 130, 131, 137,
- 138, 139, 140, 141, 142, 143, 144, 153,
- 154, 155, 156, 157, 158, 159, 160, 161,
- 162, 163, 164, 165, 166, 167, 168, 169,
- 170, 173, 175, 176, 177, 178, 179, 181,
- 182, 183, 188, 189, 190, 191, 132, 152,
- 172, 184, 185, 187, 128, 191, 128, 137,
- 144, 255, 158, 159, 134, 187, 136, 140,
- 142, 143, 137, 151, 153, 142, 143, 158,
- 159, 137, 177, 142, 143, 182, 183, 191,
- 255, 128, 130, 133, 136, 150, 152, 255,
- 145, 150, 151, 155, 156, 160, 168, 178,
- 255, 128, 143, 160, 255, 182, 183, 190,
- 255, 129, 255, 173, 174, 192, 255, 129,
- 154, 160, 255, 171, 173, 185, 255, 128,
- 140, 142, 148, 160, 180, 128, 147, 160,
- 172, 174, 176, 178, 179, 148, 150, 152,
- 155, 158, 159, 170, 255, 139, 141, 144,
- 153, 160, 255, 184, 255, 128, 170, 176,
- 255, 182, 255, 128, 158, 160, 171, 176,
- 187, 134, 173, 176, 180, 128, 171, 176,
- 255, 138, 143, 155, 255, 128, 155, 160,
- 255, 159, 189, 190, 192, 255, 167, 128,
- 137, 144, 153, 176, 189, 140, 143, 154,
- 170, 180, 255, 180, 255, 128, 183, 128,
- 137, 141, 189, 128, 136, 144, 146, 148,
- 182, 184, 185, 128, 181, 187, 191, 150,
- 151, 158, 159, 152, 154, 156, 158, 134,
- 135, 142, 143, 190, 255, 190, 128, 180,
- 182, 188, 130, 132, 134, 140, 144, 147,
- 150, 155, 160, 172, 178, 180, 182, 188,
- 128, 129, 130, 131, 132, 133, 134, 176,
- 177, 178, 179, 180, 181, 182, 183, 191,
- 255, 129, 147, 149, 176, 178, 190, 192,
- 255, 144, 156, 161, 144, 156, 165, 176,
- 130, 135, 149, 164, 166, 168, 138, 147,
- 152, 157, 170, 185, 188, 191, 142, 133,
- 137, 160, 255, 137, 255, 128, 174, 176,
- 255, 159, 165, 170, 180, 255, 167, 173,
- 128, 165, 176, 255, 168, 174, 176, 190,
- 192, 255, 128, 150, 160, 166, 168, 174,
- 176, 182, 184, 190, 128, 134, 136, 142,
- 144, 150, 152, 158, 160, 191, 128, 129,
- 130, 131, 132, 133, 134, 135, 144, 145,
- 255, 133, 135, 161, 175, 177, 181, 184,
- 188, 160, 151, 152, 187, 192, 255, 133,
- 173, 177, 255, 143, 159, 187, 255, 176,
- 191, 182, 183, 184, 191, 192, 255, 150,
- 255, 128, 146, 147, 148, 152, 153, 154,
- 155, 156, 158, 159, 160, 161, 162, 163,
- 164, 165, 166, 167, 168, 169, 170, 171,
- 172, 173, 174, 175, 176, 129, 255, 141,
- 255, 144, 189, 141, 143, 172, 255, 191,
- 128, 175, 180, 189, 151, 159, 162, 255,
- 175, 137, 138, 184, 255, 183, 255, 168,
- 255, 128, 179, 188, 134, 143, 154, 159,
- 184, 186, 190, 255, 128, 173, 176, 255,
- 148, 159, 189, 255, 129, 142, 154, 159,
- 191, 255, 128, 182, 128, 141, 144, 153,
- 160, 182, 186, 255, 128, 130, 155, 157,
- 160, 175, 178, 182, 129, 134, 137, 142,
- 145, 150, 160, 166, 168, 174, 176, 255,
- 155, 166, 175, 128, 170, 172, 173, 176,
- 185, 158, 159, 160, 255, 164, 175, 135,
- 138, 188, 255, 164, 169, 171, 172, 173,
- 174, 175, 180, 181, 182, 183, 184, 185,
- 187, 188, 189, 190, 191, 165, 186, 174,
- 175, 154, 255, 190, 128, 134, 147, 151,
- 157, 168, 170, 182, 184, 188, 128, 129,
- 131, 132, 134, 255, 147, 255, 190, 255,
- 144, 145, 136, 175, 188, 255, 128, 143,
- 160, 175, 179, 180, 141, 143, 176, 180,
- 182, 255, 189, 255, 191, 144, 153, 161,
- 186, 129, 154, 166, 255, 191, 255, 130,
- 135, 138, 143, 146, 151, 154, 156, 144,
- 145, 146, 147, 148, 150, 151, 152, 155,
- 157, 158, 160, 170, 171, 172, 175, 161,
- 169, 128, 129, 130, 131, 133, 135, 138,
- 139, 140, 141, 142, 143, 144, 145, 146,
- 147, 148, 149, 152, 156, 157, 160, 161,
- 162, 163, 164, 166, 168, 169, 170, 171,
- 172, 173, 174, 176, 177, 153, 155, 178,
- 179, 128, 139, 141, 166, 168, 186, 188,
- 189, 191, 255, 142, 143, 158, 255, 187,
- 255, 128, 180, 189, 128, 156, 160, 255,
- 145, 159, 161, 255, 128, 159, 176, 255,
- 139, 143, 187, 255, 128, 157, 160, 255,
- 144, 132, 135, 150, 255, 158, 159, 170,
- 175, 148, 151, 188, 255, 128, 167, 176,
- 255, 164, 255, 183, 255, 128, 149, 160,
- 167, 136, 188, 128, 133, 138, 181, 183,
- 184, 191, 255, 150, 159, 183, 255, 128,
- 158, 160, 178, 180, 181, 128, 149, 160,
- 185, 128, 183, 190, 191, 191, 128, 131,
- 133, 134, 140, 147, 149, 151, 153, 179,
- 184, 186, 160, 188, 128, 156, 128, 135,
- 137, 166, 128, 181, 128, 149, 160, 178,
- 128, 145, 128, 178, 129, 130, 131, 132,
- 133, 135, 136, 138, 139, 140, 141, 144,
- 145, 146, 147, 150, 151, 152, 153, 154,
- 155, 156, 162, 163, 171, 176, 177, 178,
- 128, 134, 135, 165, 176, 190, 144, 168,
- 176, 185, 128, 180, 182, 191, 182, 144,
- 179, 155, 133, 137, 141, 143, 157, 255,
- 190, 128, 145, 147, 183, 136, 128, 134,
- 138, 141, 143, 157, 159, 168, 176, 255,
- 171, 175, 186, 255, 128, 131, 133, 140,
- 143, 144, 147, 168, 170, 176, 178, 179,
- 181, 185, 188, 191, 144, 151, 128, 132,
- 135, 136, 139, 141, 157, 163, 166, 172,
- 176, 180, 128, 138, 144, 153, 134, 136,
- 143, 154, 255, 128, 181, 184, 255, 129,
- 151, 158, 255, 129, 131, 133, 143, 154,
- 255, 128, 137, 128, 153, 157, 171, 176,
- 185, 160, 255, 170, 190, 192, 255, 128,
- 184, 128, 136, 138, 182, 184, 191, 128,
- 144, 153, 178, 255, 168, 144, 145, 183,
- 255, 128, 142, 145, 149, 129, 141, 144,
- 146, 147, 148, 175, 255, 132, 255, 128,
- 144, 129, 143, 144, 153, 145, 152, 135,
- 255, 160, 168, 169, 171, 172, 173, 174,
- 188, 189, 190, 191, 161, 167, 185, 255,
- 128, 158, 160, 169, 144, 173, 176, 180,
- 128, 131, 144, 153, 163, 183, 189, 255,
- 144, 255, 133, 143, 191, 255, 143, 159,
- 160, 128, 129, 255, 159, 160, 171, 172,
- 255, 173, 255, 179, 255, 128, 176, 177,
- 178, 128, 129, 171, 175, 189, 255, 128,
- 136, 144, 153, 157, 158, 133, 134, 137,
- 144, 145, 146, 147, 148, 149, 154, 155,
- 156, 157, 158, 159, 168, 169, 170, 150,
- 153, 165, 169, 173, 178, 187, 255, 131,
- 132, 140, 169, 174, 255, 130, 132, 149,
- 157, 173, 186, 188, 160, 161, 163, 164,
- 167, 168, 132, 134, 149, 157, 186, 139,
- 140, 191, 255, 134, 128, 132, 138, 144,
- 146, 255, 166, 167, 129, 155, 187, 149,
- 181, 143, 175, 137, 169, 131, 140, 141,
- 192, 255, 128, 182, 187, 255, 173, 180,
- 182, 255, 132, 155, 159, 161, 175, 128,
- 160, 163, 164, 165, 184, 185, 186, 161,
- 162, 128, 134, 136, 152, 155, 161, 163,
- 164, 166, 170, 133, 143, 151, 255, 139,
- 143, 154, 255, 164, 167, 185, 187, 128,
- 131, 133, 159, 161, 162, 169, 178, 180,
- 183, 130, 135, 137, 139, 148, 151, 153,
- 155, 157, 159, 164, 190, 141, 143, 145,
- 146, 161, 162, 167, 170, 172, 178, 180,
- 183, 185, 188, 128, 137, 139, 155, 161,
- 163, 165, 169, 171, 187, 155, 156, 151,
- 255, 156, 157, 160, 181, 255, 186, 187,
- 255, 162, 255, 160, 168, 161, 167, 158,
- 255, 160, 132, 135, 133, 134, 176, 255,
- 128, 191, 154, 164, 168, 128, 149, 150,
- 191, 128, 152, 153, 191, 181, 128, 159,
- 160, 189, 190, 191, 189, 128, 131, 132,
- 185, 186, 191, 144, 128, 151, 152, 161,
- 162, 176, 177, 255, 169, 177, 129, 132,
- 141, 142, 145, 146, 179, 181, 186, 188,
- 190, 191, 192, 255, 142, 158, 128, 155,
- 156, 161, 162, 175, 176, 177, 178, 191,
- 169, 177, 180, 183, 128, 132, 133, 138,
- 139, 142, 143, 144, 145, 146, 147, 185,
- 186, 191, 157, 128, 152, 153, 158, 159,
- 177, 178, 180, 181, 191, 142, 146, 169,
- 177, 180, 189, 128, 132, 133, 185, 186,
- 191, 144, 185, 128, 159, 160, 161, 162,
- 191, 169, 177, 180, 189, 128, 132, 133,
- 140, 141, 142, 143, 144, 145, 146, 147,
- 185, 186, 191, 158, 177, 128, 155, 156,
- 161, 162, 191, 131, 145, 155, 157, 128,
- 132, 133, 138, 139, 141, 142, 149, 150,
- 152, 153, 159, 160, 162, 163, 164, 165,
- 167, 168, 170, 171, 173, 174, 185, 186,
- 191, 144, 128, 191, 141, 145, 169, 189,
- 128, 132, 133, 185, 186, 191, 128, 151,
- 152, 154, 155, 159, 160, 161, 162, 191,
- 128, 141, 145, 169, 180, 189, 129, 132,
- 133, 185, 186, 191, 158, 128, 159, 160,
- 161, 162, 176, 177, 178, 179, 191, 141,
- 145, 189, 128, 132, 133, 186, 187, 191,
- 142, 128, 147, 148, 150, 151, 158, 159,
- 161, 162, 185, 186, 191, 178, 188, 128,
- 132, 133, 150, 151, 153, 154, 189, 190,
- 191, 128, 134, 135, 191, 128, 177, 129,
- 179, 180, 191, 128, 131, 137, 141, 152,
- 160, 164, 166, 172, 177, 189, 129, 132,
- 133, 134, 135, 138, 139, 147, 148, 167,
- 168, 169, 170, 179, 180, 191, 133, 128,
- 134, 135, 155, 156, 159, 160, 191, 128,
- 129, 191, 136, 128, 172, 173, 191, 128,
- 135, 136, 140, 141, 191, 191, 128, 170,
- 171, 190, 161, 128, 143, 144, 149, 150,
- 153, 154, 157, 158, 164, 165, 166, 167,
- 173, 174, 176, 177, 180, 181, 255, 130,
- 141, 143, 159, 134, 187, 136, 140, 142,
- 143, 137, 151, 153, 142, 143, 158, 159,
- 137, 177, 191, 142, 143, 182, 183, 192,
- 255, 129, 151, 128, 133, 134, 135, 136,
- 255, 145, 150, 151, 155, 191, 192, 255,
- 128, 143, 144, 159, 160, 255, 182, 183,
- 190, 191, 192, 255, 128, 129, 255, 173,
- 174, 192, 255, 128, 129, 154, 155, 159,
- 160, 255, 171, 173, 185, 191, 192, 255,
- 141, 128, 145, 146, 159, 160, 177, 178,
- 191, 173, 128, 145, 146, 159, 160, 176,
- 177, 191, 128, 179, 180, 191, 151, 156,
- 128, 191, 128, 159, 160, 255, 184, 191,
- 192, 255, 169, 128, 170, 171, 175, 176,
- 255, 182, 191, 192, 255, 128, 158, 159,
- 191, 128, 143, 144, 173, 174, 175, 176,
- 180, 181, 191, 128, 171, 172, 175, 176,
- 255, 138, 191, 192, 255, 128, 150, 151,
- 159, 160, 255, 149, 191, 192, 255, 167,
- 128, 191, 128, 132, 133, 179, 180, 191,
- 128, 132, 133, 139, 140, 191, 128, 130,
- 131, 160, 161, 173, 174, 175, 176, 185,
- 186, 255, 166, 191, 192, 255, 128, 163,
- 164, 191, 128, 140, 141, 143, 144, 153,
- 154, 189, 190, 191, 128, 136, 137, 191,
- 173, 128, 168, 169, 177, 178, 180, 181,
- 182, 183, 191, 0, 127, 192, 255, 150,
- 151, 158, 159, 152, 154, 156, 158, 134,
- 135, 142, 143, 190, 191, 192, 255, 181,
- 189, 191, 128, 190, 133, 181, 128, 129,
- 130, 140, 141, 143, 144, 147, 148, 149,
- 150, 155, 156, 159, 160, 172, 173, 177,
- 178, 188, 189, 191, 177, 191, 128, 190,
- 128, 143, 144, 156, 157, 191, 130, 135,
- 148, 164, 166, 168, 128, 137, 138, 149,
- 150, 151, 152, 157, 158, 169, 170, 185,
- 186, 187, 188, 191, 142, 128, 132, 133,
- 137, 138, 159, 160, 255, 137, 191, 192,
- 255, 175, 128, 255, 159, 165, 170, 175,
- 177, 180, 191, 192, 255, 166, 173, 128,
- 167, 168, 175, 176, 255, 168, 174, 176,
- 191, 192, 255, 167, 175, 183, 191, 128,
- 150, 151, 159, 160, 190, 135, 143, 151,
- 128, 158, 159, 191, 128, 132, 133, 135,
- 136, 160, 161, 169, 170, 176, 177, 181,
- 182, 183, 184, 188, 189, 191, 160, 151,
- 154, 187, 192, 255, 128, 132, 133, 173,
- 174, 176, 177, 255, 143, 159, 187, 191,
- 192, 255, 128, 175, 176, 191, 150, 191,
- 192, 255, 141, 191, 192, 255, 128, 143,
- 144, 189, 190, 191, 141, 143, 160, 169,
- 172, 191, 192, 255, 191, 128, 174, 175,
- 190, 128, 157, 158, 159, 160, 255, 176,
- 191, 192, 255, 128, 150, 151, 159, 160,
- 161, 162, 255, 175, 137, 138, 184, 191,
- 192, 255, 128, 182, 183, 255, 130, 134,
- 139, 163, 191, 192, 255, 128, 129, 130,
- 179, 180, 191, 187, 189, 128, 177, 178,
- 183, 184, 191, 128, 137, 138, 165, 166,
- 175, 176, 255, 135, 159, 189, 191, 192,
- 255, 128, 131, 132, 178, 179, 191, 143,
- 165, 191, 128, 159, 160, 175, 176, 185,
- 186, 190, 128, 168, 169, 191, 131, 186,
- 128, 139, 140, 159, 160, 182, 183, 189,
- 190, 255, 176, 178, 180, 183, 184, 190,
- 191, 192, 255, 129, 128, 130, 131, 154,
- 155, 157, 158, 159, 160, 170, 171, 177,
- 178, 180, 181, 191, 128, 167, 175, 129,
- 134, 135, 136, 137, 142, 143, 144, 145,
- 150, 151, 159, 160, 255, 155, 166, 175,
- 128, 162, 163, 191, 164, 175, 135, 138,
- 188, 191, 192, 255, 174, 175, 154, 191,
- 192, 255, 157, 169, 183, 189, 191, 128,
- 134, 135, 146, 147, 151, 152, 158, 159,
- 190, 130, 133, 128, 255, 178, 191, 192,
- 255, 128, 146, 147, 255, 190, 191, 192,
- 255, 128, 143, 144, 255, 144, 145, 136,
- 175, 188, 191, 192, 255, 181, 128, 175,
- 176, 255, 189, 191, 192, 255, 128, 160,
- 161, 186, 187, 191, 128, 129, 154, 155,
- 165, 166, 255, 191, 192, 255, 128, 129,
- 130, 135, 136, 137, 138, 143, 144, 145,
- 146, 151, 152, 153, 154, 156, 157, 191,
- 128, 191, 128, 129, 130, 131, 133, 138,
- 139, 140, 141, 142, 143, 144, 145, 146,
- 147, 148, 149, 152, 156, 157, 160, 161,
- 162, 163, 164, 166, 168, 169, 170, 171,
- 172, 173, 174, 176, 177, 132, 151, 153,
- 155, 158, 175, 178, 179, 180, 191, 140,
- 167, 187, 190, 128, 255, 142, 143, 158,
- 191, 192, 255, 187, 191, 192, 255, 128,
- 180, 181, 191, 128, 156, 157, 159, 160,
- 255, 145, 191, 192, 255, 128, 159, 160,
- 175, 176, 255, 139, 143, 182, 191, 192,
- 255, 144, 132, 135, 150, 191, 192, 255,
- 158, 175, 148, 151, 188, 191, 192, 255,
- 128, 167, 168, 175, 176, 255, 164, 191,
- 192, 255, 183, 191, 192, 255, 128, 149,
- 150, 159, 160, 167, 168, 191, 136, 182,
- 188, 128, 133, 134, 137, 138, 184, 185,
- 190, 191, 255, 150, 159, 183, 191, 192,
- 255, 179, 128, 159, 160, 181, 182, 191,
- 128, 149, 150, 159, 160, 185, 186, 191,
- 128, 183, 184, 189, 190, 191, 128, 148,
- 152, 129, 143, 144, 179, 180, 191, 128,
- 159, 160, 188, 189, 191, 128, 156, 157,
- 191, 136, 128, 164, 165, 191, 128, 181,
- 182, 191, 128, 149, 150, 159, 160, 178,
- 179, 191, 128, 145, 146, 191, 128, 178,
- 179, 191, 128, 130, 131, 132, 133, 134,
- 135, 136, 138, 139, 140, 141, 144, 145,
- 146, 147, 150, 151, 152, 153, 154, 156,
- 162, 163, 171, 176, 177, 178, 129, 191,
- 128, 130, 131, 183, 184, 191, 128, 130,
- 131, 175, 176, 191, 128, 143, 144, 168,
- 169, 191, 128, 130, 131, 166, 167, 191,
- 182, 128, 143, 144, 178, 179, 191, 128,
- 130, 131, 178, 179, 191, 128, 154, 156,
- 129, 132, 133, 191, 146, 128, 171, 172,
- 191, 135, 137, 142, 158, 128, 168, 169,
- 175, 176, 255, 159, 191, 192, 255, 144,
- 128, 156, 157, 161, 162, 191, 128, 134,
- 135, 138, 139, 191, 128, 175, 176, 191,
- 134, 128, 131, 132, 135, 136, 191, 128,
- 174, 175, 191, 128, 151, 152, 155, 156,
- 191, 132, 128, 191, 128, 170, 171, 191,
- 128, 153, 154, 191, 160, 190, 192, 255,
- 128, 184, 185, 191, 137, 128, 174, 175,
- 191, 128, 129, 177, 178, 255, 144, 191,
- 192, 255, 128, 142, 143, 144, 145, 146,
- 149, 129, 148, 150, 191, 175, 191, 192,
- 255, 132, 191, 192, 255, 128, 144, 129,
- 143, 145, 191, 144, 153, 128, 143, 145,
- 152, 154, 191, 135, 191, 192, 255, 160,
- 168, 169, 171, 172, 173, 174, 188, 189,
- 190, 191, 128, 159, 161, 167, 170, 187,
- 185, 191, 192, 255, 128, 143, 144, 173,
- 174, 191, 128, 131, 132, 162, 163, 183,
- 184, 188, 189, 255, 133, 143, 145, 191,
- 192, 255, 128, 146, 147, 159, 160, 191,
- 160, 128, 191, 128, 129, 191, 192, 255,
- 159, 160, 171, 128, 170, 172, 191, 192,
- 255, 173, 191, 192, 255, 179, 191, 192,
- 255, 128, 176, 177, 178, 129, 191, 128,
- 129, 130, 191, 171, 175, 189, 191, 192,
- 255, 128, 136, 137, 143, 144, 153, 154,
- 191, 144, 145, 146, 147, 148, 149, 154,
- 155, 156, 157, 158, 159, 128, 143, 150,
- 153, 160, 191, 149, 157, 173, 186, 188,
- 160, 161, 163, 164, 167, 168, 132, 134,
- 149, 157, 186, 191, 139, 140, 192, 255,
- 133, 145, 128, 134, 135, 137, 138, 255,
- 166, 167, 129, 155, 187, 149, 181, 143,
- 175, 137, 169, 131, 140, 191, 192, 255,
- 160, 163, 164, 165, 184, 185, 186, 128,
- 159, 161, 162, 166, 191, 133, 191, 192,
- 255, 132, 160, 163, 167, 179, 184, 186,
- 128, 164, 165, 168, 169, 187, 188, 191,
- 130, 135, 137, 139, 144, 147, 151, 153,
- 155, 157, 159, 163, 171, 179, 184, 189,
- 191, 128, 140, 141, 148, 149, 160, 161,
- 164, 165, 166, 167, 190, 138, 164, 170,
- 128, 155, 156, 160, 161, 187, 188, 191,
- 128, 191, 155, 156, 128, 191, 151, 191,
- 192, 255, 156, 157, 160, 128, 191, 181,
- 191, 192, 255, 158, 159, 186, 128, 185,
- 187, 191, 192, 255, 162, 191, 192, 255,
- 160, 168, 128, 159, 161, 167, 169, 191,
- 158, 191, 192, 255, 9, 10, 13, 32,
- 33, 34, 35, 38, 46, 47, 58, 60,
- 61, 62, 64, 92, 95, 123, 124, 125,
- 126, 127, 194, 195, 198, 199, 203, 204,
- 205, 206, 207, 210, 212, 213, 214, 215,
- 216, 217, 219, 220, 221, 222, 223, 224,
- 225, 226, 227, 228, 233, 234, 237, 238,
- 239, 240, 0, 36, 37, 45, 48, 57,
- 59, 63, 65, 90, 91, 96, 97, 122,
- 192, 193, 196, 218, 229, 236, 241, 247,
- 9, 32, 10, 61, 10, 38, 46, 42,
- 47, 46, 69, 101, 48, 57, 58, 60,
- 61, 61, 62, 61, 45, 95, 194, 195,
- 198, 199, 203, 204, 205, 206, 207, 210,
- 212, 213, 214, 215, 216, 217, 219, 220,
- 221, 222, 223, 224, 225, 226, 227, 228,
- 233, 234, 237, 239, 240, 243, 48, 57,
- 65, 90, 97, 122, 196, 218, 229, 236,
- 124, 125, 128, 191, 170, 181, 186, 128,
- 191, 151, 183, 128, 255, 192, 255, 0,
- 127, 173, 130, 133, 146, 159, 165, 171,
- 175, 191, 192, 255, 181, 190, 128, 175,
- 176, 183, 184, 185, 186, 191, 134, 139,
- 141, 162, 128, 135, 136, 255, 182, 130,
- 137, 176, 151, 152, 154, 160, 136, 191,
- 192, 255, 128, 143, 144, 170, 171, 175,
- 176, 178, 179, 191, 128, 159, 160, 191,
- 176, 128, 138, 139, 173, 174, 255, 148,
- 150, 164, 167, 173, 176, 185, 189, 190,
- 192, 255, 144, 128, 145, 146, 175, 176,
- 191, 128, 140, 141, 255, 166, 176, 178,
- 191, 192, 255, 186, 128, 137, 138, 170,
- 171, 179, 180, 181, 182, 191, 160, 161,
- 162, 164, 165, 166, 167, 168, 169, 170,
- 171, 172, 173, 174, 175, 176, 177, 178,
- 179, 180, 181, 182, 183, 184, 185, 186,
- 187, 188, 189, 190, 128, 191, 128, 129,
- 130, 131, 137, 138, 139, 140, 141, 142,
- 143, 144, 153, 154, 155, 156, 157, 158,
- 159, 160, 161, 162, 163, 164, 165, 166,
- 167, 168, 169, 170, 171, 172, 173, 174,
- 175, 176, 177, 178, 179, 180, 182, 183,
- 184, 188, 189, 190, 191, 132, 187, 129,
- 130, 132, 133, 134, 176, 177, 178, 179,
- 180, 181, 182, 183, 128, 191, 128, 129,
- 130, 131, 132, 133, 134, 135, 144, 136,
- 143, 145, 191, 192, 255, 182, 183, 184,
- 128, 191, 128, 191, 191, 128, 190, 192,
- 255, 128, 146, 147, 148, 152, 153, 154,
- 155, 156, 158, 159, 160, 161, 162, 163,
- 164, 165, 166, 167, 168, 169, 170, 171,
- 172, 173, 174, 175, 176, 129, 191, 192,
- 255, 158, 159, 128, 157, 160, 191, 192,
- 255, 128, 191, 164, 169, 171, 172, 173,
- 174, 175, 180, 181, 182, 183, 184, 185,
- 187, 188, 189, 190, 191, 128, 163, 165,
- 186, 144, 145, 146, 147, 148, 150, 151,
- 152, 155, 157, 158, 160, 170, 171, 172,
- 175, 128, 159, 161, 169, 173, 191, 128,
- 191, 10, 13, 34, 36, 37, 92, 128,
- 191, 192, 223, 224, 239, 240, 247, 248,
- 255, 10, 13, 34, 92, 36, 37, 128,
- 191, 192, 223, 224, 239, 240, 247, 248,
- 255, 10, 13, 36, 123, 123, 126, 126,
- 37, 123, 126, 10, 13, 128, 191, 192,
- 223, 224, 239, 240, 247, 248, 255, 128,
- 191, 128, 191, 128, 191, 10, 13, 36,
- 37, 128, 191, 192, 223, 224, 239, 240,
- 247, 248, 255, 10, 13, 36, 37, 128,
- 191, 192, 223, 224, 239, 240, 247, 248,
- 255, 10, 13, 10, 13, 123, 10, 13,
- 126, 10, 13, 126, 126, 128, 191, 128,
- 191, 128, 191, 10, 13, 36, 37, 128,
- 191, 192, 223, 224, 239, 240, 247, 248,
- 255, 10, 13, 36, 37, 128, 191, 192,
- 223, 224, 239, 240, 247, 248, 255, 10,
- 13, 10, 13, 123, 10, 13, 126, 10,
- 13, 126, 126, 128, 191, 128, 191, 128,
- 191, 95, 194, 195, 198, 199, 203, 204,
- 205, 206, 207, 210, 212, 213, 214, 215,
- 216, 217, 219, 220, 221, 222, 223, 224,
- 225, 226, 227, 228, 233, 234, 237, 238,
- 239, 240, 65, 90, 97, 122, 128, 191,
- 192, 193, 196, 218, 229, 236, 241, 247,
- 248, 255, 45, 95, 194, 195, 198, 199,
- 203, 204, 205, 206, 207, 210, 212, 213,
- 214, 215, 216, 217, 219, 220, 221, 222,
- 223, 224, 225, 226, 227, 228, 233, 234,
- 237, 239, 240, 243, 48, 57, 65, 90,
- 97, 122, 196, 218, 229, 236, 128, 191,
- 170, 181, 186, 128, 191, 151, 183, 128,
- 255, 192, 255, 0, 127, 173, 130, 133,
- 146, 159, 165, 171, 175, 191, 192, 255,
- 181, 190, 128, 175, 176, 183, 184, 185,
- 186, 191, 134, 139, 141, 162, 128, 135,
- 136, 255, 182, 130, 137, 176, 151, 152,
- 154, 160, 136, 191, 192, 255, 128, 143,
- 144, 170, 171, 175, 176, 178, 179, 191,
- 128, 159, 160, 191, 176, 128, 138, 139,
- 173, 174, 255, 148, 150, 164, 167, 173,
- 176, 185, 189, 190, 192, 255, 144, 128,
- 145, 146, 175, 176, 191, 128, 140, 141,
- 255, 166, 176, 178, 191, 192, 255, 186,
- 128, 137, 138, 170, 171, 179, 180, 181,
- 182, 191, 160, 161, 162, 164, 165, 166,
- 167, 168, 169, 170, 171, 172, 173, 174,
- 175, 176, 177, 178, 179, 180, 181, 182,
- 183, 184, 185, 186, 187, 188, 189, 190,
- 128, 191, 128, 129, 130, 131, 137, 138,
- 139, 140, 141, 142, 143, 144, 153, 154,
- 155, 156, 157, 158, 159, 160, 161, 162,
- 163, 164, 165, 166, 167, 168, 169, 170,
- 171, 172, 173, 174, 175, 176, 177, 178,
- 179, 180, 182, 183, 184, 188, 189, 190,
- 191, 132, 187, 129, 130, 132, 133, 134,
- 176, 177, 178, 179, 180, 181, 182, 183,
- 128, 191, 128, 129, 130, 131, 132, 133,
- 134, 135, 144, 136, 143, 145, 191, 192,
- 255, 182, 183, 184, 128, 191, 128, 191,
- 191, 128, 190, 192, 255, 128, 146, 147,
- 148, 152, 153, 154, 155, 156, 158, 159,
- 160, 161, 162, 163, 164, 165, 166, 167,
- 168, 169, 170, 171, 172, 173, 174, 175,
- 176, 129, 191, 192, 255, 158, 159, 128,
- 157, 160, 191, 192, 255, 128, 191, 164,
- 169, 171, 172, 173, 174, 175, 180, 181,
- 182, 183, 184, 185, 187, 188, 189, 190,
- 191, 128, 163, 165, 186, 144, 145, 146,
- 147, 148, 150, 151, 152, 155, 157, 158,
- 160, 170, 171, 172, 175, 128, 159, 161,
- 169, 173, 191, 128, 191,
-}
-
-var _hcltok_single_lengths []byte = []byte{
- 0, 1, 1, 2, 3, 2, 0, 32,
- 31, 36, 1, 4, 0, 0, 0, 0,
- 1, 2, 1, 1, 1, 1, 0, 1,
- 1, 0, 0, 2, 0, 0, 0, 1,
- 32, 0, 0, 0, 0, 1, 3, 1,
- 1, 1, 0, 2, 0, 1, 1, 2,
- 0, 3, 0, 1, 0, 2, 1, 2,
- 0, 0, 5, 1, 4, 0, 0, 1,
- 43, 0, 0, 0, 2, 3, 2, 1,
- 1, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 1, 1, 0, 0,
- 0, 0, 0, 0, 0, 0, 4, 1,
- 0, 15, 0, 0, 0, 1, 6, 1,
- 0, 0, 1, 0, 2, 0, 0, 0,
- 9, 0, 1, 1, 0, 0, 0, 3,
- 0, 1, 0, 28, 0, 0, 0, 1,
- 0, 1, 0, 0, 0, 1, 0, 0,
- 0, 0, 0, 0, 0, 1, 0, 2,
- 0, 0, 18, 0, 0, 1, 0, 0,
- 0, 0, 0, 0, 0, 0, 1, 0,
- 0, 0, 16, 36, 0, 0, 0, 0,
- 1, 0, 0, 0, 0, 0, 1, 0,
- 0, 0, 0, 0, 0, 2, 0, 0,
- 0, 0, 0, 1, 0, 0, 0, 0,
- 0, 0, 0, 28, 0, 0, 0, 1,
- 1, 1, 1, 0, 0, 2, 0, 1,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 1, 1, 4, 0, 0, 2, 2,
- 0, 11, 0, 0, 0, 0, 0, 0,
- 0, 1, 1, 3, 0, 0, 4, 0,
- 0, 0, 18, 0, 0, 0, 1, 4,
- 1, 4, 1, 0, 3, 2, 2, 2,
- 1, 0, 0, 1, 8, 0, 0, 0,
- 4, 12, 0, 2, 0, 3, 0, 1,
- 0, 2, 0, 1, 2, 0, 3, 1,
- 2, 0, 0, 0, 0, 0, 1, 1,
- 0, 0, 1, 28, 3, 0, 1, 1,
- 2, 1, 0, 1, 1, 2, 1, 1,
- 2, 1, 1, 0, 2, 1, 1, 1,
- 1, 0, 0, 6, 1, 1, 0, 0,
- 46, 1, 1, 0, 0, 0, 0, 2,
- 1, 0, 0, 0, 1, 0, 0, 0,
- 0, 0, 0, 0, 13, 2, 0, 0,
- 0, 9, 0, 1, 28, 0, 1, 3,
- 0, 2, 0, 0, 0, 1, 0, 1,
- 1, 2, 0, 18, 2, 0, 0, 16,
- 35, 0, 0, 0, 1, 0, 28, 0,
- 0, 0, 0, 1, 0, 2, 0, 0,
- 1, 0, 0, 1, 0, 0, 1, 0,
- 0, 0, 0, 1, 11, 0, 0, 0,
- 0, 4, 0, 12, 1, 7, 0, 4,
- 0, 0, 0, 0, 1, 2, 1, 1,
- 1, 1, 0, 1, 1, 0, 0, 2,
- 0, 0, 0, 1, 32, 0, 0, 0,
- 0, 1, 3, 1, 1, 1, 0, 2,
- 0, 1, 1, 2, 0, 3, 0, 1,
- 0, 2, 1, 2, 0, 0, 5, 1,
- 4, 0, 0, 1, 43, 0, 0, 0,
- 2, 3, 2, 1, 1, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 0, 0, 0, 0, 0, 0,
- 0, 0, 4, 1, 0, 15, 0, 0,
- 0, 1, 6, 1, 0, 0, 1, 0,
- 2, 0, 0, 0, 9, 0, 1, 1,
- 0, 0, 0, 3, 0, 1, 0, 28,
- 0, 0, 0, 1, 0, 1, 0, 0,
- 0, 1, 0, 0, 0, 0, 0, 0,
- 0, 1, 0, 2, 0, 0, 18, 0,
- 0, 1, 0, 0, 0, 0, 0, 0,
- 0, 0, 1, 0, 0, 0, 16, 36,
- 0, 0, 0, 0, 1, 0, 0, 0,
- 0, 0, 1, 0, 0, 0, 0, 0,
- 0, 2, 0, 0, 0, 0, 0, 1,
- 0, 0, 0, 0, 0, 0, 0, 28,
- 0, 0, 0, 1, 1, 1, 1, 0,
- 0, 2, 0, 1, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 1, 1, 4,
- 0, 0, 2, 2, 0, 11, 0, 0,
- 0, 0, 0, 0, 0, 1, 1, 3,
- 0, 0, 4, 0, 0, 0, 18, 0,
- 0, 0, 1, 4, 1, 4, 1, 0,
- 3, 2, 2, 2, 1, 0, 0, 1,
- 8, 0, 0, 0, 4, 12, 0, 2,
- 0, 3, 0, 1, 0, 2, 0, 1,
- 2, 0, 0, 3, 0, 1, 1, 1,
- 2, 2, 4, 1, 6, 2, 4, 2,
- 4, 1, 4, 0, 6, 1, 3, 1,
- 2, 0, 2, 11, 1, 1, 1, 0,
- 1, 1, 0, 2, 0, 3, 3, 2,
- 1, 0, 0, 0, 1, 0, 1, 0,
- 1, 1, 0, 2, 0, 0, 1, 0,
- 0, 0, 0, 0, 0, 0, 1, 0,
- 0, 0, 0, 0, 0, 0, 1, 0,
- 0, 0, 4, 3, 2, 2, 0, 6,
- 1, 0, 1, 1, 0, 2, 0, 4,
- 3, 0, 1, 1, 0, 0, 0, 0,
- 0, 0, 0, 1, 0, 0, 0, 1,
- 0, 3, 0, 2, 0, 0, 0, 3,
- 0, 2, 1, 1, 3, 1, 0, 0,
- 0, 0, 0, 5, 2, 0, 0, 0,
- 0, 0, 0, 1, 0, 0, 1, 1,
- 0, 0, 35, 4, 0, 0, 0, 0,
- 0, 0, 0, 1, 0, 0, 0, 0,
- 0, 0, 3, 0, 1, 0, 0, 3,
- 0, 0, 1, 0, 0, 0, 0, 28,
- 0, 0, 0, 0, 1, 0, 3, 1,
- 4, 0, 1, 0, 0, 1, 0, 0,
- 1, 0, 0, 0, 0, 1, 1, 0,
- 7, 0, 0, 2, 2, 0, 11, 0,
- 0, 0, 0, 0, 1, 1, 3, 0,
- 0, 4, 0, 0, 0, 12, 1, 4,
- 1, 5, 2, 0, 3, 2, 2, 2,
- 1, 7, 0, 7, 17, 3, 0, 2,
- 0, 3, 0, 0, 1, 0, 2, 0,
- 2, 0, 0, 0, 0, 0, 1, 0,
- 0, 0, 2, 2, 1, 0, 0, 0,
- 2, 2, 4, 0, 0, 0, 0, 1,
- 2, 1, 1, 1, 1, 0, 1, 1,
- 0, 0, 2, 0, 0, 0, 1, 32,
- 0, 0, 0, 0, 1, 3, 1, 1,
- 1, 0, 2, 0, 1, 1, 2, 0,
- 3, 0, 1, 0, 2, 1, 2, 0,
- 0, 5, 1, 4, 0, 0, 1, 43,
- 0, 0, 0, 2, 3, 2, 1, 1,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 1, 1, 0, 0, 0,
- 0, 0, 0, 0, 0, 4, 1, 0,
- 15, 0, 0, 0, 1, 6, 1, 0,
- 0, 1, 0, 2, 0, 0, 0, 9,
- 0, 1, 1, 0, 0, 0, 3, 0,
- 1, 0, 28, 0, 0, 0, 1, 0,
- 1, 0, 0, 0, 1, 0, 0, 0,
- 0, 0, 0, 0, 1, 0, 2, 0,
- 0, 18, 0, 0, 1, 0, 0, 0,
- 0, 0, 0, 0, 0, 1, 0, 0,
- 0, 16, 36, 0, 0, 0, 0, 1,
- 0, 0, 0, 0, 0, 1, 0, 0,
- 0, 0, 0, 0, 2, 0, 0, 0,
- 0, 0, 1, 0, 0, 0, 0, 0,
- 0, 0, 28, 0, 0, 0, 1, 1,
- 1, 1, 0, 0, 2, 0, 1, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 4, 0, 0, 2, 2, 0,
- 11, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 3, 0, 0, 4, 0, 0,
- 0, 18, 0, 0, 0, 1, 4, 1,
- 4, 1, 0, 3, 2, 2, 2, 1,
- 0, 0, 1, 8, 0, 0, 0, 4,
- 12, 0, 2, 0, 3, 0, 1, 0,
- 2, 0, 1, 2, 0, 0, 3, 0,
- 1, 1, 1, 2, 2, 4, 1, 6,
- 2, 4, 2, 4, 1, 4, 0, 6,
- 1, 3, 1, 2, 0, 2, 11, 1,
- 1, 1, 0, 1, 1, 0, 2, 0,
- 3, 3, 2, 1, 0, 0, 0, 1,
- 0, 1, 0, 1, 1, 0, 2, 0,
- 0, 1, 0, 0, 0, 0, 0, 0,
- 0, 1, 0, 0, 0, 0, 0, 0,
- 0, 1, 0, 0, 0, 4, 3, 2,
- 2, 0, 6, 1, 0, 1, 1, 0,
- 2, 0, 4, 3, 0, 1, 1, 0,
- 0, 0, 0, 0, 0, 0, 1, 0,
- 0, 0, 1, 0, 3, 0, 2, 0,
- 0, 0, 3, 0, 2, 1, 1, 3,
- 1, 0, 0, 0, 0, 0, 5, 2,
- 0, 0, 0, 0, 0, 0, 1, 0,
- 0, 1, 1, 0, 0, 35, 4, 0,
- 0, 0, 0, 0, 0, 0, 1, 0,
- 0, 0, 0, 0, 0, 3, 0, 1,
- 0, 0, 3, 0, 0, 1, 0, 0,
- 0, 0, 28, 0, 0, 0, 0, 1,
- 0, 3, 1, 4, 0, 1, 0, 0,
- 1, 0, 0, 1, 0, 0, 0, 0,
- 1, 1, 0, 7, 0, 0, 2, 2,
- 0, 11, 0, 0, 0, 0, 0, 1,
- 1, 3, 0, 0, 4, 0, 0, 0,
- 12, 1, 4, 1, 5, 2, 0, 3,
- 2, 2, 2, 1, 7, 0, 7, 17,
- 3, 0, 2, 0, 3, 0, 0, 1,
- 0, 2, 0, 54, 2, 1, 1, 1,
- 1, 1, 2, 3, 1, 2, 2, 1,
- 34, 1, 1, 0, 3, 2, 0, 0,
- 0, 1, 2, 4, 1, 0, 1, 0,
- 0, 0, 0, 1, 1, 1, 0, 0,
- 1, 30, 47, 13, 9, 3, 0, 1,
- 28, 2, 0, 18, 16, 0, 6, 4,
- 2, 2, 0, 1, 1, 1, 2, 1,
- 2, 0, 0, 0, 4, 2, 2, 3,
- 3, 2, 1, 1, 0, 0, 0, 4,
- 2, 2, 3, 3, 2, 1, 1, 0,
- 0, 0, 33, 34, 0, 3, 2, 0,
- 0, 0, 1, 2, 4, 1, 0, 1,
- 0, 0, 0, 0, 1, 1, 1, 0,
- 0, 1, 30, 47, 13, 9, 3, 0,
- 1, 28, 2, 0, 18, 16, 0,
-}
-
-var _hcltok_range_lengths []byte = []byte{
- 0, 0, 0, 0, 1, 1, 1, 5,
- 5, 5, 0, 0, 3, 0, 1, 1,
- 4, 2, 3, 0, 1, 0, 2, 2,
- 4, 2, 2, 3, 1, 1, 1, 1,
- 0, 1, 1, 2, 2, 1, 4, 6,
- 9, 6, 8, 5, 8, 7, 10, 4,
- 6, 4, 7, 7, 5, 5, 4, 5,
- 1, 2, 8, 4, 3, 3, 3, 0,
- 3, 1, 2, 1, 2, 2, 3, 3,
- 1, 3, 2, 2, 1, 2, 2, 2,
- 3, 4, 4, 3, 1, 2, 1, 3,
- 2, 2, 2, 2, 2, 3, 3, 1,
- 1, 2, 1, 3, 2, 2, 3, 2,
- 7, 0, 1, 4, 1, 2, 4, 2,
- 1, 2, 0, 2, 2, 3, 5, 5,
- 1, 4, 1, 1, 2, 2, 1, 0,
- 0, 1, 1, 1, 1, 1, 2, 2,
- 2, 2, 1, 1, 1, 4, 2, 2,
- 3, 1, 4, 4, 6, 1, 3, 1,
- 1, 2, 1, 1, 1, 5, 3, 1,
- 1, 1, 2, 3, 3, 1, 2, 2,
- 1, 4, 1, 2, 5, 2, 1, 1,
- 0, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 1, 1, 2, 4, 2, 1,
- 2, 2, 2, 6, 1, 1, 2, 1,
- 2, 1, 1, 1, 2, 2, 2, 1,
- 3, 2, 5, 2, 8, 6, 2, 2,
- 2, 2, 3, 1, 3, 1, 2, 1,
- 3, 2, 2, 3, 1, 1, 1, 1,
- 1, 1, 1, 2, 2, 4, 1, 2,
- 1, 0, 1, 1, 1, 1, 0, 1,
- 2, 3, 1, 3, 3, 1, 0, 3,
- 0, 2, 3, 1, 0, 0, 0, 0,
- 2, 2, 2, 2, 1, 5, 2, 2,
- 5, 7, 5, 0, 1, 0, 1, 1,
- 1, 1, 1, 0, 1, 1, 0, 3,
- 3, 1, 1, 2, 1, 3, 5, 1,
- 1, 2, 2, 1, 1, 1, 1, 2,
- 6, 3, 7, 2, 6, 1, 6, 2,
- 8, 0, 4, 2, 5, 2, 3, 3,
- 3, 1, 2, 8, 2, 0, 2, 1,
- 2, 1, 5, 2, 1, 3, 3, 0,
- 2, 1, 2, 1, 0, 1, 1, 3,
- 1, 1, 2, 3, 0, 0, 3, 2,
- 4, 1, 4, 1, 1, 3, 1, 1,
- 1, 1, 2, 2, 1, 3, 1, 4,
- 3, 3, 1, 1, 5, 2, 1, 1,
- 2, 1, 2, 1, 3, 2, 0, 1,
- 1, 1, 1, 1, 1, 1, 2, 1,
- 1, 1, 1, 1, 1, 1, 0, 1,
- 1, 2, 2, 1, 1, 1, 3, 2,
- 1, 0, 2, 1, 1, 1, 1, 0,
- 3, 0, 1, 1, 4, 2, 3, 0,
- 1, 0, 2, 2, 4, 2, 2, 3,
- 1, 1, 1, 1, 0, 1, 1, 2,
- 2, 1, 4, 6, 9, 6, 8, 5,
- 8, 7, 10, 4, 6, 4, 7, 7,
- 5, 5, 4, 5, 1, 2, 8, 4,
- 3, 3, 3, 0, 3, 1, 2, 1,
- 2, 2, 3, 3, 1, 3, 2, 2,
- 1, 2, 2, 2, 3, 4, 4, 3,
- 1, 2, 1, 3, 2, 2, 2, 2,
- 2, 3, 3, 1, 1, 2, 1, 3,
- 2, 2, 3, 2, 7, 0, 1, 4,
- 1, 2, 4, 2, 1, 2, 0, 2,
- 2, 3, 5, 5, 1, 4, 1, 1,
- 2, 2, 1, 0, 0, 1, 1, 1,
- 1, 1, 2, 2, 2, 2, 1, 1,
- 1, 4, 2, 2, 3, 1, 4, 4,
- 6, 1, 3, 1, 1, 2, 1, 1,
- 1, 5, 3, 1, 1, 1, 2, 3,
- 3, 1, 2, 2, 1, 4, 1, 2,
- 5, 2, 1, 1, 0, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 1, 1,
- 2, 4, 2, 1, 2, 2, 2, 6,
- 1, 1, 2, 1, 2, 1, 1, 1,
- 2, 2, 2, 1, 3, 2, 5, 2,
- 8, 6, 2, 2, 2, 2, 3, 1,
- 3, 1, 2, 1, 3, 2, 2, 3,
- 1, 1, 1, 1, 1, 1, 1, 2,
- 2, 4, 1, 2, 1, 0, 1, 1,
- 1, 1, 0, 1, 2, 3, 1, 3,
- 3, 1, 0, 3, 0, 2, 3, 1,
- 0, 0, 0, 0, 2, 2, 2, 2,
- 1, 5, 2, 2, 5, 7, 5, 0,
- 1, 0, 1, 1, 1, 1, 1, 0,
- 1, 1, 1, 2, 2, 3, 3, 4,
- 7, 5, 7, 5, 3, 3, 7, 3,
- 13, 1, 3, 5, 3, 5, 3, 6,
- 5, 2, 2, 8, 4, 1, 2, 3,
- 2, 10, 2, 2, 0, 2, 3, 3,
- 1, 2, 3, 3, 1, 2, 3, 3,
- 4, 4, 2, 1, 2, 2, 3, 2,
- 2, 5, 3, 2, 3, 2, 1, 3,
- 3, 6, 2, 2, 5, 2, 5, 1,
- 1, 2, 4, 1, 11, 1, 3, 8,
- 4, 2, 1, 0, 4, 3, 3, 3,
- 2, 9, 1, 1, 4, 3, 2, 2,
- 2, 3, 4, 2, 3, 2, 4, 3,
- 2, 2, 3, 3, 4, 3, 3, 4,
- 2, 5, 4, 8, 7, 1, 2, 1,
- 3, 1, 2, 5, 1, 2, 2, 2,
- 2, 1, 3, 2, 2, 3, 3, 1,
- 9, 1, 5, 1, 3, 2, 2, 3,
- 2, 3, 3, 3, 1, 3, 3, 2,
- 2, 4, 5, 3, 3, 4, 3, 3,
- 3, 2, 2, 2, 4, 2, 2, 1,
- 3, 3, 3, 3, 3, 3, 2, 2,
- 3, 2, 3, 3, 2, 3, 2, 3,
- 1, 2, 2, 2, 2, 2, 2, 2,
- 2, 2, 2, 2, 3, 2, 3, 2,
- 3, 5, 3, 3, 1, 2, 3, 2,
- 2, 1, 2, 3, 4, 3, 0, 3,
- 0, 2, 3, 1, 0, 0, 0, 0,
- 2, 3, 2, 4, 6, 4, 1, 1,
- 2, 1, 2, 1, 3, 2, 3, 2,
- 5, 1, 1, 1, 1, 1, 0, 1,
- 1, 1, 0, 0, 0, 1, 1, 1,
- 0, 0, 0, 3, 0, 1, 1, 4,
- 2, 3, 0, 1, 0, 2, 2, 4,
- 2, 2, 3, 1, 1, 1, 1, 0,
- 1, 1, 2, 2, 1, 4, 6, 9,
- 6, 8, 5, 8, 7, 10, 4, 6,
- 4, 7, 7, 5, 5, 4, 5, 1,
- 2, 8, 4, 3, 3, 3, 0, 3,
- 1, 2, 1, 2, 2, 3, 3, 1,
- 3, 2, 2, 1, 2, 2, 2, 3,
- 4, 4, 3, 1, 2, 1, 3, 2,
- 2, 2, 2, 2, 3, 3, 1, 1,
- 2, 1, 3, 2, 2, 3, 2, 7,
- 0, 1, 4, 1, 2, 4, 2, 1,
- 2, 0, 2, 2, 3, 5, 5, 1,
- 4, 1, 1, 2, 2, 1, 0, 0,
- 1, 1, 1, 1, 1, 2, 2, 2,
- 2, 1, 1, 1, 4, 2, 2, 3,
- 1, 4, 4, 6, 1, 3, 1, 1,
- 2, 1, 1, 1, 5, 3, 1, 1,
- 1, 2, 3, 3, 1, 2, 2, 1,
- 4, 1, 2, 5, 2, 1, 1, 0,
- 2, 2, 2, 2, 2, 2, 2, 2,
- 2, 1, 1, 2, 4, 2, 1, 2,
- 2, 2, 6, 1, 1, 2, 1, 2,
- 1, 1, 1, 2, 2, 2, 1, 3,
- 2, 5, 2, 8, 6, 2, 2, 2,
- 2, 3, 1, 3, 1, 2, 1, 3,
- 2, 2, 3, 1, 1, 1, 1, 1,
- 1, 1, 2, 2, 4, 1, 2, 1,
- 0, 1, 1, 1, 1, 0, 1, 2,
- 3, 1, 3, 3, 1, 0, 3, 0,
- 2, 3, 1, 0, 0, 0, 0, 2,
- 2, 2, 2, 1, 5, 2, 2, 5,
- 7, 5, 0, 1, 0, 1, 1, 1,
- 1, 1, 0, 1, 1, 1, 2, 2,
- 3, 3, 4, 7, 5, 7, 5, 3,
- 3, 7, 3, 13, 1, 3, 5, 3,
- 5, 3, 6, 5, 2, 2, 8, 4,
- 1, 2, 3, 2, 10, 2, 2, 0,
- 2, 3, 3, 1, 2, 3, 3, 1,
- 2, 3, 3, 4, 4, 2, 1, 2,
- 2, 3, 2, 2, 5, 3, 2, 3,
- 2, 1, 3, 3, 6, 2, 2, 5,
- 2, 5, 1, 1, 2, 4, 1, 11,
- 1, 3, 8, 4, 2, 1, 0, 4,
- 3, 3, 3, 2, 9, 1, 1, 4,
- 3, 2, 2, 2, 3, 4, 2, 3,
- 2, 4, 3, 2, 2, 3, 3, 4,
- 3, 3, 4, 2, 5, 4, 8, 7,
- 1, 2, 1, 3, 1, 2, 5, 1,
- 2, 2, 2, 2, 1, 3, 2, 2,
- 3, 3, 1, 9, 1, 5, 1, 3,
- 2, 2, 3, 2, 3, 3, 3, 1,
- 3, 3, 2, 2, 4, 5, 3, 3,
- 4, 3, 3, 3, 2, 2, 2, 4,
- 2, 2, 1, 3, 3, 3, 3, 3,
- 3, 2, 2, 3, 2, 3, 3, 2,
- 3, 2, 3, 1, 2, 2, 2, 2,
- 2, 2, 2, 2, 2, 2, 2, 3,
- 2, 3, 2, 3, 5, 3, 3, 1,
- 2, 3, 2, 2, 1, 2, 3, 4,
- 3, 0, 3, 0, 2, 3, 1, 0,
- 0, 0, 0, 2, 3, 2, 4, 6,
- 4, 1, 1, 2, 1, 2, 1, 3,
- 2, 3, 2, 11, 0, 0, 0, 0,
- 0, 0, 0, 1, 0, 0, 0, 0,
- 5, 0, 0, 1, 1, 1, 0, 1,
- 1, 5, 4, 2, 0, 1, 0, 2,
- 2, 5, 2, 3, 5, 3, 2, 3,
- 5, 1, 1, 1, 3, 1, 1, 2,
- 2, 3, 1, 2, 3, 1, 5, 6,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 5, 1, 1, 1, 5, 6, 0, 0,
- 0, 0, 0, 0, 1, 1, 1, 5,
- 6, 0, 0, 0, 0, 0, 0, 1,
- 1, 1, 8, 5, 1, 1, 1, 0,
- 1, 1, 5, 4, 2, 0, 1, 0,
- 2, 2, 5, 2, 3, 5, 3, 2,
- 3, 5, 1, 1, 1, 3, 1, 1,
- 2, 2, 3, 1, 2, 3, 1,
-}
-
-var _hcltok_index_offsets []int16 = []int16{
- 0, 0, 2, 4, 7, 12, 16, 18,
- 56, 93, 135, 137, 142, 146, 147, 149,
- 151, 157, 162, 167, 169, 172, 174, 177,
- 181, 187, 190, 193, 199, 201, 203, 205,
- 208, 241, 243, 245, 248, 251, 254, 262,
- 270, 281, 289, 298, 306, 315, 324, 336,
- 343, 350, 358, 366, 375, 381, 389, 395,
- 403, 405, 408, 422, 428, 436, 440, 444,
- 446, 493, 495, 498, 500, 505, 511, 517,
- 522, 525, 529, 532, 535, 537, 540, 543,
- 546, 550, 555, 560, 564, 566, 569, 571,
- 575, 578, 581, 584, 587, 591, 596, 600,
- 602, 604, 607, 609, 613, 616, 619, 627,
- 631, 639, 655, 657, 662, 664, 668, 679,
- 683, 685, 688, 690, 693, 698, 702, 708,
- 714, 725, 730, 733, 736, 739, 742, 744,
- 748, 749, 752, 754, 784, 786, 788, 791,
- 795, 798, 802, 804, 806, 808, 814, 817,
- 820, 824, 826, 831, 836, 843, 846, 850,
- 854, 856, 859, 879, 881, 883, 890, 894,
- 896, 898, 900, 903, 907, 911, 913, 917,
- 920, 922, 927, 945, 984, 990, 993, 995,
- 997, 999, 1002, 1005, 1008, 1011, 1014, 1018,
- 1021, 1024, 1027, 1029, 1031, 1034, 1041, 1044,
- 1046, 1049, 1052, 1055, 1063, 1065, 1067, 1070,
- 1072, 1075, 1077, 1079, 1109, 1112, 1115, 1118,
- 1121, 1126, 1130, 1137, 1140, 1149, 1158, 1161,
- 1165, 1168, 1171, 1175, 1177, 1181, 1183, 1186,
- 1188, 1192, 1196, 1200, 1208, 1210, 1212, 1216,
- 1220, 1222, 1235, 1237, 1240, 1243, 1248, 1250,
- 1253, 1255, 1257, 1260, 1265, 1267, 1269, 1274,
- 1276, 1279, 1283, 1303, 1307, 1311, 1313, 1315,
- 1323, 1325, 1332, 1337, 1339, 1343, 1346, 1349,
- 1352, 1356, 1359, 1362, 1366, 1376, 1382, 1385,
- 1388, 1398, 1418, 1424, 1427, 1429, 1433, 1435,
- 1438, 1440, 1444, 1446, 1448, 1452, 1454, 1458,
- 1463, 1469, 1471, 1473, 1476, 1478, 1482, 1489,
- 1492, 1494, 1497, 1501, 1531, 1536, 1538, 1541,
- 1545, 1554, 1559, 1567, 1571, 1579, 1583, 1591,
- 1595, 1606, 1608, 1614, 1617, 1625, 1629, 1634,
- 1639, 1644, 1646, 1649, 1664, 1668, 1670, 1673,
- 1675, 1724, 1727, 1734, 1737, 1739, 1743, 1747,
- 1750, 1754, 1756, 1759, 1761, 1763, 1765, 1767,
- 1771, 1773, 1775, 1778, 1782, 1796, 1799, 1803,
- 1806, 1811, 1822, 1827, 1830, 1860, 1864, 1867,
- 1872, 1874, 1878, 1881, 1884, 1886, 1891, 1893,
- 1899, 1904, 1910, 1912, 1932, 1940, 1943, 1945,
- 1963, 2001, 2003, 2006, 2008, 2013, 2016, 2045,
- 2047, 2049, 2051, 2053, 2056, 2058, 2062, 2065,
- 2067, 2070, 2072, 2074, 2077, 2079, 2081, 2083,
- 2085, 2087, 2090, 2093, 2096, 2109, 2111, 2115,
- 2118, 2120, 2125, 2128, 2142, 2145, 2154, 2156,
- 2161, 2165, 2166, 2168, 2170, 2176, 2181, 2186,
- 2188, 2191, 2193, 2196, 2200, 2206, 2209, 2212,
- 2218, 2220, 2222, 2224, 2227, 2260, 2262, 2264,
- 2267, 2270, 2273, 2281, 2289, 2300, 2308, 2317,
- 2325, 2334, 2343, 2355, 2362, 2369, 2377, 2385,
- 2394, 2400, 2408, 2414, 2422, 2424, 2427, 2441,
- 2447, 2455, 2459, 2463, 2465, 2512, 2514, 2517,
- 2519, 2524, 2530, 2536, 2541, 2544, 2548, 2551,
- 2554, 2556, 2559, 2562, 2565, 2569, 2574, 2579,
- 2583, 2585, 2588, 2590, 2594, 2597, 2600, 2603,
- 2606, 2610, 2615, 2619, 2621, 2623, 2626, 2628,
- 2632, 2635, 2638, 2646, 2650, 2658, 2674, 2676,
- 2681, 2683, 2687, 2698, 2702, 2704, 2707, 2709,
- 2712, 2717, 2721, 2727, 2733, 2744, 2749, 2752,
- 2755, 2758, 2761, 2763, 2767, 2768, 2771, 2773,
- 2803, 2805, 2807, 2810, 2814, 2817, 2821, 2823,
- 2825, 2827, 2833, 2836, 2839, 2843, 2845, 2850,
- 2855, 2862, 2865, 2869, 2873, 2875, 2878, 2898,
- 2900, 2902, 2909, 2913, 2915, 2917, 2919, 2922,
- 2926, 2930, 2932, 2936, 2939, 2941, 2946, 2964,
- 3003, 3009, 3012, 3014, 3016, 3018, 3021, 3024,
- 3027, 3030, 3033, 3037, 3040, 3043, 3046, 3048,
- 3050, 3053, 3060, 3063, 3065, 3068, 3071, 3074,
- 3082, 3084, 3086, 3089, 3091, 3094, 3096, 3098,
- 3128, 3131, 3134, 3137, 3140, 3145, 3149, 3156,
- 3159, 3168, 3177, 3180, 3184, 3187, 3190, 3194,
- 3196, 3200, 3202, 3205, 3207, 3211, 3215, 3219,
- 3227, 3229, 3231, 3235, 3239, 3241, 3254, 3256,
- 3259, 3262, 3267, 3269, 3272, 3274, 3276, 3279,
- 3284, 3286, 3288, 3293, 3295, 3298, 3302, 3322,
- 3326, 3330, 3332, 3334, 3342, 3344, 3351, 3356,
- 3358, 3362, 3365, 3368, 3371, 3375, 3378, 3381,
- 3385, 3395, 3401, 3404, 3407, 3417, 3437, 3443,
- 3446, 3448, 3452, 3454, 3457, 3459, 3463, 3465,
- 3467, 3471, 3473, 3475, 3481, 3484, 3489, 3494,
- 3500, 3510, 3518, 3530, 3537, 3547, 3553, 3565,
- 3571, 3589, 3592, 3600, 3606, 3616, 3623, 3630,
- 3638, 3646, 3649, 3654, 3674, 3680, 3683, 3687,
- 3691, 3695, 3707, 3710, 3715, 3716, 3722, 3729,
- 3735, 3738, 3741, 3745, 3749, 3752, 3755, 3760,
- 3764, 3770, 3776, 3779, 3783, 3786, 3789, 3794,
- 3797, 3800, 3806, 3810, 3813, 3817, 3820, 3823,
- 3827, 3831, 3838, 3841, 3844, 3850, 3853, 3860,
- 3862, 3864, 3867, 3876, 3881, 3895, 3899, 3903,
- 3918, 3924, 3927, 3930, 3932, 3937, 3943, 3947,
- 3955, 3961, 3971, 3974, 3977, 3982, 3986, 3989,
- 3992, 3995, 3999, 4004, 4008, 4012, 4015, 4020,
- 4025, 4028, 4034, 4038, 4044, 4049, 4053, 4057,
- 4065, 4068, 4076, 4082, 4092, 4103, 4106, 4109,
- 4111, 4115, 4117, 4120, 4131, 4135, 4138, 4141,
- 4144, 4147, 4149, 4153, 4157, 4160, 4164, 4169,
- 4172, 4182, 4184, 4225, 4231, 4235, 4238, 4241,
- 4245, 4248, 4252, 4256, 4261, 4263, 4267, 4271,
- 4274, 4277, 4282, 4291, 4295, 4300, 4305, 4309,
- 4316, 4320, 4323, 4327, 4330, 4335, 4338, 4341,
- 4371, 4375, 4379, 4383, 4387, 4392, 4396, 4402,
- 4406, 4414, 4417, 4422, 4426, 4429, 4434, 4437,
- 4441, 4444, 4447, 4450, 4453, 4456, 4460, 4464,
- 4467, 4477, 4480, 4483, 4488, 4494, 4497, 4512,
- 4515, 4519, 4525, 4529, 4533, 4536, 4540, 4547,
- 4550, 4553, 4559, 4562, 4566, 4571, 4587, 4589,
- 4597, 4599, 4607, 4613, 4615, 4619, 4622, 4625,
- 4628, 4632, 4643, 4646, 4658, 4682, 4690, 4692,
- 4696, 4699, 4704, 4707, 4709, 4714, 4717, 4723,
- 4726, 4734, 4736, 4738, 4740, 4742, 4744, 4746,
- 4748, 4750, 4752, 4755, 4758, 4760, 4762, 4764,
- 4766, 4769, 4772, 4777, 4781, 4782, 4784, 4786,
- 4792, 4797, 4802, 4804, 4807, 4809, 4812, 4816,
- 4822, 4825, 4828, 4834, 4836, 4838, 4840, 4843,
- 4876, 4878, 4880, 4883, 4886, 4889, 4897, 4905,
- 4916, 4924, 4933, 4941, 4950, 4959, 4971, 4978,
- 4985, 4993, 5001, 5010, 5016, 5024, 5030, 5038,
- 5040, 5043, 5057, 5063, 5071, 5075, 5079, 5081,
- 5128, 5130, 5133, 5135, 5140, 5146, 5152, 5157,
- 5160, 5164, 5167, 5170, 5172, 5175, 5178, 5181,
- 5185, 5190, 5195, 5199, 5201, 5204, 5206, 5210,
- 5213, 5216, 5219, 5222, 5226, 5231, 5235, 5237,
- 5239, 5242, 5244, 5248, 5251, 5254, 5262, 5266,
- 5274, 5290, 5292, 5297, 5299, 5303, 5314, 5318,
- 5320, 5323, 5325, 5328, 5333, 5337, 5343, 5349,
- 5360, 5365, 5368, 5371, 5374, 5377, 5379, 5383,
- 5384, 5387, 5389, 5419, 5421, 5423, 5426, 5430,
- 5433, 5437, 5439, 5441, 5443, 5449, 5452, 5455,
- 5459, 5461, 5466, 5471, 5478, 5481, 5485, 5489,
- 5491, 5494, 5514, 5516, 5518, 5525, 5529, 5531,
- 5533, 5535, 5538, 5542, 5546, 5548, 5552, 5555,
- 5557, 5562, 5580, 5619, 5625, 5628, 5630, 5632,
- 5634, 5637, 5640, 5643, 5646, 5649, 5653, 5656,
- 5659, 5662, 5664, 5666, 5669, 5676, 5679, 5681,
- 5684, 5687, 5690, 5698, 5700, 5702, 5705, 5707,
- 5710, 5712, 5714, 5744, 5747, 5750, 5753, 5756,
- 5761, 5765, 5772, 5775, 5784, 5793, 5796, 5800,
- 5803, 5806, 5810, 5812, 5816, 5818, 5821, 5823,
- 5827, 5831, 5835, 5843, 5845, 5847, 5851, 5855,
- 5857, 5870, 5872, 5875, 5878, 5883, 5885, 5888,
- 5890, 5892, 5895, 5900, 5902, 5904, 5909, 5911,
- 5914, 5918, 5938, 5942, 5946, 5948, 5950, 5958,
- 5960, 5967, 5972, 5974, 5978, 5981, 5984, 5987,
- 5991, 5994, 5997, 6001, 6011, 6017, 6020, 6023,
- 6033, 6053, 6059, 6062, 6064, 6068, 6070, 6073,
- 6075, 6079, 6081, 6083, 6087, 6089, 6091, 6097,
- 6100, 6105, 6110, 6116, 6126, 6134, 6146, 6153,
- 6163, 6169, 6181, 6187, 6205, 6208, 6216, 6222,
- 6232, 6239, 6246, 6254, 6262, 6265, 6270, 6290,
- 6296, 6299, 6303, 6307, 6311, 6323, 6326, 6331,
- 6332, 6338, 6345, 6351, 6354, 6357, 6361, 6365,
- 6368, 6371, 6376, 6380, 6386, 6392, 6395, 6399,
- 6402, 6405, 6410, 6413, 6416, 6422, 6426, 6429,
- 6433, 6436, 6439, 6443, 6447, 6454, 6457, 6460,
- 6466, 6469, 6476, 6478, 6480, 6483, 6492, 6497,
- 6511, 6515, 6519, 6534, 6540, 6543, 6546, 6548,
- 6553, 6559, 6563, 6571, 6577, 6587, 6590, 6593,
- 6598, 6602, 6605, 6608, 6611, 6615, 6620, 6624,
- 6628, 6631, 6636, 6641, 6644, 6650, 6654, 6660,
- 6665, 6669, 6673, 6681, 6684, 6692, 6698, 6708,
- 6719, 6722, 6725, 6727, 6731, 6733, 6736, 6747,
- 6751, 6754, 6757, 6760, 6763, 6765, 6769, 6773,
- 6776, 6780, 6785, 6788, 6798, 6800, 6841, 6847,
- 6851, 6854, 6857, 6861, 6864, 6868, 6872, 6877,
- 6879, 6883, 6887, 6890, 6893, 6898, 6907, 6911,
- 6916, 6921, 6925, 6932, 6936, 6939, 6943, 6946,
- 6951, 6954, 6957, 6987, 6991, 6995, 6999, 7003,
- 7008, 7012, 7018, 7022, 7030, 7033, 7038, 7042,
- 7045, 7050, 7053, 7057, 7060, 7063, 7066, 7069,
- 7072, 7076, 7080, 7083, 7093, 7096, 7099, 7104,
- 7110, 7113, 7128, 7131, 7135, 7141, 7145, 7149,
- 7152, 7156, 7163, 7166, 7169, 7175, 7178, 7182,
- 7187, 7203, 7205, 7213, 7215, 7223, 7229, 7231,
- 7235, 7238, 7241, 7244, 7248, 7259, 7262, 7274,
- 7298, 7306, 7308, 7312, 7315, 7320, 7323, 7325,
- 7330, 7333, 7339, 7342, 7408, 7411, 7413, 7415,
- 7417, 7419, 7421, 7424, 7429, 7431, 7434, 7437,
- 7439, 7479, 7481, 7483, 7485, 7490, 7494, 7495,
- 7497, 7499, 7506, 7513, 7520, 7522, 7524, 7526,
- 7529, 7532, 7538, 7541, 7546, 7553, 7558, 7561,
- 7565, 7572, 7604, 7653, 7668, 7681, 7686, 7688,
- 7692, 7723, 7729, 7731, 7752, 7772, 7774, 7786,
- 7797, 7800, 7803, 7804, 7806, 7808, 7810, 7813,
- 7815, 7823, 7825, 7827, 7829, 7839, 7848, 7851,
- 7855, 7859, 7862, 7864, 7866, 7868, 7870, 7872,
- 7882, 7891, 7894, 7898, 7902, 7905, 7907, 7909,
- 7911, 7913, 7915, 7957, 7997, 7999, 8004, 8008,
- 8009, 8011, 8013, 8020, 8027, 8034, 8036, 8038,
- 8040, 8043, 8046, 8052, 8055, 8060, 8067, 8072,
- 8075, 8079, 8086, 8118, 8167, 8182, 8195, 8200,
- 8202, 8206, 8237, 8243, 8245, 8266, 8286,
-}
-
-var _hcltok_indicies []int16 = []int16{
- 1, 0, 3, 2, 3, 4, 2, 6,
- 8, 8, 7, 5, 9, 9, 7, 5,
- 7, 5, 10, 11, 12, 13, 15, 16,
- 17, 18, 19, 20, 21, 22, 23, 24,
- 25, 26, 27, 28, 29, 30, 31, 32,
- 33, 34, 35, 36, 37, 39, 40, 41,
- 42, 43, 11, 11, 14, 14, 38, 0,
- 11, 12, 13, 15, 16, 17, 18, 19,
- 20, 21, 22, 23, 24, 25, 26, 27,
- 28, 29, 30, 31, 32, 33, 34, 35,
- 36, 37, 39, 40, 41, 42, 43, 11,
- 11, 14, 14, 38, 0, 44, 45, 11,
- 11, 46, 13, 15, 16, 17, 16, 47,
- 48, 20, 49, 22, 23, 50, 51, 52,
- 53, 54, 55, 56, 57, 58, 59, 60,
- 61, 62, 37, 39, 63, 41, 64, 65,
- 66, 11, 11, 11, 14, 38, 0, 44,
- 0, 11, 11, 11, 11, 0, 11, 11,
- 11, 0, 11, 0, 11, 0, 11, 0,
- 0, 0, 0, 0, 11, 0, 0, 0,
- 0, 11, 11, 11, 11, 11, 0, 0,
- 11, 0, 0, 11, 0, 11, 0, 0,
- 11, 0, 0, 0, 11, 11, 11, 11,
- 11, 11, 0, 11, 11, 0, 11, 11,
- 0, 0, 0, 0, 0, 0, 11, 11,
- 0, 0, 11, 0, 11, 11, 11, 0,
- 67, 68, 69, 70, 14, 71, 72, 73,
- 74, 75, 76, 77, 78, 79, 80, 81,
- 82, 83, 84, 85, 86, 87, 88, 89,
- 90, 91, 92, 93, 94, 95, 96, 97,
- 0, 11, 0, 11, 0, 11, 11, 0,
- 11, 11, 0, 0, 0, 11, 0, 0,
- 0, 0, 0, 0, 0, 11, 0, 0,
- 0, 0, 0, 0, 0, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 11,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 0, 0, 0, 0, 0, 0, 0,
- 0, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 0, 11, 11, 11, 11, 11,
- 11, 11, 11, 0, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 0,
- 11, 11, 11, 11, 11, 11, 0, 11,
- 11, 11, 11, 11, 11, 0, 0, 0,
- 0, 0, 0, 0, 0, 11, 11, 11,
- 11, 11, 11, 11, 11, 0, 11, 11,
- 11, 11, 11, 11, 11, 11, 0, 11,
- 11, 11, 11, 11, 0, 0, 0, 0,
- 0, 0, 0, 0, 11, 11, 11, 11,
- 11, 11, 0, 11, 11, 11, 11, 11,
- 11, 11, 0, 11, 0, 11, 11, 0,
- 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 0, 11, 11,
- 11, 11, 11, 0, 11, 11, 11, 11,
- 11, 11, 11, 0, 11, 11, 11, 0,
- 11, 11, 11, 0, 11, 0, 98, 99,
- 100, 101, 102, 103, 104, 105, 106, 107,
- 108, 109, 110, 111, 112, 113, 114, 16,
- 115, 116, 117, 118, 119, 120, 121, 122,
- 123, 124, 125, 126, 127, 128, 129, 130,
- 131, 132, 14, 15, 133, 134, 135, 136,
- 137, 14, 16, 14, 0, 11, 0, 11,
- 11, 0, 0, 11, 0, 0, 0, 0,
- 11, 0, 0, 0, 0, 0, 11, 0,
- 0, 0, 0, 0, 11, 11, 11, 11,
- 11, 0, 0, 0, 11, 0, 0, 0,
- 11, 11, 11, 0, 0, 0, 11, 11,
- 0, 0, 0, 11, 11, 11, 0, 0,
- 0, 11, 11, 11, 11, 0, 11, 11,
- 11, 11, 0, 0, 0, 0, 0, 11,
- 11, 11, 11, 0, 0, 11, 11, 11,
- 0, 0, 11, 11, 11, 11, 0, 11,
- 11, 0, 11, 11, 0, 0, 0, 11,
- 11, 11, 0, 0, 0, 0, 11, 11,
- 11, 11, 11, 0, 0, 0, 0, 11,
- 0, 11, 11, 0, 11, 11, 0, 11,
- 0, 11, 11, 11, 0, 11, 11, 0,
- 0, 0, 11, 0, 0, 0, 0, 0,
- 0, 0, 11, 11, 11, 11, 0, 11,
- 11, 11, 11, 11, 11, 11, 0, 138,
- 139, 140, 141, 142, 143, 144, 145, 146,
- 14, 147, 148, 149, 150, 151, 0, 11,
- 0, 0, 0, 0, 0, 11, 11, 0,
- 11, 11, 11, 0, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 0, 11,
- 11, 11, 0, 0, 11, 11, 11, 0,
- 0, 11, 0, 0, 11, 11, 11, 11,
- 11, 0, 0, 0, 0, 11, 11, 11,
- 11, 11, 11, 0, 11, 11, 11, 11,
- 11, 0, 152, 109, 153, 154, 155, 14,
- 156, 157, 16, 14, 0, 11, 11, 11,
- 11, 0, 0, 0, 11, 0, 0, 11,
- 11, 11, 0, 0, 0, 11, 11, 0,
- 119, 0, 16, 14, 14, 158, 0, 14,
- 0, 11, 16, 159, 160, 16, 161, 162,
- 16, 57, 163, 164, 165, 166, 167, 16,
- 168, 169, 170, 16, 171, 172, 173, 15,
- 174, 175, 176, 15, 177, 16, 14, 0,
- 0, 11, 11, 0, 0, 0, 11, 11,
- 11, 11, 0, 11, 11, 0, 0, 0,
- 0, 11, 11, 0, 0, 11, 11, 0,
- 0, 0, 0, 0, 0, 11, 11, 11,
- 0, 0, 0, 11, 0, 0, 0, 11,
- 11, 0, 11, 11, 11, 11, 0, 11,
- 11, 11, 11, 0, 11, 11, 11, 11,
- 11, 11, 0, 0, 0, 11, 11, 11,
- 11, 0, 178, 179, 0, 14, 0, 11,
- 0, 0, 11, 16, 180, 181, 182, 183,
- 57, 184, 185, 55, 186, 187, 188, 189,
- 190, 191, 192, 193, 194, 14, 0, 0,
- 11, 0, 11, 11, 11, 11, 11, 11,
- 11, 0, 11, 11, 11, 0, 11, 0,
- 0, 11, 0, 11, 0, 0, 11, 11,
- 11, 11, 0, 11, 11, 11, 0, 0,
- 11, 11, 11, 11, 0, 11, 11, 0,
- 0, 11, 11, 11, 11, 11, 0, 195,
- 196, 197, 198, 199, 200, 201, 202, 203,
- 204, 205, 201, 206, 207, 208, 209, 38,
- 0, 210, 211, 16, 212, 213, 214, 215,
- 216, 217, 218, 219, 220, 16, 14, 221,
- 222, 223, 224, 16, 225, 226, 227, 228,
- 229, 230, 231, 232, 233, 234, 235, 236,
- 237, 238, 239, 16, 144, 14, 240, 0,
- 11, 11, 11, 11, 11, 0, 0, 0,
- 11, 0, 11, 11, 0, 11, 0, 11,
- 11, 0, 0, 0, 11, 11, 11, 0,
- 0, 0, 11, 11, 11, 0, 0, 0,
- 0, 11, 0, 0, 11, 0, 0, 11,
- 11, 11, 0, 0, 11, 0, 11, 11,
- 11, 0, 11, 11, 11, 11, 11, 11,
- 0, 0, 0, 11, 11, 0, 11, 11,
- 0, 11, 11, 0, 11, 11, 0, 11,
- 11, 11, 11, 11, 11, 11, 0, 11,
- 0, 11, 0, 11, 11, 0, 11, 0,
- 11, 11, 0, 11, 0, 11, 0, 241,
- 212, 242, 243, 244, 245, 246, 247, 248,
- 249, 250, 98, 251, 16, 252, 253, 254,
- 16, 255, 129, 256, 257, 258, 259, 260,
- 261, 262, 263, 16, 0, 0, 0, 11,
- 11, 11, 0, 11, 11, 0, 11, 11,
- 0, 0, 0, 0, 0, 11, 11, 11,
- 11, 0, 11, 11, 11, 11, 11, 11,
- 0, 0, 0, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 0, 11, 11, 11,
- 11, 11, 11, 11, 11, 0, 11, 11,
- 0, 0, 0, 0, 11, 11, 11, 0,
- 0, 0, 11, 0, 0, 0, 11, 11,
- 0, 11, 11, 11, 0, 11, 0, 0,
- 0, 11, 11, 0, 11, 11, 11, 0,
- 11, 11, 11, 0, 0, 0, 0, 11,
- 16, 181, 264, 265, 14, 16, 14, 0,
- 0, 11, 0, 11, 16, 264, 14, 0,
- 16, 266, 14, 0, 0, 11, 16, 267,
- 268, 269, 172, 270, 271, 16, 272, 273,
- 274, 14, 0, 0, 11, 11, 11, 0,
- 11, 11, 0, 11, 11, 11, 11, 0,
- 0, 11, 0, 0, 11, 11, 0, 11,
- 0, 16, 14, 0, 275, 16, 276, 0,
- 14, 0, 11, 0, 11, 277, 16, 278,
- 279, 0, 11, 0, 0, 0, 11, 11,
- 11, 11, 0, 280, 281, 282, 16, 283,
- 284, 285, 286, 287, 288, 289, 290, 291,
- 292, 293, 294, 295, 296, 14, 0, 11,
- 11, 11, 0, 0, 0, 0, 11, 11,
- 0, 0, 11, 0, 0, 0, 0, 0,
- 0, 0, 11, 0, 11, 0, 0, 0,
- 0, 0, 0, 11, 11, 11, 11, 11,
- 0, 0, 11, 0, 0, 0, 11, 0,
- 0, 11, 0, 0, 11, 0, 0, 11,
- 0, 0, 0, 11, 11, 11, 0, 0,
- 0, 11, 11, 11, 11, 0, 297, 16,
- 298, 16, 299, 300, 301, 302, 14, 0,
- 11, 11, 11, 11, 11, 0, 0, 0,
- 11, 0, 0, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 0, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 11,
- 11, 0, 11, 11, 11, 11, 11, 0,
- 303, 16, 14, 0, 11, 304, 16, 100,
- 14, 0, 11, 305, 0, 14, 0, 11,
- 16, 306, 14, 0, 0, 11, 307, 0,
- 16, 308, 14, 0, 0, 11, 11, 11,
- 11, 0, 11, 11, 11, 11, 0, 11,
- 11, 11, 11, 11, 0, 0, 11, 0,
- 11, 11, 11, 0, 11, 0, 11, 11,
- 11, 0, 0, 0, 0, 0, 0, 0,
- 11, 11, 11, 0, 11, 0, 0, 0,
- 11, 11, 11, 11, 0, 309, 310, 69,
- 311, 312, 313, 314, 315, 316, 317, 318,
- 319, 320, 321, 322, 323, 324, 325, 326,
- 327, 328, 329, 331, 332, 333, 334, 335,
- 336, 330, 0, 11, 11, 11, 11, 0,
- 11, 0, 11, 11, 0, 11, 11, 11,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 11, 11, 11, 11, 11, 0, 11,
- 11, 11, 11, 11, 11, 11, 0, 11,
- 11, 11, 0, 11, 11, 11, 11, 11,
- 11, 11, 0, 11, 11, 11, 0, 11,
- 11, 11, 11, 11, 11, 11, 0, 11,
- 11, 11, 0, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 0, 11, 0,
- 11, 11, 11, 11, 11, 0, 11, 11,
- 0, 11, 11, 11, 11, 11, 11, 11,
- 0, 11, 11, 11, 0, 11, 11, 11,
- 11, 0, 11, 11, 11, 11, 0, 11,
- 11, 11, 11, 0, 11, 0, 11, 11,
- 0, 11, 11, 11, 11, 11, 11, 11,
- 11, 11, 11, 11, 11, 11, 11, 0,
- 11, 11, 11, 0, 11, 0, 11, 11,
- 0, 11, 0, 337, 338, 339, 101, 102,
- 103, 104, 105, 340, 107, 108, 109, 110,
- 111, 112, 341, 342, 167, 343, 258, 117,
- 344, 119, 229, 269, 122, 345, 346, 347,
- 348, 349, 350, 351, 352, 353, 354, 131,
- 355, 16, 14, 15, 16, 134, 135, 136,
- 137, 14, 14, 0, 11, 11, 0, 11,
- 11, 11, 11, 11, 11, 0, 0, 0,
- 11, 0, 11, 11, 11, 11, 0, 11,
- 11, 11, 0, 11, 11, 0, 11, 11,
- 11, 0, 0, 11, 11, 11, 0, 0,
- 11, 11, 0, 11, 0, 11, 0, 11,
- 11, 11, 0, 0, 11, 11, 0, 11,
- 11, 0, 11, 11, 11, 0, 356, 140,
- 142, 143, 144, 145, 146, 14, 357, 148,
- 358, 150, 359, 0, 11, 11, 0, 0,
- 0, 0, 11, 0, 0, 11, 11, 11,
- 11, 11, 0, 360, 109, 361, 154, 155,
- 14, 156, 157, 16, 14, 0, 11, 11,
- 11, 11, 0, 0, 0, 11, 16, 159,
- 160, 16, 362, 363, 219, 308, 163, 164,
- 165, 364, 167, 365, 366, 367, 368, 369,
- 370, 371, 372, 373, 374, 175, 176, 15,
- 375, 16, 14, 0, 0, 0, 0, 11,
- 11, 11, 0, 0, 0, 0, 0, 11,
- 11, 0, 11, 11, 11, 0, 11, 11,
- 0, 0, 0, 11, 11, 0, 11, 11,
- 11, 11, 0, 11, 0, 11, 11, 11,
- 11, 11, 0, 0, 0, 0, 0, 11,
- 11, 11, 11, 11, 11, 0, 11, 0,
- 16, 180, 181, 376, 183, 57, 184, 185,
- 55, 186, 187, 377, 14, 190, 378, 192,
- 193, 194, 14, 0, 11, 11, 11, 11,
- 11, 11, 11, 0, 11, 11, 0, 11,
- 0, 379, 380, 197, 198, 199, 381, 201,
- 202, 382, 383, 384, 201, 206, 207, 208,
- 209, 38, 0, 210, 211, 16, 212, 213,
- 215, 385, 217, 386, 219, 220, 16, 14,
- 387, 222, 223, 224, 16, 225, 226, 227,
- 228, 229, 230, 231, 232, 388, 234, 235,
- 389, 237, 238, 239, 16, 144, 14, 240,
- 0, 0, 11, 0, 0, 11, 0, 11,
- 11, 11, 11, 11, 0, 11, 11, 0,
- 390, 391, 392, 393, 394, 395, 396, 397,
- 247, 398, 319, 399, 213, 400, 401, 402,
- 403, 404, 401, 405, 406, 407, 258, 408,
- 260, 409, 410, 271, 0, 11, 0, 11,
- 0, 11, 0, 11, 0, 11, 11, 0,
- 11, 0, 11, 11, 11, 0, 11, 11,
- 0, 0, 11, 11, 11, 0, 11, 0,
- 11, 0, 11, 11, 0, 11, 0, 11,
- 0, 11, 0, 11, 0, 11, 0, 0,
- 0, 11, 11, 11, 0, 11, 11, 0,
- 16, 267, 229, 411, 401, 412, 271, 16,
- 413, 414, 274, 14, 0, 11, 0, 11,
- 11, 11, 0, 0, 0, 11, 11, 0,
- 277, 16, 278, 415, 0, 11, 11, 0,
- 16, 283, 284, 285, 286, 287, 288, 289,
- 290, 291, 292, 416, 14, 0, 0, 0,
- 11, 16, 417, 16, 265, 300, 301, 302,
- 14, 0, 0, 11, 419, 419, 419, 419,
- 418, 419, 419, 419, 418, 419, 418, 419,
- 418, 419, 418, 418, 418, 418, 418, 419,
- 418, 418, 418, 418, 419, 419, 419, 419,
- 419, 418, 418, 419, 418, 418, 419, 418,
- 419, 418, 418, 419, 418, 418, 418, 419,
- 419, 419, 419, 419, 419, 418, 419, 419,
- 418, 419, 419, 418, 418, 418, 418, 418,
- 418, 419, 419, 418, 418, 419, 418, 419,
- 419, 419, 418, 421, 422, 423, 424, 425,
- 426, 427, 428, 429, 430, 431, 432, 433,
- 434, 435, 436, 437, 438, 439, 440, 441,
- 442, 443, 444, 445, 446, 447, 448, 449,
- 450, 451, 452, 418, 419, 418, 419, 418,
- 419, 419, 418, 419, 419, 418, 418, 418,
- 419, 418, 418, 418, 418, 418, 418, 418,
- 419, 418, 418, 418, 418, 418, 418, 418,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 418, 418, 418, 418, 418,
- 418, 418, 418, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 418, 418, 418, 418,
- 418, 418, 418, 418, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 418, 419, 419,
- 419, 419, 419, 419, 419, 419, 418, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 418, 419, 419, 419, 419, 419,
- 419, 418, 419, 419, 419, 419, 419, 419,
- 418, 418, 418, 418, 418, 418, 418, 418,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 418, 419, 419, 419, 419, 419, 419, 419,
- 419, 418, 419, 419, 419, 419, 419, 418,
- 418, 418, 418, 418, 418, 418, 418, 419,
- 419, 419, 419, 419, 419, 418, 419, 419,
- 419, 419, 419, 419, 419, 418, 419, 418,
- 419, 419, 418, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 418, 419, 419, 419, 419, 419, 418, 419,
- 419, 419, 419, 419, 419, 419, 418, 419,
- 419, 419, 418, 419, 419, 419, 418, 419,
- 418, 453, 454, 455, 456, 457, 458, 459,
- 460, 461, 462, 463, 464, 465, 466, 467,
- 468, 469, 470, 471, 472, 473, 474, 475,
- 476, 477, 478, 479, 480, 481, 482, 483,
- 484, 485, 486, 487, 488, 425, 489, 490,
- 491, 492, 493, 494, 425, 470, 425, 418,
- 419, 418, 419, 419, 418, 418, 419, 418,
- 418, 418, 418, 419, 418, 418, 418, 418,
- 418, 419, 418, 418, 418, 418, 418, 419,
- 419, 419, 419, 419, 418, 418, 418, 419,
- 418, 418, 418, 419, 419, 419, 418, 418,
- 418, 419, 419, 418, 418, 418, 419, 419,
- 419, 418, 418, 418, 419, 419, 419, 419,
- 418, 419, 419, 419, 419, 418, 418, 418,
- 418, 418, 419, 419, 419, 419, 418, 418,
- 419, 419, 419, 418, 418, 419, 419, 419,
- 419, 418, 419, 419, 418, 419, 419, 418,
- 418, 418, 419, 419, 419, 418, 418, 418,
- 418, 419, 419, 419, 419, 419, 418, 418,
- 418, 418, 419, 418, 419, 419, 418, 419,
- 419, 418, 419, 418, 419, 419, 419, 418,
- 419, 419, 418, 418, 418, 419, 418, 418,
- 418, 418, 418, 418, 418, 419, 419, 419,
- 419, 418, 419, 419, 419, 419, 419, 419,
- 419, 418, 495, 496, 497, 498, 499, 500,
- 501, 502, 503, 425, 504, 505, 506, 507,
- 508, 418, 419, 418, 418, 418, 418, 418,
- 419, 419, 418, 419, 419, 419, 418, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 418, 419, 419, 419, 418, 418, 419,
- 419, 419, 418, 418, 419, 418, 418, 419,
- 419, 419, 419, 419, 418, 418, 418, 418,
- 419, 419, 419, 419, 419, 419, 418, 419,
- 419, 419, 419, 419, 418, 509, 464, 510,
- 511, 512, 425, 513, 514, 470, 425, 418,
- 419, 419, 419, 419, 418, 418, 418, 419,
- 418, 418, 419, 419, 419, 418, 418, 418,
- 419, 419, 418, 475, 418, 470, 425, 425,
- 515, 418, 425, 418, 419, 470, 516, 517,
- 470, 518, 519, 470, 520, 521, 522, 523,
- 524, 525, 470, 526, 527, 528, 470, 529,
- 530, 531, 489, 532, 533, 534, 489, 535,
- 470, 425, 418, 418, 419, 419, 418, 418,
- 418, 419, 419, 419, 419, 418, 419, 419,
- 418, 418, 418, 418, 419, 419, 418, 418,
- 419, 419, 418, 418, 418, 418, 418, 418,
- 419, 419, 419, 418, 418, 418, 419, 418,
- 418, 418, 419, 419, 418, 419, 419, 419,
- 419, 418, 419, 419, 419, 419, 418, 419,
- 419, 419, 419, 419, 419, 418, 418, 418,
- 419, 419, 419, 419, 418, 536, 537, 418,
- 425, 418, 419, 418, 418, 419, 470, 538,
- 539, 540, 541, 520, 542, 543, 544, 545,
- 546, 547, 548, 549, 550, 551, 552, 553,
- 425, 418, 418, 419, 418, 419, 419, 419,
- 419, 419, 419, 419, 418, 419, 419, 419,
- 418, 419, 418, 418, 419, 418, 419, 418,
- 418, 419, 419, 419, 419, 418, 419, 419,
- 419, 418, 418, 419, 419, 419, 419, 418,
- 419, 419, 418, 418, 419, 419, 419, 419,
- 419, 418, 554, 555, 556, 557, 558, 559,
- 560, 561, 562, 563, 564, 560, 566, 567,
- 568, 569, 565, 418, 570, 571, 470, 572,
- 573, 574, 575, 576, 577, 578, 579, 580,
- 470, 425, 581, 582, 583, 584, 470, 585,
- 586, 587, 588, 589, 590, 591, 592, 593,
- 594, 595, 596, 597, 598, 599, 470, 501,
- 425, 600, 418, 419, 419, 419, 419, 419,
- 418, 418, 418, 419, 418, 419, 419, 418,
- 419, 418, 419, 419, 418, 418, 418, 419,
- 419, 419, 418, 418, 418, 419, 419, 419,
- 418, 418, 418, 418, 419, 418, 418, 419,
- 418, 418, 419, 419, 419, 418, 418, 419,
- 418, 419, 419, 419, 418, 419, 419, 419,
- 419, 419, 419, 418, 418, 418, 419, 419,
- 418, 419, 419, 418, 419, 419, 418, 419,
- 419, 418, 419, 419, 419, 419, 419, 419,
- 419, 418, 419, 418, 419, 418, 419, 419,
- 418, 419, 418, 419, 419, 418, 419, 418,
- 419, 418, 601, 572, 602, 603, 604, 605,
- 606, 607, 608, 609, 610, 453, 611, 470,
- 612, 613, 614, 470, 615, 485, 616, 617,
- 618, 619, 620, 621, 622, 623, 470, 418,
- 418, 418, 419, 419, 419, 418, 419, 419,
- 418, 419, 419, 418, 418, 418, 418, 418,
- 419, 419, 419, 419, 418, 419, 419, 419,
- 419, 419, 419, 418, 418, 418, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 418,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 418, 419, 419, 418, 418, 418, 418, 419,
- 419, 419, 418, 418, 418, 419, 418, 418,
- 418, 419, 419, 418, 419, 419, 419, 418,
- 419, 418, 418, 418, 419, 419, 418, 419,
- 419, 419, 418, 419, 419, 419, 418, 418,
- 418, 418, 419, 470, 539, 624, 625, 425,
- 470, 425, 418, 418, 419, 418, 419, 470,
- 624, 425, 418, 470, 626, 425, 418, 418,
- 419, 470, 627, 628, 629, 530, 630, 631,
- 470, 632, 633, 634, 425, 418, 418, 419,
- 419, 419, 418, 419, 419, 418, 419, 419,
- 419, 419, 418, 418, 419, 418, 418, 419,
- 419, 418, 419, 418, 470, 425, 418, 635,
- 470, 636, 418, 425, 418, 419, 418, 419,
- 637, 470, 638, 639, 418, 419, 418, 418,
- 418, 419, 419, 419, 419, 418, 640, 641,
- 642, 470, 643, 644, 645, 646, 647, 648,
- 649, 650, 651, 652, 653, 654, 655, 656,
- 425, 418, 419, 419, 419, 418, 418, 418,
- 418, 419, 419, 418, 418, 419, 418, 418,
- 418, 418, 418, 418, 418, 419, 418, 419,
- 418, 418, 418, 418, 418, 418, 419, 419,
- 419, 419, 419, 418, 418, 419, 418, 418,
- 418, 419, 418, 418, 419, 418, 418, 419,
- 418, 418, 419, 418, 418, 418, 419, 419,
- 419, 418, 418, 418, 419, 419, 419, 419,
- 418, 657, 470, 658, 470, 659, 660, 661,
- 662, 425, 418, 419, 419, 419, 419, 419,
- 418, 418, 418, 419, 418, 418, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 418, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 418, 419, 419, 419,
- 419, 419, 418, 663, 470, 425, 418, 419,
- 664, 470, 455, 425, 418, 419, 665, 418,
- 425, 418, 419, 470, 666, 425, 418, 418,
- 419, 667, 418, 470, 668, 425, 418, 418,
- 419, 670, 669, 419, 419, 419, 419, 670,
- 669, 419, 670, 669, 670, 670, 419, 670,
- 669, 419, 670, 419, 670, 669, 419, 670,
- 419, 670, 419, 669, 670, 670, 670, 670,
- 670, 670, 670, 670, 669, 419, 419, 670,
- 670, 419, 670, 419, 670, 669, 670, 670,
- 670, 670, 670, 419, 670, 419, 670, 419,
- 670, 669, 670, 670, 419, 670, 419, 670,
- 669, 670, 670, 670, 670, 670, 419, 670,
- 419, 670, 669, 419, 419, 670, 419, 670,
- 669, 670, 670, 670, 419, 670, 419, 670,
- 419, 670, 419, 670, 669, 670, 419, 670,
- 419, 670, 669, 419, 670, 670, 670, 670,
- 419, 670, 419, 670, 419, 670, 419, 670,
- 419, 670, 419, 670, 669, 419, 670, 669,
- 670, 670, 670, 419, 670, 419, 670, 669,
- 670, 419, 670, 419, 670, 669, 419, 670,
- 670, 670, 670, 419, 670, 419, 670, 669,
- 419, 670, 419, 670, 419, 670, 669, 670,
- 670, 419, 670, 419, 670, 669, 419, 670,
- 419, 670, 419, 670, 419, 669, 670, 670,
- 670, 419, 670, 419, 670, 669, 419, 670,
- 669, 670, 670, 419, 670, 669, 670, 670,
- 670, 419, 670, 670, 670, 670, 670, 670,
- 419, 419, 670, 419, 670, 419, 670, 419,
- 670, 669, 670, 419, 670, 419, 670, 669,
- 419, 670, 669, 670, 419, 670, 669, 670,
- 419, 670, 669, 419, 419, 670, 669, 419,
- 670, 419, 670, 419, 670, 419, 670, 419,
- 670, 419, 669, 670, 670, 419, 670, 670,
- 670, 670, 419, 419, 670, 670, 670, 670,
- 670, 419, 670, 670, 670, 670, 670, 669,
- 419, 670, 670, 419, 670, 419, 669, 670,
- 670, 419, 670, 669, 419, 419, 670, 419,
- 669, 670, 670, 669, 419, 670, 419, 669,
- 670, 669, 419, 670, 419, 670, 419, 669,
- 670, 670, 669, 419, 670, 419, 670, 419,
- 670, 669, 670, 419, 670, 419, 670, 669,
- 419, 670, 669, 419, 419, 670, 669, 670,
- 419, 669, 670, 669, 419, 670, 419, 670,
- 419, 669, 670, 669, 419, 419, 670, 669,
- 670, 419, 670, 419, 670, 669, 419, 670,
- 419, 669, 670, 669, 419, 419, 670, 419,
- 669, 670, 669, 419, 419, 670, 669, 670,
- 419, 670, 669, 670, 419, 670, 669, 670,
- 419, 670, 419, 670, 419, 669, 670, 669,
- 419, 419, 670, 669, 670, 419, 670, 419,
- 670, 669, 419, 670, 669, 670, 670, 419,
- 670, 419, 670, 669, 669, 419, 669, 419,
- 670, 670, 419, 670, 670, 670, 670, 670,
- 670, 670, 669, 419, 670, 670, 670, 419,
- 669, 670, 670, 670, 419, 670, 419, 670,
- 419, 670, 419, 670, 419, 670, 669, 419,
- 419, 670, 669, 670, 419, 670, 669, 419,
- 419, 670, 419, 419, 419, 670, 419, 670,
- 419, 670, 419, 670, 419, 669, 419, 670,
- 419, 670, 419, 669, 670, 669, 419, 670,
- 419, 669, 670, 419, 670, 670, 670, 669,
- 419, 670, 419, 419, 670, 419, 669, 670,
- 670, 669, 419, 670, 670, 670, 670, 419,
- 670, 419, 669, 670, 670, 670, 419, 670,
- 669, 670, 419, 670, 419, 670, 419, 670,
- 419, 670, 669, 670, 670, 419, 670, 669,
- 419, 670, 419, 670, 419, 669, 670, 670,
- 669, 419, 670, 419, 669, 670, 669, 419,
- 670, 669, 419, 670, 419, 670, 669, 670,
- 670, 670, 669, 419, 419, 419, 670, 669,
- 419, 670, 419, 669, 670, 669, 419, 670,
- 419, 670, 419, 669, 670, 670, 670, 669,
- 419, 670, 419, 669, 670, 670, 670, 670,
- 669, 419, 670, 419, 670, 669, 419, 419,
- 670, 419, 670, 669, 670, 419, 670, 419,
- 669, 670, 670, 669, 419, 670, 419, 670,
- 669, 419, 670, 670, 670, 419, 670, 419,
- 669, 419, 670, 669, 670, 419, 419, 670,
- 419, 670, 419, 669, 670, 670, 670, 670,
- 669, 419, 670, 419, 670, 419, 670, 419,
- 670, 419, 670, 669, 670, 670, 670, 419,
- 670, 419, 670, 419, 670, 419, 669, 670,
- 670, 419, 419, 670, 669, 670, 419, 670,
- 670, 669, 419, 670, 419, 670, 669, 419,
- 419, 670, 670, 670, 670, 419, 670, 419,
- 670, 419, 669, 670, 670, 419, 669, 670,
- 669, 419, 670, 419, 669, 670, 669, 419,
- 670, 419, 669, 670, 419, 670, 670, 669,
- 419, 670, 670, 419, 669, 670, 669, 419,
- 670, 419, 670, 669, 670, 419, 670, 419,
- 669, 670, 669, 419, 670, 419, 670, 419,
- 670, 419, 670, 419, 670, 669, 671, 669,
- 672, 673, 674, 675, 676, 677, 678, 679,
- 680, 681, 682, 674, 683, 684, 685, 686,
- 687, 674, 688, 689, 690, 691, 692, 693,
- 694, 695, 696, 697, 698, 699, 700, 701,
- 702, 674, 703, 671, 683, 671, 704, 671,
- 669, 670, 670, 670, 670, 419, 669, 670,
- 670, 669, 419, 670, 669, 419, 419, 670,
- 669, 419, 670, 419, 669, 670, 669, 419,
- 419, 670, 419, 669, 670, 670, 669, 419,
- 670, 670, 670, 669, 419, 670, 419, 670,
- 670, 669, 419, 419, 670, 419, 669, 670,
- 669, 419, 670, 669, 419, 419, 670, 419,
- 670, 669, 419, 670, 419, 419, 670, 419,
- 670, 419, 669, 670, 670, 669, 419, 670,
- 670, 419, 670, 669, 419, 670, 419, 670,
- 669, 419, 670, 419, 669, 419, 670, 670,
- 670, 419, 670, 669, 670, 419, 670, 669,
- 419, 670, 669, 670, 419, 670, 669, 419,
- 670, 669, 419, 670, 419, 670, 669, 419,
- 670, 669, 419, 670, 669, 705, 706, 707,
- 708, 709, 710, 711, 712, 713, 714, 715,
- 716, 676, 717, 718, 719, 720, 721, 718,
- 722, 723, 724, 725, 726, 727, 728, 729,
- 730, 671, 669, 670, 419, 670, 669, 670,
- 419, 670, 669, 670, 419, 670, 669, 670,
- 419, 670, 669, 419, 670, 419, 670, 669,
- 670, 419, 670, 669, 670, 419, 419, 419,
- 670, 669, 670, 419, 670, 669, 670, 670,
- 670, 670, 419, 670, 419, 669, 670, 669,
- 419, 419, 670, 419, 670, 669, 670, 419,
- 670, 669, 419, 670, 669, 670, 670, 419,
- 670, 669, 419, 670, 669, 670, 419, 670,
- 669, 419, 670, 669, 419, 670, 669, 419,
- 670, 669, 670, 669, 419, 419, 670, 669,
- 670, 419, 670, 669, 419, 670, 419, 669,
- 670, 669, 419, 674, 731, 671, 674, 732,
- 674, 733, 683, 671, 669, 670, 669, 419,
- 670, 669, 419, 674, 732, 683, 671, 669,
- 674, 734, 671, 683, 671, 669, 670, 669,
- 419, 674, 735, 692, 736, 718, 737, 730,
- 674, 738, 739, 740, 671, 683, 671, 669,
- 670, 669, 419, 670, 419, 670, 669, 419,
- 670, 419, 670, 419, 669, 670, 670, 669,
- 419, 670, 419, 670, 669, 419, 670, 669,
- 674, 683, 425, 669, 741, 674, 742, 683,
- 671, 669, 425, 670, 669, 419, 670, 669,
- 419, 743, 674, 744, 745, 671, 669, 419,
- 670, 669, 670, 670, 669, 419, 419, 670,
- 419, 670, 669, 674, 746, 747, 748, 749,
- 750, 751, 752, 753, 754, 755, 756, 671,
- 683, 671, 669, 670, 419, 670, 670, 670,
- 670, 670, 670, 670, 419, 670, 419, 670,
- 670, 670, 670, 670, 670, 669, 419, 670,
- 670, 419, 670, 419, 669, 670, 419, 670,
- 670, 670, 419, 670, 670, 419, 670, 670,
- 419, 670, 670, 419, 670, 670, 669, 419,
- 674, 757, 674, 733, 758, 759, 760, 671,
- 683, 671, 669, 670, 669, 419, 670, 670,
- 670, 419, 670, 670, 670, 419, 670, 419,
- 670, 669, 419, 419, 419, 419, 670, 670,
- 419, 419, 419, 419, 419, 670, 670, 670,
- 670, 670, 670, 670, 419, 670, 419, 670,
- 419, 669, 670, 670, 670, 419, 670, 419,
- 670, 669, 683, 425, 761, 674, 683, 425,
- 670, 669, 419, 762, 674, 763, 683, 425,
- 670, 669, 419, 670, 419, 764, 683, 671,
- 669, 425, 670, 669, 419, 674, 765, 671,
- 683, 671, 669, 670, 669, 419, 766, 766,
- 766, 768, 769, 770, 766, 767, 767, 771,
- 768, 771, 769, 771, 767, 772, 773, 772,
- 775, 774, 776, 774, 777, 774, 779, 778,
- 781, 782, 780, 781, 783, 780, 785, 784,
- 786, 784, 787, 784, 789, 788, 791, 792,
- 790, 791, 793, 790, 795, 795, 795, 795,
- 794, 795, 795, 795, 794, 795, 794, 795,
- 795, 794, 794, 794, 794, 794, 794, 795,
- 794, 794, 794, 794, 795, 795, 795, 795,
- 795, 794, 794, 795, 794, 794, 795, 794,
- 795, 794, 794, 795, 794, 794, 794, 795,
- 795, 795, 795, 795, 795, 794, 795, 795,
- 794, 795, 795, 794, 794, 794, 794, 794,
- 794, 795, 795, 794, 794, 795, 794, 795,
- 795, 795, 794, 797, 798, 799, 800, 801,
- 802, 803, 804, 805, 806, 807, 808, 809,
- 810, 811, 812, 813, 814, 815, 816, 817,
- 818, 819, 820, 821, 822, 823, 824, 825,
- 826, 827, 828, 794, 795, 794, 795, 794,
- 795, 795, 794, 795, 795, 794, 794, 794,
- 795, 794, 794, 794, 794, 794, 794, 794,
- 795, 794, 794, 794, 794, 794, 794, 794,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 794, 794, 794, 794, 794,
- 794, 794, 794, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 794, 794, 794, 794,
- 794, 794, 794, 794, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 794, 795, 795,
- 795, 795, 795, 795, 795, 795, 794, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 794, 795, 795, 795, 795, 795,
- 795, 794, 795, 795, 795, 795, 795, 795,
- 794, 794, 794, 794, 794, 794, 794, 794,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 794, 795, 795, 795, 795, 795, 795, 795,
- 795, 794, 795, 795, 795, 795, 795, 794,
- 794, 794, 794, 794, 794, 794, 794, 795,
- 795, 795, 795, 795, 795, 794, 795, 795,
- 795, 795, 795, 795, 795, 794, 795, 794,
- 795, 795, 794, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 794, 795, 795, 795, 795, 795, 794, 795,
- 795, 795, 795, 795, 795, 795, 794, 795,
- 795, 795, 794, 795, 795, 795, 794, 795,
- 794, 829, 830, 831, 832, 833, 834, 835,
- 836, 837, 838, 839, 840, 841, 842, 843,
- 844, 845, 846, 847, 848, 849, 850, 851,
- 852, 853, 854, 855, 856, 857, 858, 859,
- 860, 861, 862, 863, 864, 801, 865, 866,
- 867, 868, 869, 870, 801, 846, 801, 794,
- 795, 794, 795, 795, 794, 794, 795, 794,
- 794, 794, 794, 795, 794, 794, 794, 794,
- 794, 795, 794, 794, 794, 794, 794, 795,
- 795, 795, 795, 795, 794, 794, 794, 795,
- 794, 794, 794, 795, 795, 795, 794, 794,
- 794, 795, 795, 794, 794, 794, 795, 795,
- 795, 794, 794, 794, 795, 795, 795, 795,
- 794, 795, 795, 795, 795, 794, 794, 794,
- 794, 794, 795, 795, 795, 795, 794, 794,
- 795, 795, 795, 794, 794, 795, 795, 795,
- 795, 794, 795, 795, 794, 795, 795, 794,
- 794, 794, 795, 795, 795, 794, 794, 794,
- 794, 795, 795, 795, 795, 795, 794, 794,
- 794, 794, 795, 794, 795, 795, 794, 795,
- 795, 794, 795, 794, 795, 795, 795, 794,
- 795, 795, 794, 794, 794, 795, 794, 794,
- 794, 794, 794, 794, 794, 795, 795, 795,
- 795, 794, 795, 795, 795, 795, 795, 795,
- 795, 794, 871, 872, 873, 874, 875, 876,
- 877, 878, 879, 801, 880, 881, 882, 883,
- 884, 794, 795, 794, 794, 794, 794, 794,
- 795, 795, 794, 795, 795, 795, 794, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 794, 795, 795, 795, 794, 794, 795,
- 795, 795, 794, 794, 795, 794, 794, 795,
- 795, 795, 795, 795, 794, 794, 794, 794,
- 795, 795, 795, 795, 795, 795, 794, 795,
- 795, 795, 795, 795, 794, 885, 840, 886,
- 887, 888, 801, 889, 890, 846, 801, 794,
- 795, 795, 795, 795, 794, 794, 794, 795,
- 794, 794, 795, 795, 795, 794, 794, 794,
- 795, 795, 794, 851, 794, 846, 801, 801,
- 891, 794, 801, 794, 795, 846, 892, 893,
- 846, 894, 895, 846, 896, 897, 898, 899,
- 900, 901, 846, 902, 903, 904, 846, 905,
- 906, 907, 865, 908, 909, 910, 865, 911,
- 846, 801, 794, 794, 795, 795, 794, 794,
- 794, 795, 795, 795, 795, 794, 795, 795,
- 794, 794, 794, 794, 795, 795, 794, 794,
- 795, 795, 794, 794, 794, 794, 794, 794,
- 795, 795, 795, 794, 794, 794, 795, 794,
- 794, 794, 795, 795, 794, 795, 795, 795,
- 795, 794, 795, 795, 795, 795, 794, 795,
- 795, 795, 795, 795, 795, 794, 794, 794,
- 795, 795, 795, 795, 794, 912, 913, 794,
- 801, 794, 795, 794, 794, 795, 846, 914,
- 915, 916, 917, 896, 918, 919, 920, 921,
- 922, 923, 924, 925, 926, 927, 928, 929,
- 801, 794, 794, 795, 794, 795, 795, 795,
- 795, 795, 795, 795, 794, 795, 795, 795,
- 794, 795, 794, 794, 795, 794, 795, 794,
- 794, 795, 795, 795, 795, 794, 795, 795,
- 795, 794, 794, 795, 795, 795, 795, 794,
- 795, 795, 794, 794, 795, 795, 795, 795,
- 795, 794, 930, 931, 932, 933, 934, 935,
- 936, 937, 938, 939, 940, 936, 942, 943,
- 944, 945, 941, 794, 946, 947, 846, 948,
- 949, 950, 951, 952, 953, 954, 955, 956,
- 846, 801, 957, 958, 959, 960, 846, 961,
- 962, 963, 964, 965, 966, 967, 968, 969,
- 970, 971, 972, 973, 974, 975, 846, 877,
- 801, 976, 794, 795, 795, 795, 795, 795,
- 794, 794, 794, 795, 794, 795, 795, 794,
- 795, 794, 795, 795, 794, 794, 794, 795,
- 795, 795, 794, 794, 794, 795, 795, 795,
- 794, 794, 794, 794, 795, 794, 794, 795,
- 794, 794, 795, 795, 795, 794, 794, 795,
- 794, 795, 795, 795, 794, 795, 795, 795,
- 795, 795, 795, 794, 794, 794, 795, 795,
- 794, 795, 795, 794, 795, 795, 794, 795,
- 795, 794, 795, 795, 795, 795, 795, 795,
- 795, 794, 795, 794, 795, 794, 795, 795,
- 794, 795, 794, 795, 795, 794, 795, 794,
- 795, 794, 977, 948, 978, 979, 980, 981,
- 982, 983, 984, 985, 986, 829, 987, 846,
- 988, 989, 990, 846, 991, 861, 992, 993,
- 994, 995, 996, 997, 998, 999, 846, 794,
- 794, 794, 795, 795, 795, 794, 795, 795,
- 794, 795, 795, 794, 794, 794, 794, 794,
- 795, 795, 795, 795, 794, 795, 795, 795,
- 795, 795, 795, 794, 794, 794, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 794,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 794, 795, 795, 794, 794, 794, 794, 795,
- 795, 795, 794, 794, 794, 795, 794, 794,
- 794, 795, 795, 794, 795, 795, 795, 794,
- 795, 794, 794, 794, 795, 795, 794, 795,
- 795, 795, 794, 795, 795, 795, 794, 794,
- 794, 794, 795, 846, 915, 1000, 1001, 801,
- 846, 801, 794, 794, 795, 794, 795, 846,
- 1000, 801, 794, 846, 1002, 801, 794, 794,
- 795, 846, 1003, 1004, 1005, 906, 1006, 1007,
- 846, 1008, 1009, 1010, 801, 794, 794, 795,
- 795, 795, 794, 795, 795, 794, 795, 795,
- 795, 795, 794, 794, 795, 794, 794, 795,
- 795, 794, 795, 794, 846, 801, 794, 1011,
- 846, 1012, 794, 801, 794, 795, 794, 795,
- 1013, 846, 1014, 1015, 794, 795, 794, 794,
- 794, 795, 795, 795, 795, 794, 1016, 1017,
- 1018, 846, 1019, 1020, 1021, 1022, 1023, 1024,
- 1025, 1026, 1027, 1028, 1029, 1030, 1031, 1032,
- 801, 794, 795, 795, 795, 794, 794, 794,
- 794, 795, 795, 794, 794, 795, 794, 794,
- 794, 794, 794, 794, 794, 795, 794, 795,
- 794, 794, 794, 794, 794, 794, 795, 795,
- 795, 795, 795, 794, 794, 795, 794, 794,
- 794, 795, 794, 794, 795, 794, 794, 795,
- 794, 794, 795, 794, 794, 794, 795, 795,
- 795, 794, 794, 794, 795, 795, 795, 795,
- 794, 1033, 846, 1034, 846, 1035, 1036, 1037,
- 1038, 801, 794, 795, 795, 795, 795, 795,
- 794, 794, 794, 795, 794, 794, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 794, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 794, 795, 795, 795,
- 795, 795, 794, 1039, 846, 801, 794, 795,
- 1040, 846, 831, 801, 794, 795, 1041, 794,
- 801, 794, 795, 846, 1042, 801, 794, 794,
- 795, 1043, 794, 846, 1044, 801, 794, 794,
- 795, 1046, 1045, 795, 795, 795, 795, 1046,
- 1045, 795, 1046, 1045, 1046, 1046, 795, 1046,
- 1045, 795, 1046, 795, 1046, 1045, 795, 1046,
- 795, 1046, 795, 1045, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1045, 795, 795, 1046,
- 1046, 795, 1046, 795, 1046, 1045, 1046, 1046,
- 1046, 1046, 1046, 795, 1046, 795, 1046, 795,
- 1046, 1045, 1046, 1046, 795, 1046, 795, 1046,
- 1045, 1046, 1046, 1046, 1046, 1046, 795, 1046,
- 795, 1046, 1045, 795, 795, 1046, 795, 1046,
- 1045, 1046, 1046, 1046, 795, 1046, 795, 1046,
- 795, 1046, 795, 1046, 1045, 1046, 795, 1046,
- 795, 1046, 1045, 795, 1046, 1046, 1046, 1046,
- 795, 1046, 795, 1046, 795, 1046, 795, 1046,
- 795, 1046, 795, 1046, 1045, 795, 1046, 1045,
- 1046, 1046, 1046, 795, 1046, 795, 1046, 1045,
- 1046, 795, 1046, 795, 1046, 1045, 795, 1046,
- 1046, 1046, 1046, 795, 1046, 795, 1046, 1045,
- 795, 1046, 795, 1046, 795, 1046, 1045, 1046,
- 1046, 795, 1046, 795, 1046, 1045, 795, 1046,
- 795, 1046, 795, 1046, 795, 1045, 1046, 1046,
- 1046, 795, 1046, 795, 1046, 1045, 795, 1046,
- 1045, 1046, 1046, 795, 1046, 1045, 1046, 1046,
- 1046, 795, 1046, 1046, 1046, 1046, 1046, 1046,
- 795, 795, 1046, 795, 1046, 795, 1046, 795,
- 1046, 1045, 1046, 795, 1046, 795, 1046, 1045,
- 795, 1046, 1045, 1046, 795, 1046, 1045, 1046,
- 795, 1046, 1045, 795, 795, 1046, 1045, 795,
- 1046, 795, 1046, 795, 1046, 795, 1046, 795,
- 1046, 795, 1045, 1046, 1046, 795, 1046, 1046,
- 1046, 1046, 795, 795, 1046, 1046, 1046, 1046,
- 1046, 795, 1046, 1046, 1046, 1046, 1046, 1045,
- 795, 1046, 1046, 795, 1046, 795, 1045, 1046,
- 1046, 795, 1046, 1045, 795, 795, 1046, 795,
- 1045, 1046, 1046, 1045, 795, 1046, 795, 1045,
- 1046, 1045, 795, 1046, 795, 1046, 795, 1045,
- 1046, 1046, 1045, 795, 1046, 795, 1046, 795,
- 1046, 1045, 1046, 795, 1046, 795, 1046, 1045,
- 795, 1046, 1045, 795, 795, 1046, 1045, 1046,
- 795, 1045, 1046, 1045, 795, 1046, 795, 1046,
- 795, 1045, 1046, 1045, 795, 795, 1046, 1045,
- 1046, 795, 1046, 795, 1046, 1045, 795, 1046,
- 795, 1045, 1046, 1045, 795, 795, 1046, 795,
- 1045, 1046, 1045, 795, 795, 1046, 1045, 1046,
- 795, 1046, 1045, 1046, 795, 1046, 1045, 1046,
- 795, 1046, 795, 1046, 795, 1045, 1046, 1045,
- 795, 795, 1046, 1045, 1046, 795, 1046, 795,
- 1046, 1045, 795, 1046, 1045, 1046, 1046, 795,
- 1046, 795, 1046, 1045, 1045, 795, 1045, 795,
- 1046, 1046, 795, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1045, 795, 1046, 1046, 1046, 795,
- 1045, 1046, 1046, 1046, 795, 1046, 795, 1046,
- 795, 1046, 795, 1046, 795, 1046, 1045, 795,
- 795, 1046, 1045, 1046, 795, 1046, 1045, 795,
- 795, 1046, 795, 795, 795, 1046, 795, 1046,
- 795, 1046, 795, 1046, 795, 1045, 795, 1046,
- 795, 1046, 795, 1045, 1046, 1045, 795, 1046,
- 795, 1045, 1046, 795, 1046, 1046, 1046, 1045,
- 795, 1046, 795, 795, 1046, 795, 1045, 1046,
- 1046, 1045, 795, 1046, 1046, 1046, 1046, 795,
- 1046, 795, 1045, 1046, 1046, 1046, 795, 1046,
- 1045, 1046, 795, 1046, 795, 1046, 795, 1046,
- 795, 1046, 1045, 1046, 1046, 795, 1046, 1045,
- 795, 1046, 795, 1046, 795, 1045, 1046, 1046,
- 1045, 795, 1046, 795, 1045, 1046, 1045, 795,
- 1046, 1045, 795, 1046, 795, 1046, 1045, 1046,
- 1046, 1046, 1045, 795, 795, 795, 1046, 1045,
- 795, 1046, 795, 1045, 1046, 1045, 795, 1046,
- 795, 1046, 795, 1045, 1046, 1046, 1046, 1045,
- 795, 1046, 795, 1045, 1046, 1046, 1046, 1046,
- 1045, 795, 1046, 795, 1046, 1045, 795, 795,
- 1046, 795, 1046, 1045, 1046, 795, 1046, 795,
- 1045, 1046, 1046, 1045, 795, 1046, 795, 1046,
- 1045, 795, 1046, 1046, 1046, 795, 1046, 795,
- 1045, 795, 1046, 1045, 1046, 795, 795, 1046,
- 795, 1046, 795, 1045, 1046, 1046, 1046, 1046,
- 1045, 795, 1046, 795, 1046, 795, 1046, 795,
- 1046, 795, 1046, 1045, 1046, 1046, 1046, 795,
- 1046, 795, 1046, 795, 1046, 795, 1045, 1046,
- 1046, 795, 795, 1046, 1045, 1046, 795, 1046,
- 1046, 1045, 795, 1046, 795, 1046, 1045, 795,
- 795, 1046, 1046, 1046, 1046, 795, 1046, 795,
- 1046, 795, 1045, 1046, 1046, 795, 1045, 1046,
- 1045, 795, 1046, 795, 1045, 1046, 1045, 795,
- 1046, 795, 1045, 1046, 795, 1046, 1046, 1045,
- 795, 1046, 1046, 795, 1045, 1046, 1045, 795,
- 1046, 795, 1046, 1045, 1046, 795, 1046, 795,
- 1045, 1046, 1045, 795, 1046, 795, 1046, 795,
- 1046, 795, 1046, 795, 1046, 1045, 1047, 1045,
- 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
- 1056, 1057, 1058, 1050, 1059, 1060, 1061, 1062,
- 1063, 1050, 1064, 1065, 1066, 1067, 1068, 1069,
- 1070, 1071, 1072, 1073, 1074, 1075, 1076, 1077,
- 1078, 1050, 1079, 1047, 1059, 1047, 1080, 1047,
- 1045, 1046, 1046, 1046, 1046, 795, 1045, 1046,
- 1046, 1045, 795, 1046, 1045, 795, 795, 1046,
- 1045, 795, 1046, 795, 1045, 1046, 1045, 795,
- 795, 1046, 795, 1045, 1046, 1046, 1045, 795,
- 1046, 1046, 1046, 1045, 795, 1046, 795, 1046,
- 1046, 1045, 795, 795, 1046, 795, 1045, 1046,
- 1045, 795, 1046, 1045, 795, 795, 1046, 795,
- 1046, 1045, 795, 1046, 795, 795, 1046, 795,
- 1046, 795, 1045, 1046, 1046, 1045, 795, 1046,
- 1046, 795, 1046, 1045, 795, 1046, 795, 1046,
- 1045, 795, 1046, 795, 1045, 795, 1046, 1046,
- 1046, 795, 1046, 1045, 1046, 795, 1046, 1045,
- 795, 1046, 1045, 1046, 795, 1046, 1045, 795,
- 1046, 1045, 795, 1046, 795, 1046, 1045, 795,
- 1046, 1045, 795, 1046, 1045, 1081, 1082, 1083,
- 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091,
- 1092, 1052, 1093, 1094, 1095, 1096, 1097, 1094,
- 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105,
- 1106, 1047, 1045, 1046, 795, 1046, 1045, 1046,
- 795, 1046, 1045, 1046, 795, 1046, 1045, 1046,
- 795, 1046, 1045, 795, 1046, 795, 1046, 1045,
- 1046, 795, 1046, 1045, 1046, 795, 795, 795,
- 1046, 1045, 1046, 795, 1046, 1045, 1046, 1046,
- 1046, 1046, 795, 1046, 795, 1045, 1046, 1045,
- 795, 795, 1046, 795, 1046, 1045, 1046, 795,
- 1046, 1045, 795, 1046, 1045, 1046, 1046, 795,
- 1046, 1045, 795, 1046, 1045, 1046, 795, 1046,
- 1045, 795, 1046, 1045, 795, 1046, 1045, 795,
- 1046, 1045, 1046, 1045, 795, 795, 1046, 1045,
- 1046, 795, 1046, 1045, 795, 1046, 795, 1045,
- 1046, 1045, 795, 1050, 1107, 1047, 1050, 1108,
- 1050, 1109, 1059, 1047, 1045, 1046, 1045, 795,
- 1046, 1045, 795, 1050, 1108, 1059, 1047, 1045,
- 1050, 1110, 1047, 1059, 1047, 1045, 1046, 1045,
- 795, 1050, 1111, 1068, 1112, 1094, 1113, 1106,
- 1050, 1114, 1115, 1116, 1047, 1059, 1047, 1045,
- 1046, 1045, 795, 1046, 795, 1046, 1045, 795,
- 1046, 795, 1046, 795, 1045, 1046, 1046, 1045,
- 795, 1046, 795, 1046, 1045, 795, 1046, 1045,
- 1050, 1059, 801, 1045, 1117, 1050, 1118, 1059,
- 1047, 1045, 801, 1046, 1045, 795, 1046, 1045,
- 795, 1119, 1050, 1120, 1121, 1047, 1045, 795,
- 1046, 1045, 1046, 1046, 1045, 795, 795, 1046,
- 795, 1046, 1045, 1050, 1122, 1123, 1124, 1125,
- 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1047,
- 1059, 1047, 1045, 1046, 795, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 795, 1046, 795, 1046,
- 1046, 1046, 1046, 1046, 1046, 1045, 795, 1046,
- 1046, 795, 1046, 795, 1045, 1046, 795, 1046,
- 1046, 1046, 795, 1046, 1046, 795, 1046, 1046,
- 795, 1046, 1046, 795, 1046, 1046, 1045, 795,
- 1050, 1133, 1050, 1109, 1134, 1135, 1136, 1047,
- 1059, 1047, 1045, 1046, 1045, 795, 1046, 1046,
- 1046, 795, 1046, 1046, 1046, 795, 1046, 795,
- 1046, 1045, 795, 795, 795, 795, 1046, 1046,
- 795, 795, 795, 795, 795, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 795, 1046, 795, 1046,
- 795, 1045, 1046, 1046, 1046, 795, 1046, 795,
- 1046, 1045, 1059, 801, 1137, 1050, 1059, 801,
- 1046, 1045, 795, 1138, 1050, 1139, 1059, 801,
- 1046, 1045, 795, 1046, 795, 1140, 1059, 1047,
- 1045, 801, 1046, 1045, 795, 1050, 1141, 1047,
- 1059, 1047, 1045, 1046, 1045, 795, 1142, 1143,
- 1144, 1142, 1145, 1146, 1147, 1149, 1150, 1151,
- 1152, 1153, 1154, 1155, 670, 670, 419, 1156,
- 1157, 1158, 1159, 670, 1162, 1163, 1165, 1166,
- 1167, 1161, 1168, 1169, 1170, 1171, 1172, 1173,
- 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181,
- 1182, 1183, 1184, 1185, 1186, 1187, 1189, 1190,
- 1191, 1192, 1193, 1194, 670, 1148, 7, 1148,
- 419, 1148, 419, 1161, 1164, 1188, 1195, 1160,
- 1142, 1142, 1196, 1143, 1197, 1199, 1198, 4,
- 1147, 1201, 1198, 1202, 1198, 2, 1147, 1198,
- 6, 8, 8, 7, 1203, 1204, 1198, 1205,
- 1206, 1198, 1207, 1208, 1198, 1209, 1198, 419,
- 419, 1211, 1212, 489, 470, 1213, 470, 1214,
- 1215, 1216, 1217, 1218, 1219, 1220, 1221, 1222,
- 1223, 1224, 544, 1225, 520, 1226, 1227, 1228,
- 1229, 1230, 1231, 1232, 1233, 1234, 1235, 1236,
- 1237, 419, 419, 419, 425, 565, 1210, 1238,
- 1198, 1239, 1198, 670, 1240, 419, 419, 419,
- 670, 1240, 670, 670, 419, 1240, 419, 1240,
- 419, 1240, 419, 670, 670, 670, 670, 670,
- 1240, 419, 670, 670, 670, 419, 670, 419,
- 1240, 419, 670, 670, 670, 670, 419, 1240,
- 670, 419, 670, 419, 670, 419, 670, 670,
- 419, 670, 1240, 419, 670, 419, 670, 419,
- 670, 1240, 670, 419, 1240, 670, 419, 670,
- 419, 1240, 670, 670, 670, 670, 670, 1240,
- 419, 419, 670, 419, 670, 1240, 670, 419,
- 1240, 670, 670, 1240, 419, 419, 670, 419,
- 670, 419, 670, 1240, 1241, 1242, 1243, 1244,
- 1245, 1246, 1247, 1248, 1249, 1250, 1251, 715,
- 1252, 1253, 1254, 1255, 1256, 1257, 1258, 1259,
- 1260, 1261, 1262, 1263, 1262, 1264, 1265, 1266,
- 1267, 1268, 671, 1240, 1269, 1270, 1271, 1272,
- 1273, 1274, 1275, 1276, 1277, 1278, 1279, 1280,
- 1281, 1282, 1283, 1284, 1285, 1286, 1287, 725,
- 1288, 1289, 1290, 692, 1291, 1292, 1293, 1294,
- 1295, 1296, 671, 1297, 1298, 1299, 1300, 1301,
- 1302, 1303, 1304, 674, 1305, 671, 674, 1306,
- 1307, 1308, 1309, 683, 1240, 1310, 1311, 1312,
- 1313, 703, 1314, 1315, 683, 1316, 1317, 1318,
- 1319, 1320, 671, 1240, 1321, 1280, 1322, 1323,
- 1324, 683, 1325, 1326, 674, 671, 683, 425,
- 1240, 1290, 671, 674, 683, 425, 683, 425,
- 1327, 683, 1240, 425, 674, 1328, 1329, 674,
- 1330, 1331, 681, 1332, 1333, 1334, 1335, 1336,
- 1286, 1337, 1338, 1339, 1340, 1341, 1342, 1343,
- 1344, 1345, 1346, 1347, 1348, 1305, 1349, 674,
- 683, 425, 1240, 1350, 1351, 683, 671, 1240,
- 425, 671, 1240, 674, 1352, 731, 1353, 1354,
- 1355, 1356, 1357, 1358, 1359, 1360, 671, 1361,
- 1362, 1363, 1364, 1365, 1366, 671, 683, 1240,
- 1368, 1369, 1370, 1371, 1372, 1373, 1374, 1375,
- 1376, 1377, 1378, 1374, 1380, 1381, 1382, 1383,
- 1367, 1379, 1367, 1240, 1367, 1240, 1384, 1384,
- 1385, 1386, 1387, 1388, 1389, 1390, 1391, 1392,
- 1389, 767, 1393, 1393, 1393, 1394, 1393, 1393,
- 768, 769, 770, 1393, 767, 1384, 1384, 1395,
- 1398, 1399, 1397, 1400, 1401, 1400, 1402, 1393,
- 1404, 1403, 1398, 1405, 1397, 1407, 1406, 1396,
- 1396, 1396, 768, 769, 770, 1396, 767, 767,
- 1408, 773, 1408, 1409, 1408, 775, 1410, 1411,
- 1412, 1413, 1414, 1415, 1416, 1413, 776, 775,
- 1410, 1417, 1417, 777, 779, 1418, 1417, 776,
- 1420, 1421, 1419, 1420, 1421, 1422, 1419, 775,
- 1410, 1423, 1417, 775, 1410, 1417, 1425, 1424,
- 1427, 1426, 776, 1428, 777, 1428, 779, 1428,
- 785, 1429, 1430, 1431, 1432, 1433, 1434, 1435,
- 1432, 786, 785, 1429, 1436, 1436, 787, 789,
- 1437, 1436, 786, 1439, 1440, 1438, 1439, 1440,
- 1441, 1438, 785, 1429, 1442, 1436, 785, 1429,
- 1436, 1444, 1443, 1446, 1445, 786, 1447, 787,
- 1447, 789, 1447, 795, 1450, 1451, 1453, 1454,
- 1455, 1449, 1456, 1457, 1458, 1459, 1460, 1461,
- 1462, 1463, 1464, 1465, 1466, 1467, 1468, 1469,
- 1470, 1471, 1472, 1473, 1474, 1475, 1477, 1478,
- 1479, 1480, 1481, 1482, 795, 795, 1448, 1449,
- 1452, 1476, 1483, 1448, 1046, 795, 795, 1485,
- 1486, 865, 846, 1487, 846, 1488, 1489, 1490,
- 1491, 1492, 1493, 1494, 1495, 1496, 1497, 1498,
- 920, 1499, 896, 1500, 1501, 1502, 1503, 1504,
- 1505, 1506, 1507, 1508, 1509, 1510, 1511, 795,
- 795, 795, 801, 941, 1484, 1046, 1512, 795,
- 795, 795, 1046, 1512, 1046, 1046, 795, 1512,
- 795, 1512, 795, 1512, 795, 1046, 1046, 1046,
- 1046, 1046, 1512, 795, 1046, 1046, 1046, 795,
- 1046, 795, 1512, 795, 1046, 1046, 1046, 1046,
- 795, 1512, 1046, 795, 1046, 795, 1046, 795,
- 1046, 1046, 795, 1046, 1512, 795, 1046, 795,
- 1046, 795, 1046, 1512, 1046, 795, 1512, 1046,
- 795, 1046, 795, 1512, 1046, 1046, 1046, 1046,
- 1046, 1512, 795, 795, 1046, 795, 1046, 1512,
- 1046, 795, 1512, 1046, 1046, 1512, 795, 795,
- 1046, 795, 1046, 795, 1046, 1512, 1513, 1514,
- 1515, 1516, 1517, 1518, 1519, 1520, 1521, 1522,
- 1523, 1091, 1524, 1525, 1526, 1527, 1528, 1529,
- 1530, 1531, 1532, 1533, 1534, 1535, 1534, 1536,
- 1537, 1538, 1539, 1540, 1047, 1512, 1541, 1542,
- 1543, 1544, 1545, 1546, 1547, 1548, 1549, 1550,
- 1551, 1552, 1553, 1554, 1555, 1556, 1557, 1558,
- 1559, 1101, 1560, 1561, 1562, 1068, 1563, 1564,
- 1565, 1566, 1567, 1568, 1047, 1569, 1570, 1571,
- 1572, 1573, 1574, 1575, 1576, 1050, 1577, 1047,
- 1050, 1578, 1579, 1580, 1581, 1059, 1512, 1582,
- 1583, 1584, 1585, 1079, 1586, 1587, 1059, 1588,
- 1589, 1590, 1591, 1592, 1047, 1512, 1593, 1552,
- 1594, 1595, 1596, 1059, 1597, 1598, 1050, 1047,
- 1059, 801, 1512, 1562, 1047, 1050, 1059, 801,
- 1059, 801, 1599, 1059, 1512, 801, 1050, 1600,
- 1601, 1050, 1602, 1603, 1057, 1604, 1605, 1606,
- 1607, 1608, 1558, 1609, 1610, 1611, 1612, 1613,
- 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1577,
- 1621, 1050, 1059, 801, 1512, 1622, 1623, 1059,
- 1047, 1512, 801, 1047, 1512, 1050, 1624, 1107,
- 1625, 1626, 1627, 1628, 1629, 1630, 1631, 1632,
- 1047, 1633, 1634, 1635, 1636, 1637, 1638, 1047,
- 1059, 1512, 1640, 1641, 1642, 1643, 1644, 1645,
- 1646, 1647, 1648, 1649, 1650, 1646, 1652, 1653,
- 1654, 1655, 1639, 1651, 1639, 1512, 1639, 1512,
-}
-
-var _hcltok_trans_targs []int16 = []int16{
- 1459, 1459, 2, 3, 1459, 1459, 4, 1467,
- 5, 6, 8, 9, 286, 12, 13, 14,
- 15, 16, 287, 288, 19, 289, 21, 22,
- 290, 291, 292, 293, 294, 295, 296, 297,
- 298, 299, 328, 348, 353, 127, 128, 129,
- 356, 151, 371, 375, 1459, 10, 11, 17,
- 18, 20, 23, 24, 25, 26, 27, 28,
- 29, 30, 31, 32, 64, 105, 120, 131,
- 154, 170, 283, 33, 34, 35, 36, 37,
- 38, 39, 40, 41, 42, 43, 44, 45,
- 46, 47, 48, 49, 50, 51, 52, 53,
- 54, 55, 56, 57, 58, 59, 60, 61,
- 62, 63, 65, 66, 67, 68, 69, 70,
- 71, 72, 73, 74, 75, 76, 77, 78,
- 79, 80, 81, 82, 83, 84, 85, 86,
- 87, 88, 89, 90, 91, 92, 93, 94,
- 95, 96, 97, 98, 99, 100, 101, 102,
- 103, 104, 106, 107, 108, 109, 110, 111,
- 112, 113, 114, 115, 116, 117, 118, 119,
- 121, 122, 123, 124, 125, 126, 130, 132,
- 133, 134, 135, 136, 137, 138, 139, 140,
- 141, 142, 143, 144, 145, 146, 147, 148,
- 149, 150, 152, 153, 155, 156, 157, 158,
- 159, 160, 161, 162, 163, 164, 165, 166,
- 167, 168, 169, 171, 203, 227, 230, 231,
- 233, 242, 243, 246, 250, 268, 275, 277,
- 279, 281, 172, 173, 174, 175, 176, 177,
- 178, 179, 180, 181, 182, 183, 184, 185,
- 186, 187, 188, 189, 190, 191, 192, 193,
- 194, 195, 196, 197, 198, 199, 200, 201,
- 202, 204, 205, 206, 207, 208, 209, 210,
- 211, 212, 213, 214, 215, 216, 217, 218,
- 219, 220, 221, 222, 223, 224, 225, 226,
- 228, 229, 232, 234, 235, 236, 237, 238,
- 239, 240, 241, 244, 245, 247, 248, 249,
- 251, 252, 253, 254, 255, 256, 257, 258,
- 259, 260, 261, 262, 263, 264, 265, 266,
- 267, 269, 270, 271, 272, 273, 274, 276,
- 278, 280, 282, 284, 285, 300, 301, 302,
- 303, 304, 305, 306, 307, 308, 309, 310,
- 311, 312, 313, 314, 315, 316, 317, 318,
- 319, 320, 321, 322, 323, 324, 325, 326,
- 327, 329, 330, 331, 332, 333, 334, 335,
- 336, 337, 338, 339, 340, 341, 342, 343,
- 344, 345, 346, 347, 349, 350, 351, 352,
- 354, 355, 357, 358, 359, 360, 361, 362,
- 363, 364, 365, 366, 367, 368, 369, 370,
- 372, 373, 374, 376, 382, 404, 409, 411,
- 413, 377, 378, 379, 380, 381, 383, 384,
- 385, 386, 387, 388, 389, 390, 391, 392,
- 393, 394, 395, 396, 397, 398, 399, 400,
- 401, 402, 403, 405, 406, 407, 408, 410,
- 412, 414, 1459, 1472, 1459, 437, 438, 439,
- 440, 417, 441, 442, 443, 444, 445, 446,
- 447, 448, 449, 450, 451, 452, 453, 454,
- 455, 456, 457, 458, 459, 460, 461, 462,
- 463, 464, 465, 466, 467, 469, 470, 471,
- 472, 473, 474, 475, 476, 477, 478, 479,
- 480, 481, 482, 483, 484, 485, 419, 486,
- 487, 488, 489, 490, 491, 492, 493, 494,
- 495, 496, 497, 498, 499, 500, 501, 502,
- 503, 418, 504, 505, 506, 507, 508, 510,
- 511, 512, 513, 514, 515, 516, 517, 518,
- 519, 520, 521, 522, 523, 525, 526, 527,
- 528, 529, 530, 534, 536, 537, 538, 539,
- 434, 540, 541, 542, 543, 544, 545, 546,
- 547, 548, 549, 550, 551, 552, 553, 554,
- 556, 557, 559, 560, 561, 562, 563, 564,
- 432, 565, 566, 567, 568, 569, 570, 571,
- 572, 573, 575, 607, 631, 634, 635, 637,
- 646, 647, 650, 654, 672, 532, 679, 681,
- 683, 685, 576, 577, 578, 579, 580, 581,
- 582, 583, 584, 585, 586, 587, 588, 589,
- 590, 591, 592, 593, 594, 595, 596, 597,
- 598, 599, 600, 601, 602, 603, 604, 605,
- 606, 608, 609, 610, 611, 612, 613, 614,
- 615, 616, 617, 618, 619, 620, 621, 622,
- 623, 624, 625, 626, 627, 628, 629, 630,
- 632, 633, 636, 638, 639, 640, 641, 642,
- 643, 644, 645, 648, 649, 651, 652, 653,
- 655, 656, 657, 658, 659, 660, 661, 662,
- 663, 664, 665, 666, 667, 668, 669, 670,
- 671, 673, 674, 675, 676, 677, 678, 680,
- 682, 684, 686, 688, 689, 1459, 1459, 690,
- 827, 828, 759, 829, 830, 831, 832, 833,
- 834, 788, 835, 724, 836, 837, 838, 839,
- 840, 841, 842, 843, 744, 844, 845, 846,
- 847, 848, 849, 850, 851, 852, 853, 769,
- 854, 856, 857, 858, 859, 860, 861, 862,
- 863, 864, 865, 702, 866, 867, 868, 869,
- 870, 871, 872, 873, 874, 740, 875, 876,
- 877, 878, 879, 810, 881, 882, 885, 887,
- 888, 889, 890, 891, 892, 895, 896, 898,
- 899, 900, 902, 903, 904, 905, 906, 907,
- 908, 909, 910, 911, 912, 914, 915, 916,
- 917, 920, 922, 923, 925, 927, 1510, 1511,
- 929, 930, 931, 1510, 1510, 932, 1524, 1524,
- 1525, 935, 1524, 936, 1526, 1527, 1530, 1531,
- 1535, 1535, 1536, 941, 1535, 942, 1537, 1538,
- 1541, 1542, 1546, 1547, 1546, 968, 969, 970,
- 971, 948, 972, 973, 974, 975, 976, 977,
- 978, 979, 980, 981, 982, 983, 984, 985,
- 986, 987, 988, 989, 990, 991, 992, 993,
- 994, 995, 996, 997, 998, 1000, 1001, 1002,
- 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010,
- 1011, 1012, 1013, 1014, 1015, 1016, 950, 1017,
- 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025,
- 1026, 1027, 1028, 1029, 1030, 1031, 1032, 1033,
- 1034, 949, 1035, 1036, 1037, 1038, 1039, 1041,
- 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049,
- 1050, 1051, 1052, 1053, 1054, 1056, 1057, 1058,
- 1059, 1060, 1061, 1065, 1067, 1068, 1069, 1070,
- 965, 1071, 1072, 1073, 1074, 1075, 1076, 1077,
- 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085,
- 1087, 1088, 1090, 1091, 1092, 1093, 1094, 1095,
- 963, 1096, 1097, 1098, 1099, 1100, 1101, 1102,
- 1103, 1104, 1106, 1138, 1162, 1165, 1166, 1168,
- 1177, 1178, 1181, 1185, 1203, 1063, 1210, 1212,
- 1214, 1216, 1107, 1108, 1109, 1110, 1111, 1112,
- 1113, 1114, 1115, 1116, 1117, 1118, 1119, 1120,
- 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128,
- 1129, 1130, 1131, 1132, 1133, 1134, 1135, 1136,
- 1137, 1139, 1140, 1141, 1142, 1143, 1144, 1145,
- 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153,
- 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161,
- 1163, 1164, 1167, 1169, 1170, 1171, 1172, 1173,
- 1174, 1175, 1176, 1179, 1180, 1182, 1183, 1184,
- 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193,
- 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201,
- 1202, 1204, 1205, 1206, 1207, 1208, 1209, 1211,
- 1213, 1215, 1217, 1219, 1220, 1546, 1546, 1221,
- 1358, 1359, 1290, 1360, 1361, 1362, 1363, 1364,
- 1365, 1319, 1366, 1255, 1367, 1368, 1369, 1370,
- 1371, 1372, 1373, 1374, 1275, 1375, 1376, 1377,
- 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1300,
- 1385, 1387, 1388, 1389, 1390, 1391, 1392, 1393,
- 1394, 1395, 1396, 1233, 1397, 1398, 1399, 1400,
- 1401, 1402, 1403, 1404, 1405, 1271, 1406, 1407,
- 1408, 1409, 1410, 1341, 1412, 1413, 1416, 1418,
- 1419, 1420, 1421, 1422, 1423, 1426, 1427, 1429,
- 1430, 1431, 1433, 1434, 1435, 1436, 1437, 1438,
- 1439, 1440, 1441, 1442, 1443, 1445, 1446, 1447,
- 1448, 1451, 1453, 1454, 1456, 1458, 1460, 1459,
- 1461, 1462, 1459, 1463, 1459, 1464, 1465, 1466,
- 1468, 1469, 1470, 1471, 1459, 1473, 1459, 1474,
- 1459, 1475, 1476, 1477, 1478, 1479, 1480, 1481,
- 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489,
- 1490, 1491, 1492, 1493, 1494, 1495, 1496, 1497,
- 1498, 1499, 1500, 1501, 1502, 1503, 1504, 1505,
- 1506, 1507, 1508, 1509, 1459, 1459, 1459, 1459,
- 1459, 1459, 1, 1459, 1459, 7, 1459, 1459,
- 1459, 1459, 1459, 415, 416, 420, 421, 422,
- 423, 424, 425, 426, 427, 428, 429, 430,
- 431, 433, 435, 436, 468, 509, 524, 531,
- 533, 535, 555, 558, 574, 687, 1459, 1459,
- 1459, 691, 692, 693, 694, 695, 696, 697,
- 698, 699, 700, 701, 703, 704, 705, 706,
- 707, 708, 709, 710, 711, 712, 713, 714,
- 715, 716, 717, 718, 719, 720, 721, 722,
- 723, 725, 726, 727, 728, 729, 730, 731,
- 732, 733, 734, 735, 736, 737, 738, 739,
- 741, 742, 743, 745, 746, 747, 748, 749,
- 750, 751, 752, 753, 754, 755, 756, 757,
- 758, 760, 761, 762, 763, 764, 765, 766,
- 767, 768, 770, 771, 772, 773, 774, 775,
- 776, 777, 778, 779, 780, 781, 782, 783,
- 784, 785, 786, 787, 789, 790, 791, 792,
- 793, 794, 795, 796, 797, 798, 799, 800,
- 801, 802, 803, 804, 805, 806, 807, 808,
- 809, 811, 812, 813, 814, 815, 816, 817,
- 818, 819, 820, 821, 822, 823, 824, 825,
- 826, 855, 880, 883, 884, 886, 893, 894,
- 897, 901, 913, 918, 919, 921, 924, 926,
- 1512, 1510, 1513, 1518, 1520, 1510, 1521, 1522,
- 1523, 1510, 928, 1510, 1510, 1514, 1515, 1517,
- 1510, 1516, 1510, 1510, 1510, 1519, 1510, 1510,
- 1510, 933, 934, 938, 939, 1524, 1532, 1533,
- 1534, 1524, 937, 1524, 1524, 934, 1528, 1529,
- 1524, 1524, 1524, 1524, 1524, 940, 944, 945,
- 1535, 1543, 1544, 1545, 1535, 943, 1535, 1535,
- 940, 1539, 1540, 1535, 1535, 1535, 1535, 1535,
- 1546, 1548, 1549, 1550, 1551, 1552, 1553, 1554,
- 1555, 1556, 1557, 1558, 1559, 1560, 1561, 1562,
- 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570,
- 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578,
- 1579, 1580, 1581, 1582, 1546, 946, 947, 951,
- 952, 953, 954, 955, 956, 957, 958, 959,
- 960, 961, 962, 964, 966, 967, 999, 1040,
- 1055, 1062, 1064, 1066, 1086, 1089, 1105, 1218,
- 1546, 1222, 1223, 1224, 1225, 1226, 1227, 1228,
- 1229, 1230, 1231, 1232, 1234, 1235, 1236, 1237,
- 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1245,
- 1246, 1247, 1248, 1249, 1250, 1251, 1252, 1253,
- 1254, 1256, 1257, 1258, 1259, 1260, 1261, 1262,
- 1263, 1264, 1265, 1266, 1267, 1268, 1269, 1270,
- 1272, 1273, 1274, 1276, 1277, 1278, 1279, 1280,
- 1281, 1282, 1283, 1284, 1285, 1286, 1287, 1288,
- 1289, 1291, 1292, 1293, 1294, 1295, 1296, 1297,
- 1298, 1299, 1301, 1302, 1303, 1304, 1305, 1306,
- 1307, 1308, 1309, 1310, 1311, 1312, 1313, 1314,
- 1315, 1316, 1317, 1318, 1320, 1321, 1322, 1323,
- 1324, 1325, 1326, 1327, 1328, 1329, 1330, 1331,
- 1332, 1333, 1334, 1335, 1336, 1337, 1338, 1339,
- 1340, 1342, 1343, 1344, 1345, 1346, 1347, 1348,
- 1349, 1350, 1351, 1352, 1353, 1354, 1355, 1356,
- 1357, 1386, 1411, 1414, 1415, 1417, 1424, 1425,
- 1428, 1432, 1444, 1449, 1450, 1452, 1455, 1457,
-}
-
-var _hcltok_trans_actions []byte = []byte{
- 147, 109, 0, 0, 91, 143, 0, 7,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 123, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 145, 195, 151, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 149, 127, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 31, 171,
- 0, 0, 0, 35, 33, 0, 55, 41,
- 177, 0, 53, 0, 177, 177, 0, 0,
- 75, 61, 183, 0, 73, 0, 183, 183,
- 0, 0, 85, 189, 89, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 87, 79, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 93,
- 0, 0, 121, 0, 113, 0, 7, 7,
- 0, 7, 0, 0, 115, 0, 117, 0,
- 125, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 7,
- 7, 7, 198, 198, 198, 198, 198, 198,
- 7, 7, 198, 7, 129, 141, 137, 97,
- 135, 103, 0, 131, 107, 0, 101, 95,
- 111, 99, 133, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 105, 119,
- 139, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 13, 0, 0, 174, 17, 0, 7,
- 7, 23, 0, 25, 27, 0, 0, 0,
- 153, 0, 15, 19, 9, 0, 21, 11,
- 29, 0, 0, 0, 0, 43, 0, 180,
- 180, 49, 0, 159, 156, 1, 177, 177,
- 45, 37, 47, 39, 51, 0, 0, 0,
- 63, 0, 186, 186, 69, 0, 165, 162,
- 1, 183, 183, 65, 57, 67, 59, 71,
- 77, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 7,
- 7, 7, 192, 192, 192, 192, 192, 192,
- 7, 7, 192, 7, 81, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 83, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
-}
-
-var _hcltok_to_state_actions []byte = []byte{
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 3, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 3, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 168, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 168,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 3, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0,
-}
-
-var _hcltok_from_state_actions []byte = []byte{
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 5, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 5, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 5, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 5,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 5, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0,
-}
-
-var _hcltok_eof_trans []int16 = []int16{
- 0, 1, 1, 1, 6, 6, 6, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 419,
- 419, 421, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 419, 419, 419, 419, 419, 419,
- 419, 419, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 670, 670, 670, 670, 670, 670, 670, 670,
- 767, 772, 772, 772, 773, 773, 775, 775,
- 775, 779, 0, 0, 785, 785, 785, 789,
- 0, 0, 795, 795, 797, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 795, 795, 795,
- 795, 795, 795, 795, 795, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
- 1046, 1046, 1046, 0, 1197, 1198, 1199, 1201,
- 1199, 1199, 1199, 1204, 1199, 1199, 1199, 1199,
- 1211, 1199, 1199, 1241, 1241, 1241, 1241, 1241,
- 1241, 1241, 1241, 1241, 1241, 1241, 1241, 1241,
- 1241, 1241, 1241, 1241, 1241, 1241, 1241, 1241,
- 1241, 1241, 1241, 1241, 1241, 1241, 1241, 1241,
- 1241, 1241, 1241, 1241, 1241, 1241, 0, 1394,
- 1396, 1397, 1401, 1401, 1394, 1404, 1397, 1407,
- 1397, 1409, 1409, 1409, 0, 1418, 1420, 1420,
- 1418, 1418, 1425, 1427, 1429, 1429, 1429, 0,
- 1437, 1439, 1439, 1437, 1437, 1444, 1446, 1448,
- 1448, 1448, 0, 1485, 1513, 1513, 1513, 1513,
- 1513, 1513, 1513, 1513, 1513, 1513, 1513, 1513,
- 1513, 1513, 1513, 1513, 1513, 1513, 1513, 1513,
- 1513, 1513, 1513, 1513, 1513, 1513, 1513, 1513,
- 1513, 1513, 1513, 1513, 1513, 1513, 1513,
-}
-
-const hcltok_start int = 1459
-const hcltok_first_final int = 1459
-const hcltok_error int = 0
-
-const hcltok_en_stringTemplate int = 1510
-const hcltok_en_heredocTemplate int = 1524
-const hcltok_en_bareTemplate int = 1535
-const hcltok_en_identOnly int = 1546
-const hcltok_en_main int = 1459
-
-//line scan_tokens.rl:18
-
-func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token {
- stripData := stripUTF8BOM(data)
- start.Byte += len(data) - len(stripData)
- data = stripData
-
- f := &tokenAccum{
- Filename: filename,
- Bytes: data,
- Pos: start,
- StartByte: start.Byte,
- }
-
-//line scan_tokens.rl:317
-
- // Ragel state
- p := 0 // "Pointer" into data
- pe := len(data) // End-of-data "pointer"
- ts := 0
- te := 0
- act := 0
- eof := pe
- var stack []int
- var top int
-
- var cs int // current state
- switch mode {
- case scanNormal:
- cs = hcltok_en_main
- case scanTemplate:
- cs = hcltok_en_bareTemplate
- case scanIdentOnly:
- cs = hcltok_en_identOnly
- default:
- panic("invalid scanMode")
- }
-
- braces := 0
- var retBraces []int // stack of brace levels that cause us to use fret
- var heredocs []heredocInProgress // stack of heredocs we're currently processing
-
-//line scan_tokens.rl:352
-
- // Make Go compiler happy
- _ = ts
- _ = te
- _ = act
- _ = eof
-
- token := func(ty TokenType) {
- f.emitToken(ty, ts, te)
- }
- selfToken := func() {
- b := data[ts:te]
- if len(b) != 1 {
- // should never happen
- panic("selfToken only works for single-character tokens")
- }
- f.emitToken(TokenType(b[0]), ts, te)
- }
-
-//line scan_tokens.go:4292
- {
- top = 0
- ts = 0
- te = 0
- act = 0
- }
-
-//line scan_tokens.go:4300
- {
- var _klen int
- var _trans int
- var _acts int
- var _nacts uint
- var _keys int
- if p == pe {
- goto _test_eof
- }
- if cs == 0 {
- goto _out
- }
- _resume:
- _acts = int(_hcltok_from_state_actions[cs])
- _nacts = uint(_hcltok_actions[_acts])
- _acts++
- for ; _nacts > 0; _nacts-- {
- _acts++
- switch _hcltok_actions[_acts-1] {
- case 3:
-//line NONE:1
- ts = p
-
-//line scan_tokens.go:4323
- }
- }
-
- _keys = int(_hcltok_key_offsets[cs])
- _trans = int(_hcltok_index_offsets[cs])
-
- _klen = int(_hcltok_single_lengths[cs])
- if _klen > 0 {
- _lower := int(_keys)
- var _mid int
- _upper := int(_keys + _klen - 1)
- for {
- if _upper < _lower {
- break
- }
-
- _mid = _lower + ((_upper - _lower) >> 1)
- switch {
- case data[p] < _hcltok_trans_keys[_mid]:
- _upper = _mid - 1
- case data[p] > _hcltok_trans_keys[_mid]:
- _lower = _mid + 1
- default:
- _trans += int(_mid - int(_keys))
- goto _match
- }
- }
- _keys += _klen
- _trans += _klen
- }
-
- _klen = int(_hcltok_range_lengths[cs])
- if _klen > 0 {
- _lower := int(_keys)
- var _mid int
- _upper := int(_keys + (_klen << 1) - 2)
- for {
- if _upper < _lower {
- break
- }
-
- _mid = _lower + (((_upper - _lower) >> 1) & ^1)
- switch {
- case data[p] < _hcltok_trans_keys[_mid]:
- _upper = _mid - 2
- case data[p] > _hcltok_trans_keys[_mid+1]:
- _lower = _mid + 2
- default:
- _trans += int((_mid - int(_keys)) >> 1)
- goto _match
- }
- }
- _trans += _klen
- }
-
- _match:
- _trans = int(_hcltok_indicies[_trans])
- _eof_trans:
- cs = int(_hcltok_trans_targs[_trans])
-
- if _hcltok_trans_actions[_trans] == 0 {
- goto _again
- }
-
- _acts = int(_hcltok_trans_actions[_trans])
- _nacts = uint(_hcltok_actions[_acts])
- _acts++
- for ; _nacts > 0; _nacts-- {
- _acts++
- switch _hcltok_actions[_acts-1] {
- case 0:
-//line scan_tokens.rl:235
- p--
-
- case 4:
-//line NONE:1
- te = p + 1
-
- case 5:
-//line scan_tokens.rl:259
- act = 4
- case 6:
-//line scan_tokens.rl:261
- act = 6
- case 7:
-//line scan_tokens.rl:171
- te = p + 1
- {
- token(TokenTemplateInterp)
- braces++
- retBraces = append(retBraces, braces)
- if len(heredocs) > 0 {
- heredocs[len(heredocs)-1].StartOfLine = false
- }
- {
- stack = append(stack, 0)
- stack[top] = cs
- top++
- cs = 1459
- goto _again
- }
- }
- case 8:
-//line scan_tokens.rl:181
- te = p + 1
- {
- token(TokenTemplateControl)
- braces++
- retBraces = append(retBraces, braces)
- if len(heredocs) > 0 {
- heredocs[len(heredocs)-1].StartOfLine = false
- }
- {
- stack = append(stack, 0)
- stack[top] = cs
- top++
- cs = 1459
- goto _again
- }
- }
- case 9:
-//line scan_tokens.rl:95
- te = p + 1
- {
- token(TokenCQuote)
- top--
- cs = stack[top]
- {
- stack = stack[:len(stack)-1]
- }
- goto _again
-
- }
- case 10:
-//line scan_tokens.rl:259
- te = p + 1
- {
- token(TokenQuotedLit)
- }
- case 11:
-//line scan_tokens.rl:262
- te = p + 1
- {
- token(TokenBadUTF8)
- }
- case 12:
-//line scan_tokens.rl:171
- te = p
- p--
- {
- token(TokenTemplateInterp)
- braces++
- retBraces = append(retBraces, braces)
- if len(heredocs) > 0 {
- heredocs[len(heredocs)-1].StartOfLine = false
- }
- {
- stack = append(stack, 0)
- stack[top] = cs
- top++
- cs = 1459
- goto _again
- }
- }
- case 13:
-//line scan_tokens.rl:181
- te = p
- p--
- {
- token(TokenTemplateControl)
- braces++
- retBraces = append(retBraces, braces)
- if len(heredocs) > 0 {
- heredocs[len(heredocs)-1].StartOfLine = false
- }
- {
- stack = append(stack, 0)
- stack[top] = cs
- top++
- cs = 1459
- goto _again
- }
- }
- case 14:
-//line scan_tokens.rl:259
- te = p
- p--
- {
- token(TokenQuotedLit)
- }
- case 15:
-//line scan_tokens.rl:260
- te = p
- p--
- {
- token(TokenQuotedNewline)
- }
- case 16:
-//line scan_tokens.rl:261
- te = p
- p--
- {
- token(TokenInvalid)
- }
- case 17:
-//line scan_tokens.rl:262
- te = p
- p--
- {
- token(TokenBadUTF8)
- }
- case 18:
-//line scan_tokens.rl:259
- p = (te) - 1
- {
- token(TokenQuotedLit)
- }
- case 19:
-//line scan_tokens.rl:262
- p = (te) - 1
- {
- token(TokenBadUTF8)
- }
- case 20:
-//line NONE:1
- switch act {
- case 4:
- {
- p = (te) - 1
- token(TokenQuotedLit)
- }
- case 6:
- {
- p = (te) - 1
- token(TokenInvalid)
- }
- }
-
- case 21:
-//line scan_tokens.rl:159
- act = 11
- case 22:
-//line scan_tokens.rl:270
- act = 12
- case 23:
-//line scan_tokens.rl:171
- te = p + 1
- {
- token(TokenTemplateInterp)
- braces++
- retBraces = append(retBraces, braces)
- if len(heredocs) > 0 {
- heredocs[len(heredocs)-1].StartOfLine = false
- }
- {
- stack = append(stack, 0)
- stack[top] = cs
- top++
- cs = 1459
- goto _again
- }
- }
- case 24:
-//line scan_tokens.rl:181
- te = p + 1
- {
- token(TokenTemplateControl)
- braces++
- retBraces = append(retBraces, braces)
- if len(heredocs) > 0 {
- heredocs[len(heredocs)-1].StartOfLine = false
- }
- {
- stack = append(stack, 0)
- stack[top] = cs
- top++
- cs = 1459
- goto _again
- }
- }
- case 25:
-//line scan_tokens.rl:122
- te = p + 1
- {
- // This action is called specificially when a heredoc literal
- // ends with a newline character.
-
- // This might actually be our end marker.
- topdoc := &heredocs[len(heredocs)-1]
- if topdoc.StartOfLine {
- maybeMarker := bytes.TrimSpace(data[ts:te])
- if bytes.Equal(maybeMarker, topdoc.Marker) {
- // We actually emit two tokens here: the end-of-heredoc
- // marker first, and then separately the newline that
- // follows it. This then avoids issues with the closing
- // marker consuming a newline that would normally be used
- // to mark the end of an attribute definition.
- // We might have either a \n sequence or an \r\n sequence
- // here, so we must handle both.
- nls := te - 1
- nle := te
- te--
- if data[te-1] == '\r' {
- // back up one more byte
- nls--
- te--
- }
- token(TokenCHeredoc)
- ts = nls
- te = nle
- token(TokenNewline)
- heredocs = heredocs[:len(heredocs)-1]
- top--
- cs = stack[top]
- {
- stack = stack[:len(stack)-1]
- }
- goto _again
-
- }
- }
-
- topdoc.StartOfLine = true
- token(TokenStringLit)
- }
- case 26:
-//line scan_tokens.rl:270
- te = p + 1
- {
- token(TokenBadUTF8)
- }
- case 27:
-//line scan_tokens.rl:171
- te = p
- p--
- {
- token(TokenTemplateInterp)
- braces++
- retBraces = append(retBraces, braces)
- if len(heredocs) > 0 {
- heredocs[len(heredocs)-1].StartOfLine = false
- }
- {
- stack = append(stack, 0)
- stack[top] = cs
- top++
- cs = 1459
- goto _again
- }
- }
- case 28:
-//line scan_tokens.rl:181
- te = p
- p--
- {
- token(TokenTemplateControl)
- braces++
- retBraces = append(retBraces, braces)
- if len(heredocs) > 0 {
- heredocs[len(heredocs)-1].StartOfLine = false
- }
- {
- stack = append(stack, 0)
- stack[top] = cs
- top++
- cs = 1459
- goto _again
- }
- }
- case 29:
-//line scan_tokens.rl:159
- te = p
- p--
- {
- // This action is called when a heredoc literal _doesn't_ end
- // with a newline character, e.g. because we're about to enter
- // an interpolation sequence.
- heredocs[len(heredocs)-1].StartOfLine = false
- token(TokenStringLit)
- }
- case 30:
-//line scan_tokens.rl:270
- te = p
- p--
- {
- token(TokenBadUTF8)
- }
- case 31:
-//line scan_tokens.rl:159
- p = (te) - 1
- {
- // This action is called when a heredoc literal _doesn't_ end
- // with a newline character, e.g. because we're about to enter
- // an interpolation sequence.
- heredocs[len(heredocs)-1].StartOfLine = false
- token(TokenStringLit)
- }
- case 32:
-//line NONE:1
- switch act {
- case 0:
- {
- cs = 0
- goto _again
- }
- case 11:
- {
- p = (te) - 1
-
- // This action is called when a heredoc literal _doesn't_ end
- // with a newline character, e.g. because we're about to enter
- // an interpolation sequence.
- heredocs[len(heredocs)-1].StartOfLine = false
- token(TokenStringLit)
- }
- case 12:
- {
- p = (te) - 1
- token(TokenBadUTF8)
- }
- }
-
- case 33:
-//line scan_tokens.rl:167
- act = 15
- case 34:
-//line scan_tokens.rl:277
- act = 16
- case 35:
-//line scan_tokens.rl:171
- te = p + 1
- {
- token(TokenTemplateInterp)
- braces++
- retBraces = append(retBraces, braces)
- if len(heredocs) > 0 {
- heredocs[len(heredocs)-1].StartOfLine = false
- }
- {
- stack = append(stack, 0)
- stack[top] = cs
- top++
- cs = 1459
- goto _again
- }
- }
- case 36:
-//line scan_tokens.rl:181
- te = p + 1
- {
- token(TokenTemplateControl)
- braces++
- retBraces = append(retBraces, braces)
- if len(heredocs) > 0 {
- heredocs[len(heredocs)-1].StartOfLine = false
- }
- {
- stack = append(stack, 0)
- stack[top] = cs
- top++
- cs = 1459
- goto _again
- }
- }
- case 37:
-//line scan_tokens.rl:167
- te = p + 1
- {
- token(TokenStringLit)
- }
- case 38:
-//line scan_tokens.rl:277
- te = p + 1
- {
- token(TokenBadUTF8)
- }
- case 39:
-//line scan_tokens.rl:171
- te = p
- p--
- {
- token(TokenTemplateInterp)
- braces++
- retBraces = append(retBraces, braces)
- if len(heredocs) > 0 {
- heredocs[len(heredocs)-1].StartOfLine = false
- }
- {
- stack = append(stack, 0)
- stack[top] = cs
- top++
- cs = 1459
- goto _again
- }
- }
- case 40:
-//line scan_tokens.rl:181
- te = p
- p--
- {
- token(TokenTemplateControl)
- braces++
- retBraces = append(retBraces, braces)
- if len(heredocs) > 0 {
- heredocs[len(heredocs)-1].StartOfLine = false
- }
- {
- stack = append(stack, 0)
- stack[top] = cs
- top++
- cs = 1459
- goto _again
- }
- }
- case 41:
-//line scan_tokens.rl:167
- te = p
- p--
- {
- token(TokenStringLit)
- }
- case 42:
-//line scan_tokens.rl:277
- te = p
- p--
- {
- token(TokenBadUTF8)
- }
- case 43:
-//line scan_tokens.rl:167
- p = (te) - 1
- {
- token(TokenStringLit)
- }
- case 44:
-//line NONE:1
- switch act {
- case 0:
- {
- cs = 0
- goto _again
- }
- case 15:
- {
- p = (te) - 1
-
- token(TokenStringLit)
- }
- case 16:
- {
- p = (te) - 1
- token(TokenBadUTF8)
- }
- }
-
- case 45:
-//line scan_tokens.rl:281
- act = 17
- case 46:
-//line scan_tokens.rl:282
- act = 18
- case 47:
-//line scan_tokens.rl:282
- te = p + 1
- {
- token(TokenBadUTF8)
- }
- case 48:
-//line scan_tokens.rl:283
- te = p + 1
- {
- token(TokenInvalid)
- }
- case 49:
-//line scan_tokens.rl:281
- te = p
- p--
- {
- token(TokenIdent)
- }
- case 50:
-//line scan_tokens.rl:282
- te = p
- p--
- {
- token(TokenBadUTF8)
- }
- case 51:
-//line scan_tokens.rl:281
- p = (te) - 1
- {
- token(TokenIdent)
- }
- case 52:
-//line scan_tokens.rl:282
- p = (te) - 1
- {
- token(TokenBadUTF8)
- }
- case 53:
-//line NONE:1
- switch act {
- case 17:
- {
- p = (te) - 1
- token(TokenIdent)
- }
- case 18:
- {
- p = (te) - 1
- token(TokenBadUTF8)
- }
- }
-
- case 54:
-//line scan_tokens.rl:289
- act = 22
- case 55:
-//line scan_tokens.rl:313
- act = 40
- case 56:
-//line scan_tokens.rl:291
- te = p + 1
- {
- token(TokenComment)
- }
- case 57:
-//line scan_tokens.rl:292
- te = p + 1
- {
- token(TokenNewline)
- }
- case 58:
-//line scan_tokens.rl:294
- te = p + 1
- {
- token(TokenEqualOp)
- }
- case 59:
-//line scan_tokens.rl:295
- te = p + 1
- {
- token(TokenNotEqual)
- }
- case 60:
-//line scan_tokens.rl:296
- te = p + 1
- {
- token(TokenGreaterThanEq)
- }
- case 61:
-//line scan_tokens.rl:297
- te = p + 1
- {
- token(TokenLessThanEq)
- }
- case 62:
-//line scan_tokens.rl:298
- te = p + 1
- {
- token(TokenAnd)
- }
- case 63:
-//line scan_tokens.rl:299
- te = p + 1
- {
- token(TokenOr)
- }
- case 64:
-//line scan_tokens.rl:300
- te = p + 1
- {
- token(TokenDoubleColon)
- }
- case 65:
-//line scan_tokens.rl:301
- te = p + 1
- {
- token(TokenEllipsis)
- }
- case 66:
-//line scan_tokens.rl:302
- te = p + 1
- {
- token(TokenFatArrow)
- }
- case 67:
-//line scan_tokens.rl:303
- te = p + 1
- {
- selfToken()
- }
- case 68:
-//line scan_tokens.rl:191
- te = p + 1
- {
- token(TokenOBrace)
- braces++
- }
- case 69:
-//line scan_tokens.rl:196
- te = p + 1
- {
- if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces {
- token(TokenTemplateSeqEnd)
- braces--
- retBraces = retBraces[0 : len(retBraces)-1]
- top--
- cs = stack[top]
- {
- stack = stack[:len(stack)-1]
- }
- goto _again
-
- } else {
- token(TokenCBrace)
- braces--
- }
- }
- case 70:
-//line scan_tokens.rl:208
- te = p + 1
- {
- // Only consume from the retBraces stack and return if we are at
- // a suitable brace nesting level, otherwise things will get
- // confused. (Not entering this branch indicates a syntax error,
- // which we will catch in the parser.)
- if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces {
- token(TokenTemplateSeqEnd)
- braces--
- retBraces = retBraces[0 : len(retBraces)-1]
- top--
- cs = stack[top]
- {
- stack = stack[:len(stack)-1]
- }
- goto _again
-
- } else {
- // We intentionally generate a TokenTemplateSeqEnd here,
- // even though the user apparently wanted a brace, because
- // we want to allow the parser to catch the incorrect use
- // of a ~} to balance a generic opening brace, rather than
- // a template sequence.
- token(TokenTemplateSeqEnd)
- braces--
- }
- }
- case 71:
-//line scan_tokens.rl:90
- te = p + 1
- {
- token(TokenOQuote)
- {
- stack = append(stack, 0)
- stack[top] = cs
- top++
- cs = 1510
- goto _again
- }
- }
- case 72:
-//line scan_tokens.rl:100
- te = p + 1
- {
- token(TokenOHeredoc)
- // the token is currently the whole heredoc introducer, like
- // < 0; _nacts-- {
- _acts++
- switch _hcltok_actions[_acts-1] {
- case 1:
-//line NONE:1
- ts = 0
-
- case 2:
-//line NONE:1
- act = 0
-
-//line scan_tokens.go:5080
- }
- }
-
- if cs == 0 {
- goto _out
- }
- p++
- if p != pe {
- goto _resume
- }
- _test_eof:
- {
- }
- if p == eof {
- if _hcltok_eof_trans[cs] > 0 {
- _trans = int(_hcltok_eof_trans[cs] - 1)
- goto _eof_trans
- }
- }
-
- _out:
- {
- }
- }
-
-//line scan_tokens.rl:375
-
- // If we fall out here without being in a final state then we've
- // encountered something that the scanner can't match, which we'll
- // deal with as an invalid.
- if cs < hcltok_first_final {
- if mode == scanTemplate && len(stack) == 0 {
- // If we're scanning a bare template then any straggling
- // top-level stuff is actually literal string, rather than
- // invalid. This handles the case where the template ends
- // with a single "$" or "%", which trips us up because we
- // want to see another character to decide if it's a sequence
- // or an escape.
- f.emitToken(TokenStringLit, ts, len(data))
- } else {
- f.emitToken(TokenInvalid, ts, len(data))
- }
- }
-
- // We always emit a synthetic EOF token at the end, since it gives the
- // parser position information for an "unexpected EOF" diagnostic.
- f.emitToken(TokenEOF, len(data), len(data))
-
- return f.Tokens
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.rl b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.rl
deleted file mode 100644
index 66bb4714..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/scan_tokens.rl
+++ /dev/null
@@ -1,399 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-import (
- "bytes"
-
- "github.com/hashicorp/hcl/v2"
-)
-
-// This file is generated from scan_tokens.rl. DO NOT EDIT.
-%%{
- # (except when you are actually in scan_tokens.rl here, so edit away!)
-
- machine hcltok;
- write data;
-}%%
-
-func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token {
- stripData := stripUTF8BOM(data)
- start.Byte += len(data) - len(stripData)
- data = stripData
-
- f := &tokenAccum{
- Filename: filename,
- Bytes: data,
- Pos: start,
- StartByte: start.Byte,
- }
-
- %%{
- include UnicodeDerived "unicode_derived.rl";
-
- UTF8Cont = 0x80 .. 0xBF;
- AnyUTF8 = (
- 0x00..0x7F |
- 0xC0..0xDF . UTF8Cont |
- 0xE0..0xEF . UTF8Cont . UTF8Cont |
- 0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont
- );
- BrokenUTF8 = any - AnyUTF8;
-
- NumberLitContinue = (digit|'.'|('e'|'E') ('+'|'-')? digit);
- NumberLit = digit ("" | (NumberLitContinue - '.') | (NumberLitContinue* (NumberLitContinue - '.')));
- Ident = (ID_Start | '_') (ID_Continue | '-')*;
-
- # Symbols that just represent themselves are handled as a single rule.
- SelfToken = "[" | "]" | "(" | ")" | "." | "," | "*" | "/" | "%" | "+" | "-" | "=" | "<" | ">" | "!" | "?" | ":" | "\n" | "&" | "|" | "~" | "^" | ";" | "`" | "'";
-
- EqualOp = "==";
- NotEqual = "!=";
- GreaterThanEqual = ">=";
- LessThanEqual = "<=";
- LogicalAnd = "&&";
- LogicalOr = "||";
-
- DoubleColon = "::";
- Ellipsis = "...";
- FatArrow = "=>";
-
- Newline = '\r' ? '\n';
- EndOfLine = Newline;
-
- BeginStringTmpl = '"';
- BeginHeredocTmpl = '<<' ('-')? Ident Newline;
-
- Comment = (
- # The :>> operator in these is a "finish-guarded concatenation",
- # which terminates the sequence on its left when it completes
- # the sequence on its right.
- # In the single-line comment cases this is allowing us to make
- # the trailing EndOfLine optional while still having the overall
- # pattern terminate. In the multi-line case it ensures that
- # the first comment in the file ends at the first */, rather than
- # gobbling up all of the "any*" until the _final_ */ in the file.
- ("#" (any - EndOfLine)* :>> EndOfLine?) |
- ("//" (any - EndOfLine)* :>> EndOfLine?) |
- ("/*" any* :>> "*/")
- );
-
- # Note: hclwrite assumes that only ASCII spaces appear between tokens,
- # and uses this assumption to recreate the spaces between tokens by
- # looking at byte offset differences. This means it will produce
- # incorrect results in the presence of tabs, but that's acceptable
- # because the canonical style (which hclwrite itself can impose
- # automatically is to never use tabs).
- Spaces = (' ' | 0x09)+;
-
- action beginStringTemplate {
- token(TokenOQuote);
- fcall stringTemplate;
- }
-
- action endStringTemplate {
- token(TokenCQuote);
- fret;
- }
-
- action beginHeredocTemplate {
- token(TokenOHeredoc);
- // the token is currently the whole heredoc introducer, like
- // < 0 {
- heredocs[len(heredocs)-1].StartOfLine = false;
- }
- fcall main;
- }
-
- action beginTemplateControl {
- token(TokenTemplateControl);
- braces++;
- retBraces = append(retBraces, braces);
- if len(heredocs) > 0 {
- heredocs[len(heredocs)-1].StartOfLine = false;
- }
- fcall main;
- }
-
- action openBrace {
- token(TokenOBrace);
- braces++;
- }
-
- action closeBrace {
- if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces {
- token(TokenTemplateSeqEnd);
- braces--;
- retBraces = retBraces[0:len(retBraces)-1]
- fret;
- } else {
- token(TokenCBrace);
- braces--;
- }
- }
-
- action closeTemplateSeqEatWhitespace {
- // Only consume from the retBraces stack and return if we are at
- // a suitable brace nesting level, otherwise things will get
- // confused. (Not entering this branch indicates a syntax error,
- // which we will catch in the parser.)
- if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces {
- token(TokenTemplateSeqEnd);
- braces--;
- retBraces = retBraces[0:len(retBraces)-1]
- fret;
- } else {
- // We intentionally generate a TokenTemplateSeqEnd here,
- // even though the user apparently wanted a brace, because
- // we want to allow the parser to catch the incorrect use
- // of a ~} to balance a generic opening brace, rather than
- // a template sequence.
- token(TokenTemplateSeqEnd);
- braces--;
- }
- }
-
- TemplateInterp = "${" ("~")?;
- TemplateControl = "%{" ("~")?;
- EndStringTmpl = '"';
- NewlineChars = ("\r"|"\n");
- NewlineCharsSeq = NewlineChars+;
- StringLiteralChars = (AnyUTF8 - NewlineChars);
- TemplateIgnoredNonBrace = (^'{' %{ fhold; });
- TemplateNotInterp = '$' (TemplateIgnoredNonBrace | TemplateInterp);
- TemplateNotControl = '%' (TemplateIgnoredNonBrace | TemplateControl);
- QuotedStringLiteralWithEsc = ('\\' StringLiteralChars) | (StringLiteralChars - ("$" | '%' | '"' | "\\"));
- TemplateStringLiteral = (
- (TemplateNotInterp) |
- (TemplateNotControl) |
- (QuotedStringLiteralWithEsc)+
- );
- HeredocStringLiteral = (
- (TemplateNotInterp) |
- (TemplateNotControl) |
- (StringLiteralChars - ("$" | '%'))*
- );
- BareStringLiteral = (
- (TemplateNotInterp) |
- (TemplateNotControl) |
- (StringLiteralChars - ("$" | '%'))*
- ) Newline?;
-
- stringTemplate := |*
- TemplateInterp => beginTemplateInterp;
- TemplateControl => beginTemplateControl;
- EndStringTmpl => endStringTemplate;
- TemplateStringLiteral => { token(TokenQuotedLit); };
- NewlineCharsSeq => { token(TokenQuotedNewline); };
- AnyUTF8 => { token(TokenInvalid); };
- BrokenUTF8 => { token(TokenBadUTF8); };
- *|;
-
- heredocTemplate := |*
- TemplateInterp => beginTemplateInterp;
- TemplateControl => beginTemplateControl;
- HeredocStringLiteral EndOfLine => heredocLiteralEOL;
- HeredocStringLiteral => heredocLiteralMidline;
- BrokenUTF8 => { token(TokenBadUTF8); };
- *|;
-
- bareTemplate := |*
- TemplateInterp => beginTemplateInterp;
- TemplateControl => beginTemplateControl;
- BareStringLiteral => bareTemplateLiteral;
- BrokenUTF8 => { token(TokenBadUTF8); };
- *|;
-
- identOnly := |*
- Ident => { token(TokenIdent) };
- BrokenUTF8 => { token(TokenBadUTF8) };
- AnyUTF8 => { token(TokenInvalid) };
- *|;
-
- main := |*
- Spaces => {};
- NumberLit => { token(TokenNumberLit) };
- Ident => { token(TokenIdent) };
-
- Comment => { token(TokenComment) };
- Newline => { token(TokenNewline) };
-
- EqualOp => { token(TokenEqualOp); };
- NotEqual => { token(TokenNotEqual); };
- GreaterThanEqual => { token(TokenGreaterThanEq); };
- LessThanEqual => { token(TokenLessThanEq); };
- LogicalAnd => { token(TokenAnd); };
- LogicalOr => { token(TokenOr); };
- DoubleColon => { token(TokenDoubleColon); };
- Ellipsis => { token(TokenEllipsis); };
- FatArrow => { token(TokenFatArrow); };
- SelfToken => { selfToken() };
-
- "{" => openBrace;
- "}" => closeBrace;
-
- "~}" => closeTemplateSeqEatWhitespace;
-
- BeginStringTmpl => beginStringTemplate;
- BeginHeredocTmpl => beginHeredocTemplate;
-
- BrokenUTF8 => { token(TokenBadUTF8) };
- AnyUTF8 => { token(TokenInvalid) };
- *|;
-
- }%%
-
- // Ragel state
- p := 0 // "Pointer" into data
- pe := len(data) // End-of-data "pointer"
- ts := 0
- te := 0
- act := 0
- eof := pe
- var stack []int
- var top int
-
- var cs int // current state
- switch mode {
- case scanNormal:
- cs = hcltok_en_main
- case scanTemplate:
- cs = hcltok_en_bareTemplate
- case scanIdentOnly:
- cs = hcltok_en_identOnly
- default:
- panic("invalid scanMode")
- }
-
- braces := 0
- var retBraces []int // stack of brace levels that cause us to use fret
- var heredocs []heredocInProgress // stack of heredocs we're currently processing
-
- %%{
- prepush {
- stack = append(stack, 0);
- }
- postpop {
- stack = stack[:len(stack)-1];
- }
- }%%
-
- // Make Go compiler happy
- _ = ts
- _ = te
- _ = act
- _ = eof
-
- token := func (ty TokenType) {
- f.emitToken(ty, ts, te)
- }
- selfToken := func () {
- b := data[ts:te]
- if len(b) != 1 {
- // should never happen
- panic("selfToken only works for single-character tokens")
- }
- f.emitToken(TokenType(b[0]), ts, te)
- }
-
- %%{
- write init nocs;
- write exec;
- }%%
-
- // If we fall out here without being in a final state then we've
- // encountered something that the scanner can't match, which we'll
- // deal with as an invalid.
- if cs < hcltok_first_final {
- if mode == scanTemplate && len(stack) == 0 {
- // If we're scanning a bare template then any straggling
- // top-level stuff is actually literal string, rather than
- // invalid. This handles the case where the template ends
- // with a single "$" or "%", which trips us up because we
- // want to see another character to decide if it's a sequence
- // or an escape.
- f.emitToken(TokenStringLit, ts, len(data))
- } else {
- f.emitToken(TokenInvalid, ts, len(data))
- }
- }
-
- // We always emit a synthetic EOF token at the end, since it gives the
- // parser position information for an "unexpected EOF" diagnostic.
- f.emitToken(TokenEOF, len(data), len(data))
-
- return f.Tokens
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md
deleted file mode 100644
index 88925410..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/spec.md
+++ /dev/null
@@ -1,943 +0,0 @@
-# HCL Native Syntax Specification
-
-This is the specification of the syntax and semantics of the native syntax
-for HCL. HCL is a system for defining configuration languages for applications.
-The HCL information model is designed to support multiple concrete syntaxes
-for configuration, but this native syntax is considered the primary format
-and is optimized for human authoring and maintenance, as opposed to machine
-generation of configuration.
-
-The language consists of three integrated sub-languages:
-
-- The _structural_ language defines the overall hierarchical configuration
- structure, and is a serialization of HCL bodies, blocks and attributes.
-
-- The _expression_ language is used to express attribute values, either as
- literals or as derivations of other values.
-
-- The _template_ language is used to compose values together into strings,
- as one of several types of expression in the expression language.
-
-In normal use these three sub-languages are used together within configuration
-files to describe an overall configuration, with the structural language
-being used at the top level. The expression and template languages can also
-be used in isolation, to implement features such as REPLs, debuggers, and
-integration into more limited HCL syntaxes such as the JSON profile.
-
-## Syntax Notation
-
-Within this specification a semi-formal notation is used to illustrate the
-details of syntax. This notation is intended for human consumption rather
-than machine consumption, with the following conventions:
-
-- A naked name starting with an uppercase letter is a global production,
- common to all of the syntax specifications in this document.
-- A naked name starting with a lowercase letter is a local production,
- meaningful only within the specification where it is defined.
-- Double and single quotes (`"` and `'`) are used to mark literal character
- sequences, which may be either punctuation markers or keywords.
-- The default operator for combining items, which has no punctuation,
- is concatenation.
-- The symbol `|` indicates that any one of its left and right operands may
- be present.
-- The `*` symbol indicates zero or more repetitions of the item to its left.
-- The `?` symbol indicates zero or one of the item to its left.
-- Parentheses (`(` and `)`) are used to group items together to apply
- the `|`, `*` and `?` operators to them collectively.
-
-The grammar notation does not fully describe the language. The prose may
-augment or conflict with the illustrated grammar. In case of conflict, prose
-has priority.
-
-## Source Code Representation
-
-Source code is unicode text expressed in the UTF-8 encoding. The language
-itself does not perform unicode normalization, so syntax features such as
-identifiers are sequences of unicode code points and so e.g. a precombined
-accented character is distinct from a letter associated with a combining
-accent. (String literals have some special handling with regard to Unicode
-normalization which will be covered later in the relevant section.)
-
-UTF-8 encoded Unicode byte order marks are not permitted. Invalid or
-non-normalized UTF-8 encoding is always a parse error.
-
-## Lexical Elements
-
-### Comments and Whitespace
-
-Comments and Whitespace are recognized as lexical elements but are ignored
-except as described below.
-
-Whitespace is defined as a sequence of zero or more space characters
-(U+0020). Newline sequences (either U+000A or U+000D followed by U+000A)
-are _not_ considered whitespace but are ignored as such in certain contexts.
-Horizontal tab characters (U+0009) are also treated as whitespace, but are
-counted only as one "column" for the purpose of reporting source positions.
-
-Comments serve as program documentation and come in two forms:
-
-- _Line comments_ start with either the `//` or `#` sequences and end with
- the next newline sequence. A line comment is considered equivalent to a
- newline sequence.
-
-- _Inline comments_ start with the `/*` sequence and end with the `*/`
- sequence, and may have any characters within except the ending sequence.
- An inline comment is considered equivalent to a whitespace sequence.
-
-Comments and whitespace cannot begin within other comments, or within
-template literals except inside an interpolation sequence or template directive.
-
-### Identifiers
-
-Identifiers name entities such as blocks, attributes and expression variables.
-Identifiers are interpreted as per [UAX #31][uax31] Section 2. Specifically,
-their syntax is defined in terms of the `ID_Start` and `ID_Continue`
-character properties as follows:
-
-```ebnf
-Identifier = ID_Start (ID_Continue | '-')*;
-```
-
-The Unicode specification provides the normative requirements for identifier
-parsing. Non-normatively, the spirit of this specification is that `ID_Start`
-consists of Unicode letter and certain unambiguous punctuation tokens, while
-`ID_Continue` augments that set with Unicode digits, combining marks, etc.
-
-The dash character `-` is additionally allowed in identifiers, even though
-that is not part of the unicode `ID_Continue` definition. This is to allow
-attribute names and block type names to contain dashes, although underscores
-as word separators are considered the idiomatic usage.
-
-[uax31]: http://unicode.org/reports/tr31/ "Unicode Identifier and Pattern Syntax"
-
-### Keywords
-
-There are no globally-reserved words, but in some contexts certain identifiers
-are reserved to function as keywords. These are discussed further in the
-relevant documentation sections that follow. In such situations, the
-identifier's role as a keyword supersedes any other valid interpretation that
-may be possible. Outside of these specific situations, the keywords have no
-special meaning and are interpreted as regular identifiers.
-
-### Operators and Delimiters
-
-The following character sequences represent operators, delimiters, and other
-special tokens:
-
-```
-+ && == < : { [ ( ${
-- || != > ? } ] ) %{
-* ! <= = .
-/ >= => ,
-% ...
-```
-
-### Numeric Literals
-
-A numeric literal is a decimal representation of a
-real number. It has an integer part, a fractional part,
-and an exponent part.
-
-```ebnf
-NumericLit = decimal+ ("." decimal+)? (expmark decimal+)?;
-decimal = '0' .. '9';
-expmark = ('e' | 'E') ("+" | "-")?;
-```
-
-## Structural Elements
-
-The structural language consists of syntax representing the following
-constructs:
-
-- _Attributes_, which assign a value to a specified name.
-- _Blocks_, which create a child body annotated by a type and optional labels.
-- _Body Content_, which consists of a collection of attributes and blocks.
-
-These constructs correspond to the similarly-named concepts in the
-language-agnostic HCL information model.
-
-```ebnf
-ConfigFile = Body;
-Body = (Attribute | Block | OneLineBlock)*;
-Attribute = Identifier "=" Expression Newline;
-Block = Identifier (StringLit|Identifier)* "{" Newline Body "}" Newline;
-OneLineBlock = Identifier (StringLit|Identifier)* "{" (Identifier "=" Expression)? "}" Newline;
-```
-
-### Configuration Files
-
-A _configuration file_ is a sequence of characters whose top-level is
-interpreted as a Body.
-
-### Bodies
-
-A _body_ is a collection of associated attributes and blocks. The meaning of
-this association is defined by the calling application.
-
-### Attribute Definitions
-
-An _attribute definition_ assigns a value to a particular attribute name within
-a body. Each distinct attribute name may be defined no more than once within a
-single body.
-
-The attribute value is given as an expression, which is retained literally
-for later evaluation by the calling application.
-
-### Blocks
-
-A _block_ creates a child body that is annotated with a block _type_ and
-zero or more block _labels_. Blocks create a structural hierarchy which can be
-interpreted by the calling application.
-
-Block labels can either be quoted literal strings or naked identifiers.
-
-## Expressions
-
-The expression sub-language is used within attribute definitions to specify
-values.
-
-```ebnf
-Expression = (
- ExprTerm |
- Operation |
- Conditional
-);
-```
-
-### Types
-
-The value types used within the expression language are those defined by the
-syntax-agnostic HCL information model. An expression may return any valid
-type, but only a subset of the available types have first-class syntax.
-A calling application may make other types available via _variables_ and
-_functions_.
-
-### Expression Terms
-
-Expression _terms_ are the operands for unary and binary expressions, as well
-as acting as expressions in their own right.
-
-```ebnf
-ExprTerm = (
- LiteralValue |
- CollectionValue |
- TemplateExpr |
- VariableExpr |
- FunctionCall |
- ForExpr |
- ExprTerm Index |
- ExprTerm GetAttr |
- ExprTerm Splat |
- "(" Expression ")"
-);
-```
-
-The productions for these different term types are given in their corresponding
-sections.
-
-Between the `(` and `)` characters denoting a sub-expression, newline
-characters are ignored as whitespace.
-
-### Literal Values
-
-A _literal value_ immediately represents a particular value of a primitive
-type.
-
-```ebnf
-LiteralValue = (
- NumericLit |
- "true" |
- "false" |
- "null"
-);
-```
-
-- Numeric literals represent values of type _number_.
-- The `true` and `false` keywords represent values of type _bool_.
-- The `null` keyword represents a null value of the dynamic pseudo-type.
-
-String literals are not directly available in the expression sub-language, but
-are available via the template sub-language, which can in turn be incorporated
-via _template expressions_.
-
-### Collection Values
-
-A _collection value_ combines zero or more other expressions to produce a
-collection value.
-
-```ebnf
-CollectionValue = tuple | object;
-tuple = "[" (
- (Expression (("," | Newline) Expression)* ","?)?
-) "]";
-object = "{" (
- (objectelem (( "," | Newline) objectelem)* ","?)?
-) "}";
-objectelem = (Identifier | Expression) ("=" | ":") Expression;
-```
-
-Only tuple and object values can be directly constructed via native syntax.
-Tuple and object values can in turn be converted to list, set and map values
-with other operations, which behaves as defined by the syntax-agnostic HCL
-information model.
-
-When specifying an object element, an identifier is interpreted as a literal
-attribute name as opposed to a variable reference. To populate an item key
-from a variable, use parentheses to disambiguate:
-
-- `{foo = "baz"}` is interpreted as an attribute literally named `foo`.
-- `{(foo) = "baz"}` is interpreted as an attribute whose name is taken
- from the variable named `foo`.
-
-Between the open and closing delimiters of these sequences, newline sequences
-are ignored as whitespace.
-
-There is a syntax ambiguity between _for expressions_ and collection values
-whose first element starts with an identifier named `for`. The _for expression_
-interpretation has priority, so to write a key literally named `for`
-or an expression derived from a variable named `for` you must use parentheses
-or quotes to disambiguate:
-
-- `[for, foo, baz]` is a syntax error.
-- `[(for), foo, baz]` is a tuple whose first element is the value of variable
- `for`.
-- `{for = 1, baz = 2}` is a syntax error.
-- `{"for" = 1, baz = 2}` is an object with an attribute literally named `for`.
-- `{baz = 2, for = 1}` is equivalent to the previous example, and resolves the
- ambiguity by reordering.
-- `{(for) = 1, baz = 2}` is an object with a key with the same value as the
- variable `for`.
-
-### Template Expressions
-
-A _template expression_ embeds a program written in the template sub-language
-as an expression. Template expressions come in two forms:
-
-- A _quoted_ template expression is delimited by quote characters (`"`) and
- defines a template as a single-line expression with escape characters.
-- A _heredoc_ template expression is introduced by a `<<` sequence and
- defines a template via a multi-line sequence terminated by a user-chosen
- delimiter.
-
-In both cases the template interpolation and directive syntax is available for
-use within the delimiters, and any text outside of these special sequences is
-interpreted as a literal string.
-
-In _quoted_ template expressions any literal string sequences within the
-template behave in a special way: literal newline sequences are not permitted
-and instead _escape sequences_ can be included, starting with the
-backslash `\`:
-
-```
- \n Unicode newline control character
- \r Unicode carriage return control character
- \t Unicode tab control character
- \" Literal quote mark, used to prevent interpretation as end of string
- \\ Literal backslash, used to prevent interpretation as escape sequence
- \uNNNN Unicode character from Basic Multilingual Plane (NNNN is four hexadecimal digits)
- \UNNNNNNNN Unicode character from supplementary planes (NNNNNNNN is eight hexadecimal digits)
-```
-
-The _heredoc_ template expression type is introduced by either `<<` or `<<-`,
-followed by an identifier. The template expression ends when the given
-identifier subsequently appears again on a line of its own.
-
-If a heredoc template is introduced with the `<<-` symbol, any literal string
-at the start of each line is analyzed to find the minimum number of leading
-spaces, and then that number of prefix spaces is removed from all line-leading
-literal strings. The final closing marker may also have an arbitrary number
-of spaces preceding it on its line.
-
-```ebnf
-TemplateExpr = quotedTemplate | heredocTemplate;
-quotedTemplate = (as defined in prose above);
-heredocTemplate = (
- ("<<" | "<<-") Identifier Newline
- (content as defined in prose above)
- Identifier Newline
-);
-```
-
-A quoted template expression containing only a single literal string serves
-as a syntax for defining literal string _expressions_. In certain contexts
-the template syntax is restricted in this manner:
-
-```ebnf
-StringLit = '"' (quoted literals as defined in prose above) '"';
-```
-
-The `StringLit` production permits the escape sequences discussed for quoted
-template expressions as above, but does _not_ permit template interpolation
-or directive sequences.
-
-### Variables and Variable Expressions
-
-A _variable_ is a value that has been assigned a symbolic name. Variables are
-made available for use in expressions by the calling application, by populating
-the _global scope_ used for expression evaluation.
-
-Variables can also be created by expressions themselves, which always creates
-a _child scope_ that incorporates the variables from its parent scope but
-(re-)defines zero or more names with new values.
-
-The value of a variable is accessed using a _variable expression_, which is
-a standalone `Identifier` whose name corresponds to a defined variable:
-
-```ebnf
-VariableExpr = Identifier;
-```
-
-Variables in a particular scope are immutable, but child scopes may _hide_
-a variable from an ancestor scope by defining a new variable of the same name.
-When looking up variables, the most locally-defined variable of the given name
-is used, and ancestor-scoped variables of the same name cannot be accessed.
-
-No direct syntax is provided for declaring or assigning variables, but other
-expression constructs implicitly create child scopes and define variables as
-part of their evaluation.
-
-### Functions and Function Calls
-
-A _function_ is an operation that has been assigned a symbolic name. Functions
-are made available for use in expressions by the calling application, by
-populating the _function table_ used for expression evaluation.
-
-The namespace of functions is distinct from the namespace of variables. A
-function and a variable may share the same name with no implication that they
-are in any way related.
-
-A function can be executed via a _function call_ expression:
-
-```ebnf
-FunctionCall = Identifier "(" arguments ")";
-Arguments = (
- () ||
- (Expression ("," Expression)* ("," | "...")?)
-);
-```
-
-The definition of functions and the semantics of calling them are defined by
-the language-agnostic HCL information model. The given arguments are mapped
-onto the function's _parameters_ and the result of a function call expression
-is the return value of the named function when given those arguments.
-
-If the final argument expression is followed by the ellipsis symbol (`...`),
-the final argument expression must evaluate to either a list or tuple value.
-The elements of the value are each mapped to a single parameter of the
-named function, beginning at the first parameter remaining after all other
-argument expressions have been mapped.
-
-Within the parentheses that delimit the function arguments, newline sequences
-are ignored as whitespace.
-
-### For Expressions
-
-A _for expression_ is a construct for constructing a collection by projecting
-the items from another collection.
-
-```ebnf
-ForExpr = forTupleExpr | forObjectExpr;
-forTupleExpr = "[" forIntro Expression forCond? "]";
-forObjectExpr = "{" forIntro Expression "=>" Expression "..."? forCond? "}";
-forIntro = "for" Identifier ("," Identifier)? "in" Expression ":";
-forCond = "if" Expression;
-```
-
-The punctuation used to delimit a for expression decide whether it will produce
-a tuple value (`[` and `]`) or an object value (`{` and `}`).
-
-The "introduction" is equivalent in both cases: the keyword `for` followed by
-either one or two identifiers separated by a comma which define the temporary
-variable names used for iteration, followed by the keyword `in` and then
-an expression that must evaluate to a value that can be iterated. The
-introduction is then terminated by the colon (`:`) symbol.
-
-If only one identifier is provided, it is the name of a variable that will
-be temporarily assigned the value of each element during iteration. If both
-are provided, the first is the key and the second is the value.
-
-Tuple, object, list, map, and set types are iterable. The type of collection
-used defines how the key and value variables are populated:
-
-- For tuple and list types, the _key_ is the zero-based index into the
- sequence for each element, and the _value_ is the element value. The
- elements are visited in index order.
-- For object and map types, the _key_ is the string attribute name or element
- key, and the _value_ is the attribute or element value. The elements are
- visited in the order defined by a lexicographic sort of the attribute names
- or keys.
-- For set types, the _key_ and _value_ are both the element value. The elements
- are visited in an undefined but consistent order.
-
-The expression after the colon and (in the case of object `for`) the expression
-after the `=>` are both evaluated once for each element of the source
-collection, in a local scope that defines the key and value variable names
-specified.
-
-The results of evaluating these expressions for each input element are used
-to populate an element in the new collection. In the case of tuple `for`, the
-single expression becomes an element, appending values to the tuple in visit
-order. In the case of object `for`, the pair of expressions is used as an
-attribute name and value respectively, creating an element in the resulting
-object.
-
-In the case of object `for`, it is an error if two input elements produce
-the same result from the attribute name expression, since duplicate
-attributes are not possible. If the ellipsis symbol (`...`) appears
-immediately after the value expression, this activates the grouping mode in
-which each value in the resulting object is a _tuple_ of all of the values
-that were produced against each distinct key.
-
-- `[for v in ["a", "b"]: v]` returns `["a", "b"]`.
-- `[for i, v in ["a", "b"]: i]` returns `[0, 1]`.
-- `{for i, v in ["a", "b"]: v => i}` returns `{a = 0, b = 1}`.
-- `{for i, v in ["a", "a", "b"]: v => i}` produces an error, because attribute
- `a` is defined twice.
-- `{for i, v in ["a", "a", "b"]: v => i...}` returns `{a = [0, 1], b = [2]}`.
-
-If the `if` keyword is used after the element expression(s), it applies an
-additional predicate that can be used to conditionally filter elements from
-the source collection from consideration. The expression following `if` is
-evaluated once for each source element, in the same scope used for the
-element expression(s). It must evaluate to a boolean value; if `true`, the
-element will be evaluated as normal, while if `false` the element will be
-skipped.
-
-- `[for i, v in ["a", "b", "c"]: v if i < 2]` returns `["a", "b"]`.
-
-If the collection value, element expression(s) or condition expression return
-unknown values that are otherwise type-valid, the result is a value of the
-dynamic pseudo-type.
-
-### Index Operator
-
-The _index_ operator returns the value of a single element of a collection
-value. It is a postfix operator and can be applied to any value that has
-a tuple, object, map, or list type.
-
-```ebnf
-Index = "[" Expression "]";
-```
-
-The expression delimited by the brackets is the _key_ by which an element
-will be looked up.
-
-If the index operator is applied to a value of tuple or list type, the
-key expression must be an non-negative integer number representing the
-zero-based element index to access. If applied to a value of object or map
-type, the key expression must be a string representing the attribute name
-or element key. If the given key value is not of the appropriate type, a
-conversion is attempted using the conversion rules from the HCL
-syntax-agnostic information model.
-
-An error is produced if the given key expression does not correspond to
-an element in the collection, either because it is of an unconvertable type,
-because it is outside the range of elements for a tuple or list, or because
-the given attribute or key does not exist.
-
-If either the collection or the key are an unknown value of an
-otherwise-suitable type, the return value is an unknown value whose type
-matches what type would be returned given known values, or a value of the
-dynamic pseudo-type if type information alone cannot determine a suitable
-return type.
-
-Within the brackets that delimit the index key, newline sequences are ignored
-as whitespace.
-
-The HCL native syntax also includes a _legacy_ index operator that exists
-only for compatibility with the precursor language HIL:
-
-```ebnf
-LegacyIndex = '.' digit+
-```
-
-This legacy index operator must be supported by parser for compatibility but
-should not be used in new configurations. This allows an attribute-access-like
-syntax for indexing, must still be interpreted as an index operation rather
-than attribute access.
-
-The legacy syntax does not support chaining of index operations, like
-`foo.0.0.bar`, because the interpretation of `0.0` as a number literal token
-takes priority and thus renders the resulting sequence invalid.
-
-### Attribute Access Operator
-
-The _attribute access_ operator returns the value of a single attribute in
-an object value. It is a postfix operator and can be applied to any value
-that has an object type.
-
-```ebnf
-GetAttr = "." Identifier;
-```
-
-The given identifier is interpreted as the name of the attribute to access.
-An error is produced if the object to which the operator is applied does not
-have an attribute with the given name.
-
-If the object is an unknown value of a type that has the attribute named, the
-result is an unknown value of the attribute's type.
-
-### Splat Operators
-
-The _splat operators_ allow convenient access to attributes or elements of
-elements in a tuple, list, or set value.
-
-There are two kinds of "splat" operator:
-
-- The _attribute-only_ splat operator supports only attribute lookups into
- the elements from a list, but supports an arbitrary number of them.
-
-- The _full_ splat operator additionally supports indexing into the elements
- from a list, and allows any combination of attribute access and index
- operations.
-
-```ebnf
-Splat = attrSplat | fullSplat;
-attrSplat = "." "*" GetAttr*;
-fullSplat = "[" "*" "]" (GetAttr | Index)*;
-```
-
-The splat operators can be thought of as shorthands for common operations that
-could otherwise be performed using _for expressions_:
-
-- `tuple.*.foo.bar[0]` is approximately equivalent to
- `[for v in tuple: v.foo.bar][0]`.
-- `tuple[*].foo.bar[0]` is approximately equivalent to
- `[for v in tuple: v.foo.bar[0]]`
-
-Note the difference in how the trailing index operator is interpreted in
-each case. This different interpretation is the key difference between the
-_attribute-only_ and _full_ splat operators.
-
-Splat operators have one additional behavior compared to the equivalent
-_for expressions_ shown above: if a splat operator is applied to a value that
-is _not_ of tuple, list, or set type, the value is coerced automatically into
-a single-value list of the value type:
-
-- `any_object.*.id` is equivalent to `[any_object.id]`, assuming that `any_object`
- is a single object.
-- `any_number.*` is equivalent to `[any_number]`, assuming that `any_number`
- is a single number.
-
-If applied to a null value that is not tuple, list, or set, the result is always
-an empty tuple, which allows conveniently converting a possibly-null scalar
-value into a tuple of zero or one elements. It is illegal to apply a splat
-operator to a null value of tuple, list, or set type.
-
-### Operations
-
-Operations apply a particular operator to either one or two expression terms.
-
-```ebnf
-Operation = unaryOp | binaryOp;
-unaryOp = ("-" | "!") ExprTerm;
-binaryOp = ExprTerm binaryOperator ExprTerm;
-binaryOperator = compareOperator | arithmeticOperator | logicOperator;
-compareOperator = "==" | "!=" | "<" | ">" | "<=" | ">=";
-arithmeticOperator = "+" | "-" | "*" | "/" | "%";
-logicOperator = "&&" | "||";
-```
-
-The unary operators have the highest precedence.
-
-The binary operators are grouped into the following precedence levels:
-
-```
-Level Operators
- 6 * / %
- 5 + -
- 4 > >= < <=
- 3 == !=
- 2 &&
- 1 ||
-```
-
-Higher values of "level" bind tighter. Operators within the same precedence
-level have left-to-right associativity. For example, `x / y * z` is equivalent
-to `(x / y) * z`.
-
-### Comparison Operators
-
-Comparison operators always produce boolean values, as a result of testing
-the relationship between two values.
-
-The two equality operators apply to values of any type:
-
-```
-a == b equal
-a != b not equal
-```
-
-Two values are equal if they are of identical types and their values are
-equal as defined in the HCL syntax-agnostic information model. The equality
-operators are commutative and opposite, such that `(a == b) == !(a != b)`
-and `(a == b) == (b == a)` for all values `a` and `b`.
-
-The four numeric comparison operators apply only to numbers:
-
-```
-a < b less than
-a <= b less than or equal to
-a > b greater than
-a >= b greater than or equal to
-```
-
-If either operand of a comparison operator is a correctly-typed unknown value
-or a value of the dynamic pseudo-type, the result is an unknown boolean.
-
-### Arithmetic Operators
-
-Arithmetic operators apply only to number values and always produce number
-values as results.
-
-```
-a + b sum (addition)
-a - b difference (subtraction)
-a * b product (multiplication)
-a / b quotient (division)
-a % b remainder (modulo)
--a negation
-```
-
-Arithmetic operations are considered to be performed in an arbitrary-precision
-number space.
-
-If either operand of an arithmetic operator is an unknown number or a value
-of the dynamic pseudo-type, the result is an unknown number.
-
-### Logic Operators
-
-Logic operators apply only to boolean values and always produce boolean values
-as results.
-
-```
-a && b logical AND
-a || b logical OR
-!a logical NOT
-```
-
-If either operand of a logic operator is an unknown bool value or a value
-of the dynamic pseudo-type, the result is an unknown bool value.
-
-### Conditional Operator
-
-The conditional operator allows selecting from one of two expressions based on
-the outcome of a boolean expression.
-
-```ebnf
-Conditional = Expression "?" Expression ":" Expression;
-```
-
-The first expression is the _predicate_, which is evaluated and must produce
-a boolean result. If the predicate value is `true`, the result of the second
-expression is the result of the conditional. If the predicate value is
-`false`, the result of the third expression is the result of the conditional.
-
-The second and third expressions must be of the same type or must be able to
-unify into a common type using the type unification rules defined in the
-HCL syntax-agnostic information model. This unified type is the result type
-of the conditional, with both expressions converted as necessary to the
-unified type.
-
-If the predicate is an unknown boolean value or a value of the dynamic
-pseudo-type then the result is an unknown value of the unified type of the
-other two expressions.
-
-If either the second or third expressions produce errors when evaluated,
-these errors are passed through only if the erroneous expression is selected.
-This allows for expressions such as
-`length(some_list) > 0 ? some_list[0] : default` (given some suitable `length`
-function) without producing an error when the predicate is `false`.
-
-## Templates
-
-The template sub-language is used within template expressions to concisely
-combine strings and other values to produce other strings. It can also be
-used in isolation as a standalone template language.
-
-```ebnf
-Template = (
- TemplateLiteral |
- TemplateInterpolation |
- TemplateDirective
-)*
-TemplateDirective = TemplateIf | TemplateFor;
-```
-
-A template behaves like an expression that always returns a string value.
-The different elements of the template are evaluated and combined into a
-single string to return. If any of the elements produce an unknown string
-or a value of the dynamic pseudo-type, the result is an unknown string.
-
-An important use-case for standalone templates is to enable the use of
-expressions in alternative HCL syntaxes where a native expression grammar is
-not available. For example, the HCL JSON profile treats the values of JSON
-strings as standalone templates when attributes are evaluated in expression
-mode.
-
-### Template Literals
-
-A template literal is a literal sequence of characters to include in the
-resulting string. When the template sub-language is used standalone, a
-template literal can contain any unicode character, with the exception
-of the sequences that introduce interpolations and directives, and for the
-sequences that escape those introductions.
-
-The interpolation and directive introductions are escaped by doubling their
-leading characters. The `${` sequence is escaped as `$${` and the `%{`
-sequence is escaped as `%%{`.
-
-When the template sub-language is embedded in the expression language via
-_template expressions_, additional constraints and transforms are applied to
-template literals as described in the definition of template expressions.
-
-The value of a template literal can be modified by _strip markers_ in any
-interpolations or directives that are adjacent to it. A strip marker is
-a tilde (`~`) placed immediately after the opening `{` or before the closing
-`}` of a template sequence:
-
-- `hello ${~ "world" }` produces `"helloworld"`.
-- `%{ if true ~} hello %{~ endif }` produces `"hello"`.
-
-When a strip marker is present, any spaces adjacent to it in the corresponding
-string literal (if any) are removed before producing the final value. Space
-characters are interpreted as per Unicode's definition.
-
-Stripping is done at syntax level rather than value level. Values returned
-by interpolations or directives are not subject to stripping:
-
-- `${"hello" ~}${" world"}` produces `"hello world"`, and not `"helloworld"`,
- because the space is not in a template literal directly adjacent to the
- strip marker.
-
-### Template Interpolations
-
-An _interpolation sequence_ evaluates an expression (written in the
-expression sub-language), converts the result to a string value, and
-replaces itself with the resulting string.
-
-```ebnf
-TemplateInterpolation = ("${" | "${~") Expression ("}" | "~}";
-```
-
-If the expression result cannot be converted to a string, an error is
-produced.
-
-### Template If Directive
-
-The template `if` directive is the template equivalent of the
-_conditional expression_, allowing selection of one of two sub-templates based
-on the value of a predicate expression.
-
-```ebnf
-TemplateIf = (
- ("%{" | "%{~") "if" Expression ("}" | "~}")
- Template
- (
- ("%{" | "%{~") "else" ("}" | "~}")
- Template
- )?
- ("%{" | "%{~") "endif" ("}" | "~}")
-);
-```
-
-The evaluation of the `if` directive is equivalent to the conditional
-expression, with the following exceptions:
-
-- The two sub-templates always produce strings, and thus the result value is
- also always a string.
-- The `else` clause may be omitted, in which case the conditional's third
- expression result is implied to be the empty string.
-
-### Template For Directive
-
-The template `for` directive is the template equivalent of the _for expression_,
-producing zero or more copies of its sub-template based on the elements of
-a collection.
-
-```ebnf
-TemplateFor = (
- ("%{" | "%{~") "for" Identifier ("," Identifier) "in" Expression ("}" | "~}")
- Template
- ("%{" | "%{~") "endfor" ("}" | "~}")
-);
-```
-
-The evaluation of the `for` directive is equivalent to the _for expression_
-when producing a tuple, with the following exceptions:
-
-- The sub-template always produces a string.
-- There is no equivalent of the "if" clause on the for expression.
-- The elements of the resulting tuple are all converted to strings and
- concatenated to produce a flat string result.
-
-### Template Interpolation Unwrapping
-
-As a special case, a template that consists only of a single interpolation,
-with no surrounding literals, directives or other interpolations, is
-"unwrapped". In this case, the result of the interpolation expression is
-returned verbatim, without conversion to string.
-
-This special case exists primarily to enable the native template language
-to be used inside strings in alternative HCL syntaxes that lack a first-class
-template or expression syntax. Unwrapping allows arbitrary expressions to be
-used to populate attributes when strings in such languages are interpreted
-as templates.
-
-- `${true}` produces the boolean value `true`
-- `${"${true}"}` produces the boolean value `true`, because both the inner
- and outer interpolations are subject to unwrapping.
-- `hello ${true}` produces the string `"hello true"`
-- `${""}${true}` produces the string `"true"` because there are two
- interpolation sequences, even though one produces an empty result.
-- `%{ for v in [true] }${v}%{ endfor }` produces the string `true` because
- the presence of the `for` directive circumvents the unwrapping even though
- the final result is a single value.
-
-In some contexts this unwrapping behavior may be circumvented by the calling
-application, by converting the final template result to string. This is
-necessary, for example, if a standalone template is being used to produce
-the direct contents of a file, since the result in that case must always be a
-string.
-
-## Static Analysis
-
-The HCL static analysis operations are implemented for some expression types
-in the native syntax, as described in the following sections.
-
-A goal for static analysis of the native syntax is for the interpretation to
-be as consistent as possible with the dynamic evaluation interpretation of
-the given expression, though some deviations are intentionally made in order
-to maximize the potential for analysis.
-
-### Static List
-
-The tuple construction syntax can be interpreted as a static list. All of
-the expression elements given are returned as the static list elements,
-with no further interpretation.
-
-### Static Map
-
-The object construction syntax can be interpreted as a static map. All of the
-key/value pairs given are returned as the static pairs, with no further
-interpretation.
-
-The usual requirement that an attribute name be interpretable as a string
-does not apply to this static analysis, allowing callers to provide map-like
-constructs with different key types by building on the map syntax.
-
-### Static Call
-
-The function call syntax can be interpreted as a static call. The called
-function name is returned verbatim and the given argument expressions are
-returned as the static arguments, with no further interpretation.
-
-### Static Traversal
-
-A variable expression and any attached attribute access operations and
-constant index operations can be interpreted as a static traversal.
-
-The keywords `true`, `false` and `null` can also be interpreted as
-static traversals, behaving as if they were references to variables of those
-names, to allow callers to redefine the meaning of those keywords in certain
-contexts.
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go
deleted file mode 100644
index ff272631..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure.go
+++ /dev/null
@@ -1,396 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-import (
- "fmt"
- "strings"
-
- "github.com/hashicorp/hcl/v2"
-)
-
-// AsHCLBlock returns the block data expressed as a *hcl.Block.
-func (b *Block) AsHCLBlock() *hcl.Block {
- if b == nil {
- return nil
- }
-
- return &hcl.Block{
- Type: b.Type,
- Labels: b.Labels,
- Body: b.Body,
-
- DefRange: b.DefRange(),
- TypeRange: b.TypeRange,
- LabelRanges: b.LabelRanges,
- }
-}
-
-// Body is the implementation of hcl.Body for the HCL native syntax.
-type Body struct {
- Attributes Attributes
- Blocks Blocks
-
- // These are used with PartialContent to produce a "remaining items"
- // body to return. They are nil on all bodies fresh out of the parser.
- hiddenAttrs map[string]struct{}
- hiddenBlocks map[string]struct{}
-
- SrcRange hcl.Range
- EndRange hcl.Range // Final token of the body (zero-length range)
-}
-
-// Assert that *Body implements hcl.Body
-var assertBodyImplBody hcl.Body = &Body{}
-
-func (b *Body) walkChildNodes(w internalWalkFunc) {
- w(b.Attributes)
- w(b.Blocks)
-}
-
-func (b *Body) Range() hcl.Range {
- return b.SrcRange
-}
-
-func (b *Body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
- content, remainHCL, diags := b.PartialContent(schema)
-
- // No we'll see if anything actually remains, to produce errors about
- // extraneous items.
- remain := remainHCL.(*Body)
-
- for name, attr := range b.Attributes {
- if _, hidden := remain.hiddenAttrs[name]; !hidden {
- var suggestions []string
- for _, attrS := range schema.Attributes {
- if _, defined := content.Attributes[attrS.Name]; defined {
- continue
- }
- suggestions = append(suggestions, attrS.Name)
- }
- suggestion := nameSuggestion(name, suggestions)
- if suggestion != "" {
- suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
- } else {
- // Is there a block of the same name?
- for _, blockS := range schema.Blocks {
- if blockS.Type == name {
- suggestion = fmt.Sprintf(" Did you mean to define a block of type %q?", name)
- break
- }
- }
- }
-
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unsupported argument",
- Detail: fmt.Sprintf("An argument named %q is not expected here.%s", name, suggestion),
- Subject: &attr.NameRange,
- })
- }
- }
-
- for _, block := range b.Blocks {
- blockTy := block.Type
- if _, hidden := remain.hiddenBlocks[blockTy]; !hidden {
- var suggestions []string
- for _, blockS := range schema.Blocks {
- suggestions = append(suggestions, blockS.Type)
- }
- suggestion := nameSuggestion(blockTy, suggestions)
- if suggestion != "" {
- suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
- } else {
- // Is there an attribute of the same name?
- for _, attrS := range schema.Attributes {
- if attrS.Name == blockTy {
- suggestion = fmt.Sprintf(" Did you mean to define argument %q? If so, use the equals sign to assign it a value.", blockTy)
- break
- }
- }
- }
-
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unsupported block type",
- Detail: fmt.Sprintf("Blocks of type %q are not expected here.%s", blockTy, suggestion),
- Subject: &block.TypeRange,
- })
- }
- }
-
- return content, diags
-}
-
-func (b *Body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
- attrs := make(hcl.Attributes)
- var blocks hcl.Blocks
- var diags hcl.Diagnostics
- hiddenAttrs := make(map[string]struct{})
- hiddenBlocks := make(map[string]struct{})
-
- if b.hiddenAttrs != nil {
- for k, v := range b.hiddenAttrs {
- hiddenAttrs[k] = v
- }
- }
- if b.hiddenBlocks != nil {
- for k, v := range b.hiddenBlocks {
- hiddenBlocks[k] = v
- }
- }
-
- for _, attrS := range schema.Attributes {
- name := attrS.Name
- attr, exists := b.Attributes[name]
- _, hidden := hiddenAttrs[name]
- if hidden || !exists {
- if attrS.Required {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Missing required argument",
- Detail: fmt.Sprintf("The argument %q is required, but no definition was found.", attrS.Name),
- Subject: b.MissingItemRange().Ptr(),
- })
- }
- continue
- }
-
- hiddenAttrs[name] = struct{}{}
- attrs[name] = attr.AsHCLAttribute()
- }
-
- blocksWanted := make(map[string]hcl.BlockHeaderSchema)
- for _, blockS := range schema.Blocks {
- blocksWanted[blockS.Type] = blockS
- }
-
- for _, block := range b.Blocks {
- if _, hidden := hiddenBlocks[block.Type]; hidden {
- continue
- }
- blockS, wanted := blocksWanted[block.Type]
- if !wanted {
- continue
- }
-
- if len(block.Labels) > len(blockS.LabelNames) {
- name := block.Type
- if len(blockS.LabelNames) == 0 {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: fmt.Sprintf("Extraneous label for %s", name),
- Detail: fmt.Sprintf(
- "No labels are expected for %s blocks.", name,
- ),
- Subject: block.LabelRanges[0].Ptr(),
- Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(),
- })
- } else {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: fmt.Sprintf("Extraneous label for %s", name),
- Detail: fmt.Sprintf(
- "Only %d labels (%s) are expected for %s blocks.",
- len(blockS.LabelNames), strings.Join(blockS.LabelNames, ", "), name,
- ),
- Subject: block.LabelRanges[len(blockS.LabelNames)].Ptr(),
- Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(),
- })
- }
- continue
- }
-
- if len(block.Labels) < len(blockS.LabelNames) {
- name := block.Type
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: fmt.Sprintf("Missing %s for %s", blockS.LabelNames[len(block.Labels)], name),
- Detail: fmt.Sprintf(
- "All %s blocks must have %d labels (%s).",
- name, len(blockS.LabelNames), strings.Join(blockS.LabelNames, ", "),
- ),
- Subject: &block.OpenBraceRange,
- Context: hcl.RangeBetween(block.TypeRange, block.OpenBraceRange).Ptr(),
- })
- continue
- }
-
- blocks = append(blocks, block.AsHCLBlock())
- }
-
- // We hide blocks only after we've processed all of them, since otherwise
- // we can't process more than one of the same type.
- for _, blockS := range schema.Blocks {
- hiddenBlocks[blockS.Type] = struct{}{}
- }
-
- remain := &Body{
- Attributes: b.Attributes,
- Blocks: b.Blocks,
-
- hiddenAttrs: hiddenAttrs,
- hiddenBlocks: hiddenBlocks,
-
- SrcRange: b.SrcRange,
- EndRange: b.EndRange,
- }
-
- return &hcl.BodyContent{
- Attributes: attrs,
- Blocks: blocks,
-
- MissingItemRange: b.MissingItemRange(),
- }, remain, diags
-}
-
-func (b *Body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
- attrs := make(hcl.Attributes)
- var diags hcl.Diagnostics
-
- if len(b.Blocks) > 0 {
- example := b.Blocks[0]
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: fmt.Sprintf("Unexpected %q block", example.Type),
- Detail: "Blocks are not allowed here.",
- Subject: &example.TypeRange,
- })
- // we will continue processing anyway, and return the attributes
- // we are able to find so that certain analyses can still be done
- // in the face of errors.
- }
-
- if b.Attributes == nil {
- return attrs, diags
- }
-
- for name, attr := range b.Attributes {
- if _, hidden := b.hiddenAttrs[name]; hidden {
- continue
- }
- attrs[name] = attr.AsHCLAttribute()
- }
-
- return attrs, diags
-}
-
-func (b *Body) MissingItemRange() hcl.Range {
- return hcl.Range{
- Filename: b.SrcRange.Filename,
- Start: b.SrcRange.Start,
- End: b.SrcRange.Start,
- }
-}
-
-// Attributes is the collection of attribute definitions within a body.
-type Attributes map[string]*Attribute
-
-func (a Attributes) walkChildNodes(w internalWalkFunc) {
- for _, attr := range a {
- w(attr)
- }
-}
-
-// Range returns the range of some arbitrary point within the set of
-// attributes, or an invalid range if there are no attributes.
-//
-// This is provided only to complete the Node interface, but has no practical
-// use.
-func (a Attributes) Range() hcl.Range {
- // An attributes doesn't really have a useful range to report, since
- // it's just a grouping construct. So we'll arbitrarily take the
- // range of one of the attributes, or produce an invalid range if we have
- // none. In practice, there's little reason to ask for the range of
- // an Attributes.
- for _, attr := range a {
- return attr.Range()
- }
- return hcl.Range{
- Filename: "",
- }
-}
-
-// Attribute represents a single attribute definition within a body.
-type Attribute struct {
- Name string
- Expr Expression
-
- SrcRange hcl.Range
- NameRange hcl.Range
- EqualsRange hcl.Range
-}
-
-func (a *Attribute) walkChildNodes(w internalWalkFunc) {
- w(a.Expr)
-}
-
-func (a *Attribute) Range() hcl.Range {
- return a.SrcRange
-}
-
-// AsHCLAttribute returns the block data expressed as a *hcl.Attribute.
-func (a *Attribute) AsHCLAttribute() *hcl.Attribute {
- if a == nil {
- return nil
- }
- return &hcl.Attribute{
- Name: a.Name,
- Expr: a.Expr,
-
- Range: a.SrcRange,
- NameRange: a.NameRange,
- }
-}
-
-// Blocks is the list of nested blocks within a body.
-type Blocks []*Block
-
-func (bs Blocks) walkChildNodes(w internalWalkFunc) {
- for _, block := range bs {
- w(block)
- }
-}
-
-// Range returns the range of some arbitrary point within the list of
-// blocks, or an invalid range if there are no blocks.
-//
-// This is provided only to complete the Node interface, but has no practical
-// use.
-func (bs Blocks) Range() hcl.Range {
- if len(bs) > 0 {
- return bs[0].Range()
- }
- return hcl.Range{
- Filename: "",
- }
-}
-
-// Block represents a nested block structure
-type Block struct {
- Type string
- Labels []string
- Body *Body
-
- TypeRange hcl.Range
- LabelRanges []hcl.Range
- OpenBraceRange hcl.Range
- CloseBraceRange hcl.Range
-}
-
-func (b *Block) walkChildNodes(w internalWalkFunc) {
- w(b.Body)
-}
-
-func (b *Block) Range() hcl.Range {
- return hcl.RangeBetween(b.TypeRange, b.CloseBraceRange)
-}
-
-func (b *Block) DefRange() hcl.Range {
- lastHeaderRange := b.TypeRange
- if len(b.LabelRanges) > 0 {
- lastHeaderRange = b.LabelRanges[len(b.LabelRanges)-1]
- }
- return hcl.RangeBetween(b.TypeRange, lastHeaderRange)
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure_at_pos.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure_at_pos.go
deleted file mode 100644
index 50857168..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/structure_at_pos.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-import (
- "github.com/hashicorp/hcl/v2"
-)
-
-// -----------------------------------------------------------------------------
-// The methods in this file are all optional extension methods that serve to
-// implement the methods of the same name on *hcl.File when its root body
-// is provided by this package.
-// -----------------------------------------------------------------------------
-
-// BlocksAtPos implements the method of the same name for an *hcl.File that
-// is backed by a *Body.
-func (b *Body) BlocksAtPos(pos hcl.Pos) []*hcl.Block {
- list, _ := b.blocksAtPos(pos, true)
- return list
-}
-
-// InnermostBlockAtPos implements the method of the same name for an *hcl.File
-// that is backed by a *Body.
-func (b *Body) InnermostBlockAtPos(pos hcl.Pos) *hcl.Block {
- _, innermost := b.blocksAtPos(pos, false)
- return innermost.AsHCLBlock()
-}
-
-// OutermostBlockAtPos implements the method of the same name for an *hcl.File
-// that is backed by a *Body.
-func (b *Body) OutermostBlockAtPos(pos hcl.Pos) *hcl.Block {
- return b.outermostBlockAtPos(pos).AsHCLBlock()
-}
-
-// blocksAtPos is the internal engine of both BlocksAtPos and
-// InnermostBlockAtPos, which both need to do the same logic but return a
-// differently-shaped result.
-//
-// list is nil if makeList is false, avoiding an allocation. Innermost is
-// always set, and if the returned list is non-nil it will always match the
-// final element from that list.
-func (b *Body) blocksAtPos(pos hcl.Pos, makeList bool) (list []*hcl.Block, innermost *Block) {
- current := b
-
-Blocks:
- for current != nil {
- for _, block := range current.Blocks {
- wholeRange := hcl.RangeBetween(block.TypeRange, block.CloseBraceRange)
- if wholeRange.ContainsPos(pos) {
- innermost = block
- if makeList {
- list = append(list, innermost.AsHCLBlock())
- }
- current = block.Body
- continue Blocks
- }
- }
-
- // If we fall out here then none of the current body's nested blocks
- // contain the position we are looking for, and so we're done.
- break
- }
-
- return
-}
-
-// outermostBlockAtPos is the internal version of OutermostBlockAtPos that
-// returns a hclsyntax.Block rather than an hcl.Block, allowing for further
-// analysis if necessary.
-func (b *Body) outermostBlockAtPos(pos hcl.Pos) *Block {
- // This is similar to blocksAtPos, but simpler because we know it only
- // ever needs to search the first level of nested blocks.
-
- for _, block := range b.Blocks {
- wholeRange := hcl.RangeBetween(block.TypeRange, block.CloseBraceRange)
- if wholeRange.ContainsPos(pos) {
- return block
- }
- }
-
- return nil
-}
-
-// AttributeAtPos implements the method of the same name for an *hcl.File
-// that is backed by a *Body.
-func (b *Body) AttributeAtPos(pos hcl.Pos) *hcl.Attribute {
- return b.attributeAtPos(pos).AsHCLAttribute()
-}
-
-// attributeAtPos is the internal version of AttributeAtPos that returns a
-// hclsyntax.Block rather than an hcl.Block, allowing for further analysis if
-// necessary.
-func (b *Body) attributeAtPos(pos hcl.Pos) *Attribute {
- searchBody := b
- _, block := b.blocksAtPos(pos, false)
- if block != nil {
- searchBody = block.Body
- }
-
- for _, attr := range searchBody.Attributes {
- if attr.SrcRange.ContainsPos(pos) {
- return attr
- }
- }
-
- return nil
-}
-
-// OutermostExprAtPos implements the method of the same name for an *hcl.File
-// that is backed by a *Body.
-func (b *Body) OutermostExprAtPos(pos hcl.Pos) hcl.Expression {
- attr := b.attributeAtPos(pos)
- if attr == nil {
- return nil
- }
- if !attr.Expr.Range().ContainsPos(pos) {
- return nil
- }
- return attr.Expr
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go b/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go
deleted file mode 100644
index 47648b8f..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/hclsyntax/token.go
+++ /dev/null
@@ -1,337 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hclsyntax
-
-import (
- "bytes"
- "fmt"
-
- "github.com/apparentlymart/go-textseg/v15/textseg"
- "github.com/hashicorp/hcl/v2"
-)
-
-// Token represents a sequence of bytes from some HCL code that has been
-// tagged with a type and its range within the source file.
-type Token struct {
- Type TokenType
- Bytes []byte
- Range hcl.Range
-}
-
-// Tokens is a slice of Token.
-type Tokens []Token
-
-// TokenType is an enumeration used for the Type field on Token.
-type TokenType rune
-
-const (
- // Single-character tokens are represented by their own character, for
- // convenience in producing these within the scanner. However, the values
- // are otherwise arbitrary and just intended to be mnemonic for humans
- // who might see them in debug output.
-
- TokenOBrace TokenType = '{'
- TokenCBrace TokenType = '}'
- TokenOBrack TokenType = '['
- TokenCBrack TokenType = ']'
- TokenOParen TokenType = '('
- TokenCParen TokenType = ')'
- TokenOQuote TokenType = '«'
- TokenCQuote TokenType = '»'
- TokenOHeredoc TokenType = 'H'
- TokenCHeredoc TokenType = 'h'
-
- TokenStar TokenType = '*'
- TokenSlash TokenType = '/'
- TokenPlus TokenType = '+'
- TokenMinus TokenType = '-'
- TokenPercent TokenType = '%'
-
- TokenEqual TokenType = '='
- TokenEqualOp TokenType = '≔'
- TokenNotEqual TokenType = '≠'
- TokenLessThan TokenType = '<'
- TokenLessThanEq TokenType = '≤'
- TokenGreaterThan TokenType = '>'
- TokenGreaterThanEq TokenType = '≥'
-
- TokenAnd TokenType = '∧'
- TokenOr TokenType = '∨'
- TokenBang TokenType = '!'
-
- TokenDot TokenType = '.'
- TokenComma TokenType = ','
-
- TokenDoubleColon TokenType = '⸬'
- TokenEllipsis TokenType = '…'
- TokenFatArrow TokenType = '⇒'
-
- TokenQuestion TokenType = '?'
- TokenColon TokenType = ':'
-
- TokenTemplateInterp TokenType = '∫'
- TokenTemplateControl TokenType = 'λ'
- TokenTemplateSeqEnd TokenType = '∎'
-
- TokenQuotedLit TokenType = 'Q' // might contain backslash escapes
- TokenStringLit TokenType = 'S' // cannot contain backslash escapes
- TokenNumberLit TokenType = 'N'
- TokenIdent TokenType = 'I'
-
- TokenComment TokenType = 'C'
-
- TokenNewline TokenType = '\n'
- TokenEOF TokenType = '␄'
-
- // The rest are not used in the language but recognized by the scanner so
- // we can generate good diagnostics in the parser when users try to write
- // things that might work in other languages they are familiar with, or
- // simply make incorrect assumptions about the HCL language.
-
- TokenBitwiseAnd TokenType = '&'
- TokenBitwiseOr TokenType = '|'
- TokenBitwiseNot TokenType = '~'
- TokenBitwiseXor TokenType = '^'
- TokenStarStar TokenType = '➚'
- TokenApostrophe TokenType = '\''
- TokenBacktick TokenType = '`'
- TokenSemicolon TokenType = ';'
- TokenTabs TokenType = '␉'
- TokenInvalid TokenType = '�'
- TokenBadUTF8 TokenType = '💩'
- TokenQuotedNewline TokenType = ''
-
- // TokenNil is a placeholder for when a token is required but none is
- // available, e.g. when reporting errors. The scanner will never produce
- // this as part of a token stream.
- TokenNil TokenType = '\x00'
-)
-
-func (t TokenType) GoString() string {
- return fmt.Sprintf("hclsyntax.%s", t.String())
-}
-
-type scanMode int
-
-const (
- scanNormal scanMode = iota
- scanTemplate
- scanIdentOnly
-)
-
-type tokenAccum struct {
- Filename string
- Bytes []byte
- Pos hcl.Pos
- Tokens []Token
- StartByte int
-}
-
-func (f *tokenAccum) emitToken(ty TokenType, startOfs, endOfs int) {
- // Walk through our buffer to figure out how much we need to adjust
- // the start pos to get our end pos.
-
- start := f.Pos
- start.Column += startOfs + f.StartByte - f.Pos.Byte // Safe because only ASCII spaces can be in the offset
- start.Byte = startOfs + f.StartByte
-
- end := start
- end.Byte = endOfs + f.StartByte
- b := f.Bytes[startOfs:endOfs]
- for len(b) > 0 {
- advance, seq, _ := textseg.ScanGraphemeClusters(b, true)
- if (len(seq) == 1 && seq[0] == '\n') || (len(seq) == 2 && seq[0] == '\r' && seq[1] == '\n') {
- end.Line++
- end.Column = 1
- } else {
- end.Column++
- }
- b = b[advance:]
- }
-
- f.Pos = end
-
- f.Tokens = append(f.Tokens, Token{
- Type: ty,
- Bytes: f.Bytes[startOfs:endOfs],
- Range: hcl.Range{
- Filename: f.Filename,
- Start: start,
- End: end,
- },
- })
-}
-
-type heredocInProgress struct {
- Marker []byte
- StartOfLine bool
-}
-
-func tokenOpensFlushHeredoc(tok Token) bool {
- if tok.Type != TokenOHeredoc {
- return false
- }
- return bytes.HasPrefix(tok.Bytes, []byte{'<', '<', '-'})
-}
-
-// checkInvalidTokens does a simple pass across the given tokens and generates
-// diagnostics for tokens that should _never_ appear in HCL source. This
-// is intended to avoid the need for the parser to have special support
-// for them all over.
-//
-// Returns a diagnostics with no errors if everything seems acceptable.
-// Otherwise, returns zero or more error diagnostics, though tries to limit
-// repetition of the same information.
-func checkInvalidTokens(tokens Tokens) hcl.Diagnostics {
- var diags hcl.Diagnostics
-
- toldBitwise := 0
- toldExponent := 0
- toldBacktick := 0
- toldApostrophe := 0
- toldSemicolon := 0
- toldTabs := 0
- toldBadUTF8 := 0
-
- for _, tok := range tokens {
- tokRange := func() *hcl.Range {
- r := tok.Range
- return &r
- }
-
- switch tok.Type {
- case TokenBitwiseAnd, TokenBitwiseOr, TokenBitwiseXor, TokenBitwiseNot:
- if toldBitwise < 4 {
- var suggestion string
- switch tok.Type {
- case TokenBitwiseAnd:
- suggestion = " Did you mean boolean AND (\"&&\")?"
- case TokenBitwiseOr:
- suggestion = " Did you mean boolean OR (\"||\")?"
- case TokenBitwiseNot:
- suggestion = " Did you mean boolean NOT (\"!\")?"
- }
-
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unsupported operator",
- Detail: fmt.Sprintf("Bitwise operators are not supported.%s", suggestion),
- Subject: tokRange(),
- })
- toldBitwise++
- }
- case TokenStarStar:
- if toldExponent < 1 {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Unsupported operator",
- Detail: "\"**\" is not a supported operator. Exponentiation is not supported as an operator.",
- Subject: tokRange(),
- })
-
- toldExponent++
- }
- case TokenBacktick:
- // Only report for alternating (even) backticks, so we won't report both start and ends of the same
- // backtick-quoted string.
- if (toldBacktick % 2) == 0 {
- diags = append(diags, &hcl.Diagnostic{
- Severity: hcl.DiagError,
- Summary: "Invalid character",
- Detail: "The \"`\" character is not valid. To create a multi-line string, use the \"heredoc\" syntax, like \"<
-#
-# This script uses the unicode spec to generate a Ragel state machine
-# that recognizes unicode alphanumeric characters. It generates 5
-# character classes: uupper, ulower, ualpha, udigit, and ualnum.
-# Currently supported encodings are UTF-8 [default] and UCS-4.
-#
-# Usage: unicode2ragel.rb [options]
-# -e, --encoding [ucs4 | utf8] Data encoding
-# -h, --help Show this message
-#
-# This script was originally written as part of the Ferret search
-# engine library.
-#
-# Author: Rakan El-Khalil
-
-require 'optparse'
-require 'open-uri'
-
-ENCODINGS = [ :utf8, :ucs4 ]
-ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" }
-DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt"
-DEFAULT_MACHINE_NAME= "WChar"
-
-###
-# Display vars & default option
-
-TOTAL_WIDTH = 80
-RANGE_WIDTH = 23
-@encoding = :utf8
-@chart_url = DEFAULT_CHART_URL
-machine_name = DEFAULT_MACHINE_NAME
-properties = []
-@output = $stdout
-
-###
-# Option parsing
-
-cli_opts = OptionParser.new do |opts|
- opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o|
- @encoding = o.downcase.to_sym
- end
- opts.on("-h", "--help", "Show this message") do
- puts opts
- exit
- end
- opts.on("-u", "--url URL", "URL to process") do |o|
- @chart_url = o
- end
- opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o|
- machine_name = o
- end
- opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o|
- properties = o
- end
- opts.on("-o", "--output FILE", "output file") do |o|
- @output = File.new(o, "w+")
- end
-end
-
-cli_opts.parse(ARGV)
-unless ENCODINGS.member? @encoding
- puts "Invalid encoding: #{@encoding}"
- puts cli_opts
- exit
-end
-
-##
-# Downloads the document at url and yields every alpha line's hex
-# range and description.
-
-def each_alpha( url, property )
- open( url ) do |file|
- file.each_line do |line|
- next if line =~ /^#/;
- next if line !~ /; #{property} #/;
-
- range, description = line.split(/;/)
- range.strip!
- description.gsub!(/.*#/, '').strip!
-
- if range =~ /\.\./
- start, stop = range.split '..'
- else start = stop = range
- end
-
- yield start.hex .. stop.hex, description
- end
- end
-end
-
-###
-# Formats to hex at minimum width
-
-def to_hex( n )
- r = "%0X" % n
- r = "0#{r}" unless (r.length % 2).zero?
- r
-end
-
-###
-# UCS4 is just a straight hex conversion of the unicode codepoint.
-
-def to_ucs4( range )
- rangestr = "0x" + to_hex(range.begin)
- rangestr << "..0x" + to_hex(range.end) if range.begin != range.end
- [ rangestr ]
-end
-
-##
-# 0x00 - 0x7f -> 0zzzzzzz[7]
-# 0x80 - 0x7ff -> 110yyyyy[5] 10zzzzzz[6]
-# 0x800 - 0xffff -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6]
-# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6]
-
-UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff]
-
-def to_utf8_enc( n )
- r = 0
- if n <= 0x7f
- r = n
- elsif n <= 0x7ff
- y = 0xc0 | (n >> 6)
- z = 0x80 | (n & 0x3f)
- r = y << 8 | z
- elsif n <= 0xffff
- x = 0xe0 | (n >> 12)
- y = 0x80 | (n >> 6) & 0x3f
- z = 0x80 | n & 0x3f
- r = x << 16 | y << 8 | z
- elsif n <= 0x10ffff
- w = 0xf0 | (n >> 18)
- x = 0x80 | (n >> 12) & 0x3f
- y = 0x80 | (n >> 6) & 0x3f
- z = 0x80 | n & 0x3f
- r = w << 24 | x << 16 | y << 8 | z
- end
-
- to_hex(r)
-end
-
-def from_utf8_enc( n )
- n = n.hex
- r = 0
- if n <= 0x7f
- r = n
- elsif n <= 0xdfff
- y = (n >> 8) & 0x1f
- z = n & 0x3f
- r = y << 6 | z
- elsif n <= 0xefffff
- x = (n >> 16) & 0x0f
- y = (n >> 8) & 0x3f
- z = n & 0x3f
- r = x << 10 | y << 6 | z
- elsif n <= 0xf7ffffff
- w = (n >> 24) & 0x07
- x = (n >> 16) & 0x3f
- y = (n >> 8) & 0x3f
- z = n & 0x3f
- r = w << 18 | x << 12 | y << 6 | z
- end
- r
-end
-
-###
-# Given a range, splits it up into ranges that can be continuously
-# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff]
-# This is not strictly needed since the current [5.1] unicode standard
-# doesn't have ranges that straddle utf8 boundaries. This is included
-# for completeness as there is no telling if that will ever change.
-
-def utf8_ranges( range )
- ranges = []
- UTF8_BOUNDARIES.each do |max|
- if range.begin <= max
- if range.end <= max
- ranges << range
- return ranges
- end
-
- ranges << (range.begin .. max)
- range = (max + 1) .. range.end
- end
- end
- ranges
-end
-
-def build_range( start, stop )
- size = start.size/2
- left = size - 1
- return [""] if size < 1
-
- a = start[0..1]
- b = stop[0..1]
-
- ###
- # Shared prefix
-
- if a == b
- return build_range(start[2..-1], stop[2..-1]).map do |elt|
- "0x#{a} " + elt
- end
- end
-
- ###
- # Unshared prefix, end of run
-
- return ["0x#{a}..0x#{b} "] if left.zero?
-
- ###
- # Unshared prefix, not end of run
- # Range can be 0x123456..0x56789A
- # Which is equivalent to:
- # 0x123456 .. 0x12FFFF
- # 0x130000 .. 0x55FFFF
- # 0x560000 .. 0x56789A
-
- ret = []
- ret << build_range(start, a + "FF" * left)
-
- ###
- # Only generate middle range if need be.
-
- if a.hex+1 != b.hex
- max = to_hex(b.hex - 1)
- max = "FF" if b == "FF"
- ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left
- end
-
- ###
- # Don't generate last range if it is covered by first range
-
- ret << build_range(b + "00" * left, stop) unless b == "FF"
- ret.flatten!
-end
-
-def to_utf8( range )
- utf8_ranges( range ).map do |r|
- begin_enc = to_utf8_enc(r.begin)
- end_enc = to_utf8_enc(r.end)
- build_range begin_enc, end_enc
- end.flatten!
-end
-
-##
-# Perform a 3-way comparison of the number of codepoints advertised by
-# the unicode spec for the given range, the originally parsed range,
-# and the resulting utf8 encoded range.
-
-def count_codepoints( code )
- code.split(' ').inject(1) do |acc, elt|
- if elt =~ /0x(.+)\.\.0x(.+)/
- if @encoding == :utf8
- acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1)
- else
- acc * ($2.hex - $1.hex + 1)
- end
- else
- acc
- end
- end
-end
-
-def is_valid?( range, desc, codes )
- spec_count = 1
- spec_count = $1.to_i if desc =~ /\[(\d+)\]/
- range_count = range.end - range.begin + 1
-
- sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) }
- sum == spec_count and sum == range_count
-end
-
-##
-# Generate the state maching to stdout
-
-def generate_machine( name, property )
- pipe = " "
- @output.puts " #{name} = "
- each_alpha( @chart_url, property ) do |range, desc|
-
- codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range)
-
- #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless
- # is_valid? range, desc, codes
-
- range_width = codes.map { |a| a.size }.max
- range_width = RANGE_WIDTH if range_width < RANGE_WIDTH
-
- desc_width = TOTAL_WIDTH - RANGE_WIDTH - 11
- desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH
-
- if desc.size > desc_width
- desc = desc[0..desc_width - 4] + "..."
- end
-
- codes.each_with_index do |r, idx|
- desc = "" unless idx.zero?
- code = "%-#{range_width}s" % r
- @output.puts " #{pipe} #{code} ##{desc}"
- pipe = "|"
- end
- end
- @output.puts " ;"
- @output.puts ""
-end
-
-@output.puts <",
- }
- }
-
- // arbitrarily use the first body's missing item range
- return mb[0].MissingItemRange()
-}
-
-func (mb mergedBodies) mergedContent(schema *BodySchema, partial bool) (*BodyContent, Body, Diagnostics) {
- // We need to produce a new schema with none of the attributes marked as
- // required, since _any one_ of our bodies can contribute an attribute value.
- // We'll separately check that all required attributes are present at
- // the end.
- mergedSchema := &BodySchema{
- Blocks: schema.Blocks,
- }
- for _, attrS := range schema.Attributes {
- mergedAttrS := attrS
- mergedAttrS.Required = false
- mergedSchema.Attributes = append(mergedSchema.Attributes, mergedAttrS)
- }
-
- var mergedLeftovers []Body
- content := &BodyContent{
- Attributes: map[string]*Attribute{},
- }
-
- var diags Diagnostics
- for _, body := range mb {
- var thisContent *BodyContent
- var thisLeftovers Body
- var thisDiags Diagnostics
-
- if partial {
- thisContent, thisLeftovers, thisDiags = body.PartialContent(mergedSchema)
- } else {
- thisContent, thisDiags = body.Content(mergedSchema)
- }
-
- if thisLeftovers != nil {
- mergedLeftovers = append(mergedLeftovers, thisLeftovers)
- }
- if len(thisDiags) != 0 {
- diags = append(diags, thisDiags...)
- }
-
- if thisContent.Attributes != nil {
- for name, attr := range thisContent.Attributes {
- if existing := content.Attributes[name]; existing != nil {
- diags = diags.Append(&Diagnostic{
- Severity: DiagError,
- Summary: "Duplicate argument",
- Detail: fmt.Sprintf(
- "Argument %q was already set at %s",
- name, existing.NameRange.String(),
- ),
- Subject: &attr.NameRange,
- })
- continue
- }
- content.Attributes[name] = attr
- }
- }
-
- if len(thisContent.Blocks) != 0 {
- content.Blocks = append(content.Blocks, thisContent.Blocks...)
- }
- }
-
- // Finally, we check for required attributes.
- for _, attrS := range schema.Attributes {
- if !attrS.Required {
- continue
- }
-
- if content.Attributes[attrS.Name] == nil {
- // We don't have any context here to produce a good diagnostic,
- // which is why we warn in the Content docstring to minimize the
- // use of required attributes on merged bodies.
- diags = diags.Append(&Diagnostic{
- Severity: DiagError,
- Summary: "Missing required argument",
- Detail: fmt.Sprintf(
- "The argument %q is required, but was not set.",
- attrS.Name,
- ),
- })
- }
- }
-
- leftoverBody := MergeBodies(mergedLeftovers)
- return content, leftoverBody, diags
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/ops.go b/vendor/github.com/hashicorp/hcl/v2/ops.go
deleted file mode 100644
index bdf23614..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/ops.go
+++ /dev/null
@@ -1,435 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hcl
-
-import (
- "fmt"
- "math/big"
-
- "github.com/zclconf/go-cty/cty"
- "github.com/zclconf/go-cty/cty/convert"
-)
-
-// Index is a helper function that performs the same operation as the index
-// operator in the HCL expression language. That is, the result is the
-// same as it would be for collection[key] in a configuration expression.
-//
-// This is exported so that applications can perform indexing in a manner
-// consistent with how the language does it, including handling of null and
-// unknown values, etc.
-//
-// Diagnostics are produced if the given combination of values is not valid.
-// Therefore a pointer to a source range must be provided to use in diagnostics,
-// though nil can be provided if the calling application is going to
-// ignore the subject of the returned diagnostics anyway.
-func Index(collection, key cty.Value, srcRange *Range) (cty.Value, Diagnostics) {
- const invalidIndex = "Invalid index"
-
- if collection.IsNull() {
- return cty.DynamicVal, Diagnostics{
- {
- Severity: DiagError,
- Summary: "Attempt to index null value",
- Detail: "This value is null, so it does not have any indices.",
- Subject: srcRange,
- },
- }
- }
- if key.IsNull() {
- return cty.DynamicVal, Diagnostics{
- {
- Severity: DiagError,
- Summary: invalidIndex,
- Detail: "Can't use a null value as an indexing key.",
- Subject: srcRange,
- },
- }
- }
- ty := collection.Type()
- kty := key.Type()
- if kty == cty.DynamicPseudoType || ty == cty.DynamicPseudoType {
- return cty.DynamicVal, nil
- }
-
- switch {
-
- case ty.IsListType() || ty.IsTupleType() || ty.IsMapType():
- var wantType cty.Type
- switch {
- case ty.IsListType() || ty.IsTupleType():
- wantType = cty.Number
- case ty.IsMapType():
- wantType = cty.String
- default:
- // should never happen
- panic("don't know what key type we want")
- }
-
- key, keyErr := convert.Convert(key, wantType)
- if keyErr != nil {
- return cty.DynamicVal, Diagnostics{
- {
- Severity: DiagError,
- Summary: invalidIndex,
- Detail: fmt.Sprintf(
- "The given key does not identify an element in this collection value: %s.",
- keyErr.Error(),
- ),
- Subject: srcRange,
- },
- }
- }
-
- // Here we drop marks from HasIndex result, in order to allow basic
- // traversal of a marked list, tuple, or map in the same way we can
- // traverse a marked object
- has, _ := collection.HasIndex(key).Unmark()
- if !has.IsKnown() {
- if ty.IsTupleType() {
- return cty.DynamicVal, nil
- } else {
- return cty.UnknownVal(ty.ElementType()), nil
- }
- }
- if has.False() {
- if (ty.IsListType() || ty.IsTupleType()) && key.Type().Equals(cty.Number) {
- if key.IsKnown() && !key.IsNull() {
- // NOTE: we don't know what any marks might've represented
- // up at the calling application layer, so we must avoid
- // showing the literal number value in these error messages
- // in case the mark represents something important, such as
- // a value being "sensitive".
- key, _ := key.Unmark()
- bf := key.AsBigFloat()
- if _, acc := bf.Int(nil); acc != big.Exact {
- // We have a more specialized error message for the
- // situation of using a fractional number to index into
- // a sequence, because that will tend to happen if the
- // user is trying to use division to calculate an index
- // and not realizing that HCL does float division
- // rather than integer division.
- return cty.DynamicVal, Diagnostics{
- {
- Severity: DiagError,
- Summary: invalidIndex,
- Detail: "The given key does not identify an element in this collection value: indexing a sequence requires a whole number, but the given index has a fractional part.",
- Subject: srcRange,
- },
- }
- }
-
- if bf.Sign() < 0 {
- // Some other languages allow negative indices to
- // select "backwards" from the end of the sequence,
- // but HCL doesn't do that in order to give better
- // feedback if a dynamic index is calculated
- // incorrectly.
- return cty.DynamicVal, Diagnostics{
- {
- Severity: DiagError,
- Summary: invalidIndex,
- Detail: "The given key does not identify an element in this collection value: a negative number is not a valid index for a sequence.",
- Subject: srcRange,
- },
- }
- }
- if lenVal := collection.Length(); lenVal.IsKnown() && !lenVal.IsMarked() {
- // Length always returns a number, and we already
- // checked that it's a known number, so this is safe.
- lenBF := lenVal.AsBigFloat()
- var result big.Float
- result.Sub(bf, lenBF)
- if result.Sign() < 1 {
- if lenBF.Sign() == 0 {
- return cty.DynamicVal, Diagnostics{
- {
- Severity: DiagError,
- Summary: invalidIndex,
- Detail: "The given key does not identify an element in this collection value: the collection has no elements.",
- Subject: srcRange,
- },
- }
- } else {
- return cty.DynamicVal, Diagnostics{
- {
- Severity: DiagError,
- Summary: invalidIndex,
- Detail: "The given key does not identify an element in this collection value: the given index is greater than or equal to the length of the collection.",
- Subject: srcRange,
- },
- }
- }
- }
- }
- }
- }
-
- // If this is not one of the special situations we handled above
- // then we'll fall back on a very generic message.
- return cty.DynamicVal, Diagnostics{
- {
- Severity: DiagError,
- Summary: invalidIndex,
- Detail: "The given key does not identify an element in this collection value.",
- Subject: srcRange,
- },
- }
- }
-
- return collection.Index(key), nil
-
- case ty.IsObjectType():
- wasNumber := key.Type() == cty.Number
- key, keyErr := convert.Convert(key, cty.String)
- if keyErr != nil {
- return cty.DynamicVal, Diagnostics{
- {
- Severity: DiagError,
- Summary: invalidIndex,
- Detail: fmt.Sprintf(
- "The given key does not identify an element in this collection value: %s.",
- keyErr.Error(),
- ),
- Subject: srcRange,
- },
- }
- }
- if !collection.IsKnown() {
- return cty.DynamicVal, nil
- }
- if !key.IsKnown() {
- return cty.DynamicVal, nil
- }
-
- key, _ = key.Unmark()
- attrName := key.AsString()
-
- if !ty.HasAttribute(attrName) {
- var suggestion string
- if wasNumber {
- // We note this only as an addendum to an error we would've
- // already returned anyway, because it is valid (albeit weird)
- // to have an attribute whose name is just decimal digits
- // and then access that attribute using a number whose
- // decimal representation is the same digits.
- suggestion = " An object only supports looking up attributes by name, not by numeric index."
- }
- return cty.DynamicVal, Diagnostics{
- {
- Severity: DiagError,
- Summary: invalidIndex,
- Detail: fmt.Sprintf("The given key does not identify an element in this collection value.%s", suggestion),
- Subject: srcRange,
- },
- }
- }
-
- return collection.GetAttr(attrName), nil
-
- case ty.IsSetType():
- return cty.DynamicVal, Diagnostics{
- {
- Severity: DiagError,
- Summary: invalidIndex,
- Detail: "Elements of a set are identified only by their value and don't have any separate index or key to select with, so it's only possible to perform operations across all elements of the set.",
- Subject: srcRange,
- },
- }
-
- default:
- return cty.DynamicVal, Diagnostics{
- {
- Severity: DiagError,
- Summary: invalidIndex,
- Detail: "This value does not have any indices.",
- Subject: srcRange,
- },
- }
- }
-
-}
-
-// GetAttr is a helper function that performs the same operation as the
-// attribute access in the HCL expression language. That is, the result is the
-// same as it would be for obj.attr in a configuration expression.
-//
-// This is exported so that applications can access attributes in a manner
-// consistent with how the language does it, including handling of null and
-// unknown values, etc.
-//
-// Diagnostics are produced if the given combination of values is not valid.
-// Therefore a pointer to a source range must be provided to use in diagnostics,
-// though nil can be provided if the calling application is going to
-// ignore the subject of the returned diagnostics anyway.
-func GetAttr(obj cty.Value, attrName string, srcRange *Range) (cty.Value, Diagnostics) {
- if obj.IsNull() {
- return cty.DynamicVal, Diagnostics{
- {
- Severity: DiagError,
- Summary: "Attempt to get attribute from null value",
- Detail: "This value is null, so it does not have any attributes.",
- Subject: srcRange,
- },
- }
- }
-
- const unsupportedAttr = "Unsupported attribute"
-
- ty := obj.Type()
- switch {
- case ty.IsObjectType():
- if !ty.HasAttribute(attrName) {
- return cty.DynamicVal, Diagnostics{
- {
- Severity: DiagError,
- Summary: unsupportedAttr,
- Detail: fmt.Sprintf("This object does not have an attribute named %q.", attrName),
- Subject: srcRange,
- },
- }
- }
-
- if !obj.IsKnown() {
- return cty.UnknownVal(ty.AttributeType(attrName)), nil
- }
-
- return obj.GetAttr(attrName), nil
- case ty.IsMapType():
- if !obj.IsKnown() {
- return cty.UnknownVal(ty.ElementType()), nil
- }
-
- idx := cty.StringVal(attrName)
-
- // Here we drop marks from HasIndex result, in order to allow basic
- // traversal of a marked map in the same way we can traverse a marked
- // object
- hasIndex, _ := obj.HasIndex(idx).Unmark()
- if hasIndex.False() {
- return cty.DynamicVal, Diagnostics{
- {
- Severity: DiagError,
- Summary: "Missing map element",
- Detail: fmt.Sprintf("This map does not have an element with the key %q.", attrName),
- Subject: srcRange,
- },
- }
- }
-
- return obj.Index(idx), nil
- case ty == cty.DynamicPseudoType:
- return cty.DynamicVal, nil
- case ty.IsListType() && ty.ElementType().IsObjectType():
- // It seems a common mistake to try to access attributes on a whole
- // list of objects rather than on a specific individual element, so
- // we have some extra hints for that case.
-
- switch {
- case ty.ElementType().HasAttribute(attrName):
- // This is a very strong indication that the user mistook the list
- // of objects for a single object, so we can be a little more
- // direct in our suggestion here.
- return cty.DynamicVal, Diagnostics{
- {
- Severity: DiagError,
- Summary: unsupportedAttr,
- Detail: fmt.Sprintf("Can't access attributes on a list of objects. Did you mean to access attribute %q for a specific element of the list, or across all elements of the list?", attrName),
- Subject: srcRange,
- },
- }
- default:
- return cty.DynamicVal, Diagnostics{
- {
- Severity: DiagError,
- Summary: unsupportedAttr,
- Detail: "Can't access attributes on a list of objects. Did you mean to access an attribute for a specific element of the list, or across all elements of the list?",
- Subject: srcRange,
- },
- }
- }
-
- case ty.IsSetType() && ty.ElementType().IsObjectType():
- // This is similar to the previous case, but we can't give such a
- // direct suggestion because there is no mechanism to select a single
- // item from a set.
- // We could potentially suggest using a for expression or splat
- // operator here, but we typically don't get into syntax specifics
- // in hcl.GetAttr suggestions because it's a general function used in
- // various other situations, such as in application-specific operations
- // that might have a more constraint set of alternative approaches.
-
- return cty.DynamicVal, Diagnostics{
- {
- Severity: DiagError,
- Summary: unsupportedAttr,
- Detail: "Can't access attributes on a set of objects. Did you mean to access an attribute across all elements of the set?",
- Subject: srcRange,
- },
- }
-
- case ty.IsPrimitiveType():
- return cty.DynamicVal, Diagnostics{
- {
- Severity: DiagError,
- Summary: unsupportedAttr,
- Detail: fmt.Sprintf("Can't access attributes on a primitive-typed value (%s).", ty.FriendlyName()),
- Subject: srcRange,
- },
- }
-
- default:
- return cty.DynamicVal, Diagnostics{
- {
- Severity: DiagError,
- Summary: unsupportedAttr,
- Detail: "This value does not have any attributes.",
- Subject: srcRange,
- },
- }
- }
-
-}
-
-// ApplyPath is a helper function that applies a cty.Path to a value using the
-// indexing and attribute access operations from HCL.
-//
-// This is similar to calling the path's own Apply method, but ApplyPath uses
-// the more relaxed typing rules that apply to these operations in HCL, rather
-// than cty's relatively-strict rules. ApplyPath is implemented in terms of
-// Index and GetAttr, and so it has the same behavior for individual steps
-// but will stop and return any errors returned by intermediate steps.
-//
-// Diagnostics are produced if the given path cannot be applied to the given
-// value. Therefore a pointer to a source range must be provided to use in
-// diagnostics, though nil can be provided if the calling application is going
-// to ignore the subject of the returned diagnostics anyway.
-func ApplyPath(val cty.Value, path cty.Path, srcRange *Range) (cty.Value, Diagnostics) {
- var diags Diagnostics
-
- for _, step := range path {
- var stepDiags Diagnostics
- switch ts := step.(type) {
- case cty.IndexStep:
- val, stepDiags = Index(val, ts.Key, srcRange)
- case cty.GetAttrStep:
- val, stepDiags = GetAttr(val, ts.Name, srcRange)
- default:
- // Should never happen because the above are all of the step types.
- diags = diags.Append(&Diagnostic{
- Severity: DiagError,
- Summary: "Invalid path step",
- Detail: fmt.Sprintf("Go type %T is not a valid path step. This is a bug in this program.", step),
- Subject: srcRange,
- })
- return cty.DynamicVal, diags
- }
-
- diags = append(diags, stepDiags...)
- if stepDiags.HasErrors() {
- return cty.DynamicVal, diags
- }
- }
-
- return val, diags
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/pos.go b/vendor/github.com/hashicorp/hcl/v2/pos.go
deleted file mode 100644
index 1bbbce87..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/pos.go
+++ /dev/null
@@ -1,278 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hcl
-
-import "fmt"
-
-// Pos represents a single position in a source file, by addressing the
-// start byte of a unicode character encoded in UTF-8.
-//
-// Pos is generally used only in the context of a Range, which then defines
-// which source file the position is within.
-type Pos struct {
- // Line is the source code line where this position points. Lines are
- // counted starting at 1 and incremented for each newline character
- // encountered.
- Line int
-
- // Column is the source code column where this position points, in
- // unicode characters, with counting starting at 1.
- //
- // Column counts characters as they appear visually, so for example a
- // latin letter with a combining diacritic mark counts as one character.
- // This is intended for rendering visual markers against source code in
- // contexts where these diacritics would be rendered in a single character
- // cell. Technically speaking, Column is counting grapheme clusters as
- // used in unicode normalization.
- Column int
-
- // Byte is the byte offset into the file where the indicated character
- // begins. This is a zero-based offset to the first byte of the first
- // UTF-8 codepoint sequence in the character, and thus gives a position
- // that can be resolved _without_ awareness of Unicode characters.
- Byte int
-}
-
-// InitialPos is a suitable position to use to mark the start of a file.
-var InitialPos = Pos{Byte: 0, Line: 1, Column: 1}
-
-// Range represents a span of characters between two positions in a source
-// file.
-//
-// This struct is usually used by value in types that represent AST nodes,
-// but by pointer in types that refer to the positions of other objects,
-// such as in diagnostics.
-type Range struct {
- // Filename is the name of the file into which this range's positions
- // point.
- Filename string
-
- // Start and End represent the bounds of this range. Start is inclusive
- // and End is exclusive.
- Start, End Pos
-}
-
-// RangeBetween returns a new range that spans from the beginning of the
-// start range to the end of the end range.
-//
-// The result is meaningless if the two ranges do not belong to the same
-// source file or if the end range appears before the start range.
-func RangeBetween(start, end Range) Range {
- return Range{
- Filename: start.Filename,
- Start: start.Start,
- End: end.End,
- }
-}
-
-// RangeOver returns a new range that covers both of the given ranges and
-// possibly additional content between them if the two ranges do not overlap.
-//
-// If either range is empty then it is ignored. The result is empty if both
-// given ranges are empty.
-//
-// The result is meaningless if the two ranges to not belong to the same
-// source file.
-func RangeOver(a, b Range) Range {
- if a.Empty() {
- return b
- }
- if b.Empty() {
- return a
- }
-
- var start, end Pos
- if a.Start.Byte < b.Start.Byte {
- start = a.Start
- } else {
- start = b.Start
- }
- if a.End.Byte > b.End.Byte {
- end = a.End
- } else {
- end = b.End
- }
- return Range{
- Filename: a.Filename,
- Start: start,
- End: end,
- }
-}
-
-// ContainsPos returns true if and only if the given position is contained within
-// the receiving range.
-//
-// In the unlikely case that the line/column information disagree with the byte
-// offset information in the given position or receiving range, the byte
-// offsets are given priority.
-func (r Range) ContainsPos(pos Pos) bool {
- return r.ContainsOffset(pos.Byte)
-}
-
-// ContainsOffset returns true if and only if the given byte offset is within
-// the receiving Range.
-func (r Range) ContainsOffset(offset int) bool {
- return offset >= r.Start.Byte && offset < r.End.Byte
-}
-
-// Ptr returns a pointer to a copy of the receiver. This is a convenience when
-// ranges in places where pointers are required, such as in Diagnostic, but
-// the range in question is returned from a method. Go would otherwise not
-// allow one to take the address of a function call.
-func (r Range) Ptr() *Range {
- return &r
-}
-
-// String returns a compact string representation of the receiver.
-// Callers should generally prefer to present a range more visually,
-// e.g. via markers directly on the relevant portion of source code.
-func (r Range) String() string {
- if r.Start.Line == r.End.Line {
- return fmt.Sprintf(
- "%s:%d,%d-%d",
- r.Filename,
- r.Start.Line, r.Start.Column,
- r.End.Column,
- )
- } else {
- return fmt.Sprintf(
- "%s:%d,%d-%d,%d",
- r.Filename,
- r.Start.Line, r.Start.Column,
- r.End.Line, r.End.Column,
- )
- }
-}
-
-func (r Range) Empty() bool {
- return r.Start.Byte == r.End.Byte
-}
-
-// CanSliceBytes returns true if SliceBytes could return an accurate
-// sub-slice of the given slice.
-//
-// This effectively tests whether the start and end offsets of the range
-// are within the bounds of the slice, and thus whether SliceBytes can be
-// trusted to produce an accurate start and end position within that slice.
-func (r Range) CanSliceBytes(b []byte) bool {
- switch {
- case r.Start.Byte < 0 || r.Start.Byte > len(b):
- return false
- case r.End.Byte < 0 || r.End.Byte > len(b):
- return false
- case r.End.Byte < r.Start.Byte:
- return false
- default:
- return true
- }
-}
-
-// SliceBytes returns a sub-slice of the given slice that is covered by the
-// receiving range, assuming that the given slice is the source code of the
-// file indicated by r.Filename.
-//
-// If the receiver refers to any byte offsets that are outside of the slice
-// then the result is constrained to the overlapping portion only, to avoid
-// a panic. Use CanSliceBytes to determine if the result is guaranteed to
-// be an accurate span of the requested range.
-func (r Range) SliceBytes(b []byte) []byte {
- start := r.Start.Byte
- end := r.End.Byte
- if start < 0 {
- start = 0
- } else if start > len(b) {
- start = len(b)
- }
- if end < 0 {
- end = 0
- } else if end > len(b) {
- end = len(b)
- }
- if end < start {
- end = start
- }
- return b[start:end]
-}
-
-// Overlaps returns true if the receiver and the other given range share any
-// characters in common.
-func (r Range) Overlaps(other Range) bool {
- switch {
- case r.Filename != other.Filename:
- // If the ranges are in different files then they can't possibly overlap
- return false
- case r.Empty() || other.Empty():
- // Empty ranges can never overlap
- return false
- case r.ContainsOffset(other.Start.Byte) || r.ContainsOffset(other.End.Byte):
- return true
- case other.ContainsOffset(r.Start.Byte) || other.ContainsOffset(r.End.Byte):
- return true
- default:
- return false
- }
-}
-
-// Overlap finds a range that is either identical to or a sub-range of both
-// the receiver and the other given range. It returns an empty range
-// within the receiver if there is no overlap between the two ranges.
-//
-// A non-empty result is either identical to or a subset of the receiver.
-func (r Range) Overlap(other Range) Range {
- if !r.Overlaps(other) {
- // Start == End indicates an empty range
- return Range{
- Filename: r.Filename,
- Start: r.Start,
- End: r.Start,
- }
- }
-
- var start, end Pos
- if r.Start.Byte > other.Start.Byte {
- start = r.Start
- } else {
- start = other.Start
- }
- if r.End.Byte < other.End.Byte {
- end = r.End
- } else {
- end = other.End
- }
-
- return Range{
- Filename: r.Filename,
- Start: start,
- End: end,
- }
-}
-
-// PartitionAround finds the portion of the given range that overlaps with
-// the reciever and returns three ranges: the portion of the reciever that
-// precedes the overlap, the overlap itself, and then the portion of the
-// reciever that comes after the overlap.
-//
-// If the two ranges do not overlap then all three returned ranges are empty.
-//
-// If the given range aligns with or extends beyond either extent of the
-// reciever then the corresponding outer range will be empty.
-func (r Range) PartitionAround(other Range) (before, overlap, after Range) {
- overlap = r.Overlap(other)
- if overlap.Empty() {
- return overlap, overlap, overlap
- }
-
- before = Range{
- Filename: r.Filename,
- Start: r.Start,
- End: overlap.Start,
- }
- after = Range{
- Filename: r.Filename,
- Start: overlap.End,
- End: r.End,
- }
-
- return before, overlap, after
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/pos_scanner.go b/vendor/github.com/hashicorp/hcl/v2/pos_scanner.go
deleted file mode 100644
index 2232f374..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/pos_scanner.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hcl
-
-import (
- "bufio"
- "bytes"
-
- "github.com/apparentlymart/go-textseg/v15/textseg"
-)
-
-// RangeScanner is a helper that will scan over a buffer using a bufio.SplitFunc
-// and visit a source range for each token matched.
-//
-// For example, this can be used with bufio.ScanLines to find the source range
-// for each line in the file, skipping over the actual newline characters, which
-// may be useful when printing source code snippets as part of diagnostic
-// messages.
-//
-// The line and column information in the returned ranges is produced by
-// counting newline characters and grapheme clusters respectively, which
-// mimics the behavior we expect from a parser when producing ranges.
-type RangeScanner struct {
- filename string
- b []byte
- cb bufio.SplitFunc
-
- pos Pos // position of next byte to process in b
- cur Range // latest range
- tok []byte // slice of b that is covered by cur
- err error // error from last scan, if any
-}
-
-// NewRangeScanner creates a new RangeScanner for the given buffer, producing
-// ranges for the given filename.
-//
-// Since ranges have grapheme-cluster granularity rather than byte granularity,
-// the scanner will produce incorrect results if the given SplitFunc creates
-// tokens between grapheme cluster boundaries. In particular, it is incorrect
-// to use RangeScanner with bufio.ScanRunes because it will produce tokens
-// around individual UTF-8 sequences, which will split any multi-sequence
-// grapheme clusters.
-func NewRangeScanner(b []byte, filename string, cb bufio.SplitFunc) *RangeScanner {
- return NewRangeScannerFragment(b, filename, InitialPos, cb)
-}
-
-// NewRangeScannerFragment is like NewRangeScanner but the ranges it produces
-// will be offset by the given starting position, which is appropriate for
-// sub-slices of a file, whereas NewRangeScanner assumes it is scanning an
-// entire file.
-func NewRangeScannerFragment(b []byte, filename string, start Pos, cb bufio.SplitFunc) *RangeScanner {
- return &RangeScanner{
- filename: filename,
- b: b,
- cb: cb,
- pos: start,
- }
-}
-
-func (sc *RangeScanner) Scan() bool {
- if sc.pos.Byte >= len(sc.b) || sc.err != nil {
- // All done
- return false
- }
-
- // Since we're operating on an in-memory buffer, we always pass the whole
- // remainder of the buffer to our SplitFunc and set isEOF to let it know
- // that it has the whole thing.
- advance, token, err := sc.cb(sc.b[sc.pos.Byte:], true)
-
- // Since we are setting isEOF to true this should never happen, but
- // if it does we will just abort and assume the SplitFunc is misbehaving.
- if advance == 0 && token == nil && err == nil {
- return false
- }
-
- if err != nil {
- sc.err = err
- sc.cur = Range{
- Filename: sc.filename,
- Start: sc.pos,
- End: sc.pos,
- }
- sc.tok = nil
- return false
- }
-
- sc.tok = token
- start := sc.pos
- end := sc.pos
- new := sc.pos
-
- // adv is similar to token but it also includes any subsequent characters
- // we're being asked to skip over by the SplitFunc.
- // adv is a slice covering any additional bytes we are skipping over, based
- // on what the SplitFunc told us to do with advance.
- adv := sc.b[sc.pos.Byte : sc.pos.Byte+advance]
-
- // We now need to scan over our token to count the grapheme clusters
- // so we can correctly advance Column, and count the newlines so we
- // can correctly advance Line.
- advR := bytes.NewReader(adv)
- gsc := bufio.NewScanner(advR)
- advanced := 0
- gsc.Split(textseg.ScanGraphemeClusters)
- for gsc.Scan() {
- gr := gsc.Bytes()
- new.Byte += len(gr)
- new.Column++
-
- // We rely here on the fact that \r\n is considered a grapheme cluster
- // and so we don't need to worry about miscounting additional lines
- // on files with Windows-style line endings.
- if len(gr) != 0 && (gr[0] == '\r' || gr[0] == '\n') {
- new.Column = 1
- new.Line++
- }
-
- if advanced < len(token) {
- // If we've not yet found the end of our token then we'll
- // also push our "end" marker along.
- // (if advance > len(token) then we'll stop moving "end" early
- // so that the caller only sees the range covered by token.)
- end = new
- }
- advanced += len(gr)
- }
-
- sc.cur = Range{
- Filename: sc.filename,
- Start: start,
- End: end,
- }
- sc.pos = new
- return true
-}
-
-// Range returns a range that covers the latest token obtained after a call
-// to Scan returns true.
-func (sc *RangeScanner) Range() Range {
- return sc.cur
-}
-
-// Bytes returns the slice of the input buffer that is covered by the range
-// that would be returned by Range.
-func (sc *RangeScanner) Bytes() []byte {
- return sc.tok
-}
-
-// Err can be called after Scan returns false to determine if the latest read
-// resulted in an error, and obtain that error if so.
-func (sc *RangeScanner) Err() error {
- return sc.err
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/schema.go b/vendor/github.com/hashicorp/hcl/v2/schema.go
deleted file mode 100644
index d4e339cb..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/schema.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hcl
-
-// BlockHeaderSchema represents the shape of a block header, and is
-// used for matching blocks within bodies.
-type BlockHeaderSchema struct {
- Type string
- LabelNames []string
-}
-
-// AttributeSchema represents the requirements for an attribute, and is used
-// for matching attributes within bodies.
-type AttributeSchema struct {
- Name string
- Required bool
-}
-
-// BodySchema represents the desired shallow structure of a body.
-type BodySchema struct {
- Attributes []AttributeSchema
- Blocks []BlockHeaderSchema
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/spec.md b/vendor/github.com/hashicorp/hcl/v2/spec.md
deleted file mode 100644
index 97ef6131..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/spec.md
+++ /dev/null
@@ -1,691 +0,0 @@
-# HCL Syntax-Agnostic Information Model
-
-This is the specification for the general information model (abstract types and
-semantics) for hcl. HCL is a system for defining configuration languages for
-applications. The HCL information model is designed to support multiple
-concrete syntaxes for configuration, each with a mapping to the model defined
-in this specification.
-
-The two primary syntaxes intended for use in conjunction with this model are
-[the HCL native syntax](./hclsyntax/spec.md) and [the JSON syntax](./json/spec.md).
-In principle other syntaxes are possible as long as either their language model
-is sufficiently rich to express the concepts described in this specification
-or the language targets a well-defined subset of the specification.
-
-## Structural Elements
-
-The primary structural element is the _body_, which is a container representing
-a set of zero or more _attributes_ and a set of zero or more _blocks_.
-
-A _configuration file_ is the top-level object, and will usually be produced
-by reading a file from disk and parsing it as a particular syntax. A
-configuration file has its own _body_, representing the top-level attributes
-and blocks.
-
-An _attribute_ is a name and value pair associated with a body. Attribute names
-are unique within a given body. Attribute values are provided as _expressions_,
-which are discussed in detail in a later section.
-
-A _block_ is a nested structure that has a _type name_, zero or more string
-_labels_ (e.g. identifiers), and a nested body.
-
-Together the structural elements create a hierarchical data structure, with
-attributes intended to represent the direct properties of a particular object
-in the calling application, and blocks intended to represent child objects
-of a particular object.
-
-## Body Content
-
-To support the expression of the HCL concepts in languages whose information
-model is a subset of HCL's, such as JSON, a _body_ is an opaque container
-whose content can only be accessed by providing information on the expected
-structure of the content.
-
-The specification for each syntax must describe how its physical constructs
-are mapped on to body content given a schema. For syntaxes that have
-first-class syntax distinguishing attributes and bodies this can be relatively
-straightforward, while more detailed mapping rules may be required in syntaxes
-where the representation of attributes vs. blocks is ambiguous.
-
-### Schema-driven Processing
-
-Schema-driven processing is the primary way to access body content.
-A _body schema_ is a description of what is expected within a particular body,
-which can then be used to extract the _body content_, which then provides
-access to the specific attributes and blocks requested.
-
-A _body schema_ consists of a list of _attribute schemata_ and
-_block header schemata_:
-
-- An _attribute schema_ provides the name of an attribute and whether its
- presence is required.
-
-- A _block header schema_ provides a block type name and the semantic names
- assigned to each of the labels of that block type, if any.
-
-Within a schema, it is an error to request the same attribute name twice or
-to request a block type whose name is also an attribute name. While this can
-in principle be supported in some syntaxes, in other syntaxes the attribute
-and block namespaces are combined and so an attribute cannot coexist with
-a block whose type name is identical to the attribute name.
-
-The result of applying a body schema to a body is _body content_, which
-consists of an _attribute map_ and a _block sequence_:
-
-- The _attribute map_ is a map data structure whose keys are attribute names
- and whose values are _expressions_ that represent the corresponding attribute
- values.
-
-- The _block sequence_ is an ordered sequence of blocks, with each specifying
- a block _type name_, the sequence of _labels_ specified for the block,
- and the body object (not body _content_) representing the block's own body.
-
-After obtaining _body content_, the calling application may continue processing
-by evaluating attribute expressions and/or recursively applying further
-schema-driven processing to the child block bodies.
-
-**Note:** The _body schema_ is intentionally minimal, to reduce the set of
-mapping rules that must be defined for each syntax. Higher-level utility
-libraries may be provided to assist in the construction of a schema and
-perform additional processing, such as automatically evaluating attribute
-expressions and assigning their result values into a data structure, or
-recursively applying a schema to child blocks. Such utilities are not part of
-this core specification and will vary depending on the capabilities and idiom
-of the implementation language.
-
-### _Dynamic Attributes_ Processing
-
-The _schema-driven_ processing model is useful when the expected structure
-of a body is known a priori by the calling application. Some blocks are
-instead more free-form, such as a user-provided set of arbitrary key/value
-pairs.
-
-The alternative _dynamic attributes_ processing mode allows for this more
-ad-hoc approach. Processing in this mode behaves as if a schema had been
-constructed without any _block header schemata_ and with an attribute
-schema for each distinct key provided within the physical representation
-of the body.
-
-The means by which _distinct keys_ are identified is dependent on the
-physical syntax; this processing mode assumes that the syntax has a way
-to enumerate keys provided by the author and identify expressions that
-correspond with those keys, but does not define the means by which this is
-done.
-
-The result of _dynamic attributes_ processing is an _attribute map_ as
-defined in the previous section. No _block sequence_ is produced in this
-processing mode.
-
-### Partial Processing of Body Content
-
-Under _schema-driven processing_, by default the given schema is assumed
-to be exhaustive, such that any attribute or block not matched by schema
-elements is considered an error. This allows feedback about unsupported
-attributes and blocks (such as typos) to be provided.
-
-An alternative is _partial processing_, where any additional elements within
-the body are not considered an error.
-
-Under partial processing, the result is both body content as described
-above _and_ a new body that represents any body elements that remain after
-the schema has been processed.
-
-Specifically:
-
-- Any attribute whose name is specified in the schema is returned in body
- content and elided from the new body.
-
-- Any block whose type is specified in the schema is returned in body content
- and elided from the new body.
-
-- Any attribute or block _not_ meeting the above conditions is placed into
- the new body, unmodified.
-
-The new body can then be recursively processed using any of the body
-processing models. This facility allows different subsets of body content
-to be processed by different parts of the calling application.
-
-Processing a body in two steps — first partial processing of a source body,
-then exhaustive processing of the returned body — is equivalent to single-step
-processing with a schema that is the union of the schemata used
-across the two steps.
-
-## Expressions
-
-Attribute values are represented by _expressions_. Depending on the concrete
-syntax in use, an expression may just be a literal value or it may describe
-a computation in terms of literal values, variables, and functions.
-
-Each syntax defines its own representation of expressions. For syntaxes based
-in languages that do not have any non-literal expression syntax, it is
-recommended to embed the template language from
-[the native syntax](./hclsyntax/spec.md) e.g. as a post-processing step on
-string literals.
-
-### Expression Evaluation
-
-In order to obtain a concrete value, each expression must be _evaluated_.
-Evaluation is performed in terms of an evaluation context, which
-consists of the following:
-
-- An _evaluation mode_, which is defined below.
-- A _variable scope_, which provides a set of named variables for use in
- expressions.
-- A _function table_, which provides a set of named functions for use in
- expressions.
-
-The _evaluation mode_ allows for two different interpretations of an
-expression:
-
-- In _literal-only mode_, variables and functions are not available and it
- is assumed that the calling application's intent is to treat the attribute
- value as a literal.
-
-- In _full expression mode_, variables and functions are defined and it is
- assumed that the calling application wishes to provide a full expression
- language for definition of the attribute value.
-
-The actual behavior of these two modes depends on the syntax in use. For
-languages with first-class expression syntax, these two modes may be considered
-equivalent, with _literal-only mode_ simply not defining any variables or
-functions. For languages that embed arbitrary expressions via string templates,
-_literal-only mode_ may disable such processing, allowing literal strings to
-pass through without interpretation as templates.
-
-Since literal-only mode does not support variables and functions, it is an
-error for the calling application to enable this mode and yet provide a
-variable scope and/or function table.
-
-## Values and Value Types
-
-The result of expression evaluation is a _value_. Each value has a _type_,
-which is dynamically determined during evaluation. The _variable scope_ in
-the evaluation context is a map from variable name to value, using the same
-definition of value.
-
-The type system for HCL values is intended to be of a level abstraction
-suitable for configuration of various applications. A well-defined,
-implementation-language-agnostic type system is defined to allow for
-consistent processing of configuration across many implementation languages.
-Concrete implementations may provide additional functionality to lower
-HCL values and types to corresponding native language types, which may then
-impose additional constraints on the values outside of the scope of this
-specification.
-
-Two values are _equal_ if and only if they have identical types and their
-values are equal according to the rules of their shared type.
-
-### Primitive Types
-
-The primitive types are _string_, _bool_, and _number_.
-
-A _string_ is a sequence of unicode characters. Two strings are equal if
-NFC normalization ([UAX#15](http://unicode.org/reports/tr15/)
-of each string produces two identical sequences of characters.
-NFC normalization ensures that, for example, a precomposed combination of a
-latin letter and a diacritic compares equal with the letter followed by
-a combining diacritic.
-
-The _bool_ type has only two non-null values: _true_ and _false_. Two bool
-values are equal if and only if they are either both true or both false.
-
-A _number_ is an arbitrary-precision floating point value. An implementation
-_must_ make the full-precision values available to the calling application
-for interpretation into any suitable number representation. An implementation
-may in practice implement numbers with limited precision so long as the
-following constraints are met:
-
-- Integers are represented with at least 256 bits.
-- Non-integer numbers are represented as floating point values with a
- mantissa of at least 256 bits and a signed binary exponent of at least
- 16 bits.
-- An error is produced if an integer value given in source cannot be
- represented precisely.
-- An error is produced if a non-integer value cannot be represented due to
- overflow.
-- A non-integer number is rounded to the nearest possible value when a
- value is of too high a precision to be represented.
-
-The _number_ type also requires representation of both positive and negative
-infinity. A "not a number" (NaN) value is _not_ provided nor used.
-
-Two number values are equal if they are numerically equal to the precision
-associated with the number. Positive infinity and negative infinity are
-equal to themselves but not to each other. Positive infinity is greater than
-any other number value, and negative infinity is less than any other number
-value.
-
-Some syntaxes may be unable to represent numeric literals of arbitrary
-precision. This must be defined in the syntax specification as part of its
-description of mapping numeric literals to HCL values.
-
-### Structural Types
-
-_Structural types_ are types that are constructed by combining other types.
-Each distinct combination of other types is itself a distinct type. There
-are two structural type _kinds_:
-
-- _Object types_ are constructed of a set of named attributes, each of which
- has a type. Attribute names are always strings. (_Object_ attributes are a
- distinct idea from _body_ attributes, though calling applications
- may choose to blur the distinction by use of common naming schemes.)
-- _Tuple types_ are constructed of a sequence of elements, each of which
- has a type.
-
-Values of structural types are compared for equality in terms of their
-attributes or elements. A structural type value is equal to another if and
-only if all of the corresponding attributes or elements are equal.
-
-Two structural types are identical if they are of the same kind and
-have attributes or elements with identical types.
-
-### Collection Types
-
-_Collection types_ are types that combine together an arbitrary number of
-values of some other single type. There are three collection type _kinds_:
-
-- _List types_ represent ordered sequences of values of their element type.
-- _Map types_ represent values of their element type accessed via string keys.
-- _Set types_ represent unordered sets of distinct values of their element type.
-
-For each of these kinds and each distinct element type there is a distinct
-collection type. For example, "list of string" is a distinct type from
-"set of string", and "list of number" is a distinct type from "list of string".
-
-Values of collection types are compared for equality in terms of their
-elements. A collection type value is equal to another if and only if both
-have the same number of elements and their corresponding elements are equal.
-
-Two collection types are identical if they are of the same kind and have
-the same element type.
-
-### Null values
-
-Each type has a null value. The null value of a type represents the absence
-of a value, but with type information retained to allow for type checking.
-
-Null values are used primarily to represent the conditional absence of a
-body attribute. In a syntax with a conditional operator, one of the result
-values of that conditional may be null to indicate that the attribute should be
-considered not present in that case.
-
-Calling applications _should_ consider an attribute with a null value as
-equivalent to the value not being present at all.
-
-A null value of a particular type is equal to itself.
-
-### Unknown Values and the Dynamic Pseudo-type
-
-An _unknown value_ is a placeholder for a value that is not yet known.
-Operations on unknown values themselves return unknown values that have a
-type appropriate to the operation. For example, adding together two unknown
-numbers yields an unknown number, while comparing two unknown values of any
-type for equality yields an unknown bool.
-
-Each type has a distinct unknown value. For example, an unknown _number_ is
-a distinct value from an unknown _string_.
-
-_The dynamic pseudo-type_ is a placeholder for a type that is not yet known.
-The only values of this type are its null value and its unknown value. It is
-referred to as a _pseudo-type_ because it should not be considered a type in
-its own right, but rather as a placeholder for a type yet to be established.
-The unknown value of the dynamic pseudo-type is referred to as _the dynamic
-value_.
-
-Operations on values of the dynamic pseudo-type behave as if it is a value
-of the expected type, optimistically assuming that once the value and type
-are known they will be valid for the operation. For example, adding together
-a number and the dynamic value produces an unknown number.
-
-Unknown values and the dynamic pseudo-type can be used as a mechanism for
-partial type checking and semantic checking: by evaluating an expression with
-all variables set to an unknown value, the expression can be evaluated to
-produce an unknown value of a given type, or produce an error if any operation
-is provably invalid with only type information.
-
-Unknown values and the dynamic pseudo-type must never be returned from
-operations unless at least one operand is unknown or dynamic. Calling
-applications are guaranteed that unless the global scope includes unknown
-values, or the function table includes functions that return unknown values,
-no expression will evaluate to an unknown value. The calling application is
-thus in total control over the use and meaning of unknown values.
-
-The dynamic pseudo-type is identical only to itself.
-
-### Capsule Types
-
-A _capsule type_ is a custom type defined by the calling application. A value
-of a capsule type is considered opaque to HCL, but may be accepted
-by functions provided by the calling application.
-
-A particular capsule type is identical only to itself. The equality of two
-values of the same capsule type is defined by the calling application. No
-other operations are supported for values of capsule types.
-
-Support for capsule types in a HCL implementation is optional. Capsule types
-are intended to allow calling applications to pass through values that are
-not part of the standard type system. For example, an application that
-deals with raw binary data may define a capsule type representing a byte
-array, and provide functions that produce or operate on byte arrays.
-
-### Type Specifications
-
-In certain situations it is necessary to define expectations about the expected
-type of a value. Whereas two _types_ have a commutative _identity_ relationship,
-a type has a non-commutative _matches_ relationship with a _type specification_.
-A type specification is, in practice, just a different interpretation of a
-type such that:
-
-- Any type _matches_ any type that it is identical to.
-
-- Any type _matches_ the dynamic pseudo-type.
-
-For example, given a type specification "list of dynamic pseudo-type", the
-concrete types "list of string" and "list of map" match, but the
-type "set of string" does not.
-
-## Functions and Function Calls
-
-The evaluation context used to evaluate an expression includes a function
-table, which represents an application-defined set of named functions
-available for use in expressions.
-
-Each syntax defines whether function calls are supported and how they are
-physically represented in source code, but the semantics of function calls are
-defined here to ensure consistent results across syntaxes and to allow
-applications to provide functions that are interoperable with all syntaxes.
-
-A _function_ is defined from the following elements:
-
-- Zero or more _positional parameters_, each with a name used for documentation,
- a type specification for expected argument values, and a flag for whether
- each of null values, unknown values, and values of the dynamic pseudo-type
- are accepted.
-
-- Zero or one _variadic parameters_, with the same structure as the _positional_
- parameters, which if present collects any additional arguments provided at
- the function call site.
-
-- A _result type definition_, which specifies the value type returned for each
- valid sequence of argument values.
-
-- A _result value definition_, which specifies the value returned for each
- valid sequence of argument values.
-
-A _function call_, regardless of source syntax, consists of a sequence of
-argument values. The argument values are each mapped to a corresponding
-parameter as follows:
-
-- For each of the function's positional parameters in sequence, take the next
- argument. If there are no more arguments, the call is erroneous.
-
-- If the function has a variadic parameter, take all remaining arguments that
- where not yet assigned to a positional parameter and collect them into
- a sequence of variadic arguments that each correspond to the variadic
- parameter.
-
-- If the function has _no_ variadic parameter, it is an error if any arguments
- remain after taking one argument for each positional parameter.
-
-After mapping each argument to a parameter, semantic checking proceeds
-for each argument:
-
-- If the argument value corresponding to a parameter does not match the
- parameter's type specification, the call is erroneous.
-
-- If the argument value corresponding to a parameter is null and the parameter
- is not specified as accepting nulls, the call is erroneous.
-
-- If the argument value corresponding to a parameter is the dynamic value
- and the parameter is not specified as accepting values of the dynamic
- pseudo-type, the call is valid but its _result type_ is forced to be the
- dynamic pseudo type.
-
-- If neither of the above conditions holds for any argument, the call is
- valid and the function's value type definition is used to determine the
- call's _result type_. A function _may_ vary its result type depending on
- the argument _values_ as well as the argument _types_; for example, a
- function that decodes a JSON value will return a different result type
- depending on the data structure described by the given JSON source code.
-
-If semantic checking succeeds without error, the call is _executed_:
-
-- For each argument, if its value is unknown and its corresponding parameter
- is not specified as accepting unknowns, the _result value_ is forced to be an
- unknown value of the result type.
-
-- If the previous condition does not apply, the function's result value
- definition is used to determine the call's _result value_.
-
-The result of a function call expression is either an error, if one of the
-erroneous conditions above applies, or the _result value_.
-
-## Type Conversions and Unification
-
-Values given in configuration may not always match the expectations of the
-operations applied to them or to the calling application. In such situations,
-automatic type conversion is attempted as a convenience to the user.
-
-Along with conversions to a _specified_ type, it is sometimes necessary to
-ensure that a selection of values are all of the _same_ type, without any
-constraint on which type that is. This is the process of _type unification_,
-which attempts to find the most general type that all of the given types can
-be converted to.
-
-Both type conversions and unification are defined in the syntax-agnostic
-model to ensure consistency of behavior between syntaxes.
-
-Type conversions are broadly characterized into two categories: _safe_ and
-_unsafe_. A conversion is "safe" if any distinct value of the source type
-has a corresponding distinct value in the target type. A conversion is
-"unsafe" if either the target type values are _not_ distinct (information
-may be lost in conversion) or if some values of the source type do not have
-any corresponding value in the target type. An unsafe conversion may result
-in an error.
-
-A given type can always be converted to itself, which is a no-op.
-
-### Conversion of Null Values
-
-All null values are safely convertable to a null value of any other type,
-regardless of other type-specific rules specified in the sections below.
-
-### Conversion to and from the Dynamic Pseudo-type
-
-Conversion _from_ the dynamic pseudo-type _to_ any other type always succeeds,
-producing an unknown value of the target type.
-
-Conversion of any value _to_ the dynamic pseudo-type is a no-op. The result
-is the input value, verbatim. This is the only situation where the conversion
-result value is not of the given target type.
-
-### Primitive Type Conversions
-
-Bidirectional conversions are available between the string and number types,
-and between the string and boolean types.
-
-The bool value true corresponds to the string containing the characters "true",
-while the bool value false corresponds to the string containing the characters
-"false". Conversion from bool to string is safe, while the converse is
-unsafe. The strings "1" and "0" are alternative string representations
-of true and false respectively. It is an error to convert a string other than
-the four in this paragraph to type bool.
-
-A number value is converted to string by translating its integer portion
-into a sequence of decimal digits (`0` through `9`), and then if it has a
-non-zero fractional part, a period `.` followed by a sequence of decimal
-digits representing its fractional part. No exponent portion is included.
-The number is converted at its full precision. Conversion from number to
-string is safe.
-
-A string is converted to a number value by reversing the above mapping.
-No exponent portion is allowed. Conversion from string to number is unsafe.
-It is an error to convert a string that does not comply with the expected
-syntax to type number.
-
-No direct conversion is available between the bool and number types.
-
-### Collection and Structural Type Conversions
-
-Conversion from set types to list types is _safe_, as long as their
-element types are safely convertable. If the element types are _unsafely_
-convertable, then the collection conversion is also unsafe. Each set element
-becomes a corresponding list element, in an undefined order. Although no
-particular ordering is required, implementations _should_ produce list
-elements in a consistent order for a given input set, as a convenience
-to calling applications.
-
-Conversion from list types to set types is _unsafe_, as long as their element
-types are convertable. Each distinct list item becomes a distinct set item.
-If two list items are equal, one of the two is lost in the conversion.
-
-Conversion from tuple types to list types permitted if all of the
-tuple element types are convertable to the target list element type.
-The safety of the conversion depends on the safety of each of the element
-conversions. Each element in turn is converted to the list element type,
-producing a list of identical length.
-
-Conversion from tuple types to set types is permitted, behaving as if the
-tuple type was first converted to a list of the same element type and then
-that list converted to the target set type.
-
-Conversion from object types to map types is permitted if all of the object
-attribute types are convertable to the target map element type. The safety
-of the conversion depends on the safety of each of the attribute conversions.
-Each attribute in turn is converted to the map element type, and map element
-keys are set to the name of each corresponding object attribute.
-
-Conversion from list and set types to tuple types is permitted, following
-the opposite steps as the converse conversions. Such conversions are _unsafe_.
-It is an error to convert a list or set to a tuple type whose number of
-elements does not match the list or set length.
-
-Conversion from map types to object types is permitted if each map key
-corresponds to an attribute in the target object type. It is an error to
-convert from a map value whose set of keys does not exactly match the target
-type's attributes. The conversion takes the opposite steps of the converse
-conversion.
-
-Conversion from one object type to another is permitted as long as the
-common attribute names have convertable types. Any attribute present in the
-target type but not in the source type is populated with a null value of
-the appropriate type.
-
-Conversion from one tuple type to another is permitted as long as the
-tuples have the same length and the elements have convertable types.
-
-### Type Unification
-
-Type unification is an operation that takes a list of types and attempts
-to find a single type to which they can all be converted. Since some
-type pairs have bidirectional conversions, preference is given to _safe_
-conversions. In technical terms, all possible types are arranged into
-a lattice, from which a most general supertype is selected where possible.
-
-The type resulting from type unification may be one of the input types, or
-it may be an entirely new type produced by combination of two or more
-input types.
-
-The following rules do not guarantee a valid result. In addition to these
-rules, unification fails if any of the given types are not convertable
-(per the above rules) to the selected result type.
-
-The following unification rules apply transitively. That is, if a rule is
-defined from A to B, and one from B to C, then A can unify to C.
-
-Number and bool types both unify with string by preferring string.
-
-Two collection types of the same kind unify according to the unification
-of their element types.
-
-List and set types unify by preferring the list type.
-
-Map and object types unify by preferring the object type.
-
-List, set and tuple types unify by preferring the tuple type.
-
-The dynamic pseudo-type unifies with any other type by selecting that other
-type. The dynamic pseudo-type is the result type only if _all_ input types
-are the dynamic pseudo-type.
-
-Two object types unify by constructing a new type whose attributes are
-the union of those of the two input types. Any common attributes themselves
-have their types unified.
-
-Two tuple types of the same length unify constructing a new type of the
-same length whose elements are the unification of the corresponding elements
-in the two input types.
-
-## Static Analysis
-
-In most applications, full expression evaluation is sufficient for understanding
-the provided configuration. However, some specialized applications require more
-direct access to the physical structures in the expressions, which can for
-example allow the construction of new language constructs in terms of the
-existing syntax elements.
-
-Since static analysis analyses the physical structure of configuration, the
-details will vary depending on syntax. Each syntax must decide which of its
-physical structures corresponds to the following analyses, producing error
-diagnostics if they are applied to inappropriate expressions.
-
-The following are the required static analysis functions:
-
-- **Static List**: Require list/tuple construction syntax to be used and
- return a list of expressions for each of the elements given.
-
-- **Static Map**: Require map/object construction syntax to be used and
- return a list of key/value pairs -- both expressions -- for each of
- the elements given. The usual constraint that a map key must be a string
- must not apply to this analysis, thus allowing applications to interpret
- arbitrary keys as they see fit.
-
-- **Static Call**: Require function call syntax to be used and return an
- object describing the called function name and a list of expressions
- representing each of the call arguments.
-
-- **Static Traversal**: Require a reference to a symbol in the variable
- scope and return a description of the path from the root scope to the
- accessed attribute or index.
-
-The intent of a calling application using these features is to require a more
-rigid interpretation of the configuration than in expression evaluation.
-Syntax implementations should make use of the extra contextual information
-provided in order to make an intuitive mapping onto the constructs of the
-underlying syntax, possibly interpreting the expression slightly differently
-than it would be interpreted in normal evaluation.
-
-Each syntax must define which of its expression elements each of the analyses
-above applies to, and how those analyses behave given those expression elements.
-
-## Implementation Considerations
-
-Implementations of this specification are free to adopt any strategy that
-produces behavior consistent with the specification. This non-normative
-section describes some possible implementation strategies that are consistent
-with the goals of this specification.
-
-### Language-agnosticism
-
-The language-agnosticism of this specification assumes that certain behaviors
-are implemented separately for each syntax:
-
-- Matching of a body schema with the physical elements of a body in the
- source language, to determine correspondence between physical constructs
- and schema elements.
-
-- Implementing the _dynamic attributes_ body processing mode by either
- interpreting all physical constructs as attributes or producing an error
- if non-attribute constructs are present.
-
-- Providing an evaluation function for all possible expressions that produces
- a value given an evaluation context.
-
-- Providing the static analysis functionality described above in a manner that
- makes sense within the convention of the syntax.
-
-The suggested implementation strategy is to use an implementation language's
-closest concept to an _abstract type_, _virtual type_ or _interface type_
-to represent both Body and Expression. Each language-specific implementation
-can then provide an implementation of each of these types wrapping AST nodes
-or other physical constructs from the language parser.
diff --git a/vendor/github.com/hashicorp/hcl/v2/static_expr.go b/vendor/github.com/hashicorp/hcl/v2/static_expr.go
deleted file mode 100644
index e14d7f89..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/static_expr.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hcl
-
-import (
- "github.com/zclconf/go-cty/cty"
-)
-
-type staticExpr struct {
- val cty.Value
- rng Range
-}
-
-// StaticExpr returns an Expression that always evaluates to the given value.
-//
-// This is useful to substitute default values for expressions that are
-// not explicitly given in configuration and thus would otherwise have no
-// Expression to return.
-//
-// Since expressions are expected to have a source range, the caller must
-// provide one. Ideally this should be a real source range, but it can
-// be a synthetic one (with an empty-string filename) if no suitable range
-// is available.
-func StaticExpr(val cty.Value, rng Range) Expression {
- return staticExpr{val, rng}
-}
-
-func (e staticExpr) Value(ctx *EvalContext) (cty.Value, Diagnostics) {
- return e.val, nil
-}
-
-func (e staticExpr) Variables() []Traversal {
- return nil
-}
-
-func (e staticExpr) Range() Range {
- return e.rng
-}
-
-func (e staticExpr) StartRange() Range {
- return e.rng
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/structure.go b/vendor/github.com/hashicorp/hcl/v2/structure.go
deleted file mode 100644
index 2bdf579d..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/structure.go
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hcl
-
-import (
- "github.com/zclconf/go-cty/cty"
-)
-
-// File is the top-level node that results from parsing a HCL file.
-type File struct {
- Body Body
- Bytes []byte
-
- // Nav is used to integrate with the "hcled" editor integration package,
- // and with diagnostic information formatters. It is not for direct use
- // by a calling application.
- Nav interface{}
-}
-
-// Block represents a nested block within a Body.
-type Block struct {
- Type string
- Labels []string
- Body Body
-
- DefRange Range // Range that can be considered the "definition" for seeking in an editor
- TypeRange Range // Range for the block type declaration specifically.
- LabelRanges []Range // Ranges for the label values specifically.
-}
-
-// Blocks is a sequence of Block.
-type Blocks []*Block
-
-// Attributes is a set of attributes keyed by their names.
-type Attributes map[string]*Attribute
-
-// Body is a container for attributes and blocks. It serves as the primary
-// unit of hierarchical structure within configuration.
-//
-// The content of a body cannot be meaningfully interpreted without a schema,
-// so Body represents the raw body content and has methods that allow the
-// content to be extracted in terms of a given schema.
-type Body interface {
- // Content verifies that the entire body content conforms to the given
- // schema and then returns it, and/or returns diagnostics. The returned
- // body content is valid if non-nil, regardless of whether Diagnostics
- // are provided, but diagnostics should still be eventually shown to
- // the user.
- Content(schema *BodySchema) (*BodyContent, Diagnostics)
-
- // PartialContent is like Content except that it permits the configuration
- // to contain additional blocks or attributes not specified in the
- // schema. If any are present, the returned Body is non-nil and contains
- // the remaining items from the body that were not selected by the schema.
- PartialContent(schema *BodySchema) (*BodyContent, Body, Diagnostics)
-
- // JustAttributes attempts to interpret all of the contents of the body
- // as attributes, allowing for the contents to be accessed without a priori
- // knowledge of the structure.
- //
- // The behavior of this method depends on the body's source language.
- // Some languages, like JSON, can't distinguish between attributes and
- // blocks without schema hints, but for languages that _can_ error
- // diagnostics will be generated if any blocks are present in the body.
- //
- // Diagnostics may be produced for other reasons too, such as duplicate
- // declarations of the same attribute.
- JustAttributes() (Attributes, Diagnostics)
-
- // MissingItemRange returns a range that represents where a missing item
- // might hypothetically be inserted. This is used when producing
- // diagnostics about missing required attributes or blocks. Not all bodies
- // will have an obvious single insertion point, so the result here may
- // be rather arbitrary.
- MissingItemRange() Range
-}
-
-// BodyContent is the result of applying a BodySchema to a Body.
-type BodyContent struct {
- Attributes Attributes
- Blocks Blocks
-
- MissingItemRange Range
-}
-
-// Attribute represents an attribute from within a body.
-type Attribute struct {
- Name string
- Expr Expression
-
- Range Range
- NameRange Range
-}
-
-// Expression is a literal value or an expression provided in the
-// configuration, which can be evaluated within a scope to produce a value.
-type Expression interface {
- // Value returns the value resulting from evaluating the expression
- // in the given evaluation context.
- //
- // The context may be nil, in which case the expression may contain
- // only constants and diagnostics will be produced for any non-constant
- // sub-expressions. (The exact definition of this depends on the source
- // language.)
- //
- // The context may instead be set but have either its Variables or
- // Functions maps set to nil, in which case only use of these features
- // will return diagnostics.
- //
- // Different diagnostics are provided depending on whether the given
- // context maps are nil or empty. In the former case, the message
- // tells the user that variables/functions are not permitted at all,
- // while in the latter case usage will produce a "not found" error for
- // the specific symbol in question.
- Value(ctx *EvalContext) (cty.Value, Diagnostics)
-
- // Variables returns a list of variables referenced in the receiving
- // expression. These are expressed as absolute Traversals, so may include
- // additional information about how the variable is used, such as
- // attribute lookups, which the calling application can potentially use
- // to only selectively populate the scope.
- Variables() []Traversal
-
- Range() Range
- StartRange() Range
-}
-
-// OfType filters the receiving block sequence by block type name,
-// returning a new block sequence including only the blocks of the
-// requested type.
-func (els Blocks) OfType(typeName string) Blocks {
- ret := make(Blocks, 0)
- for _, el := range els {
- if el.Type == typeName {
- ret = append(ret, el)
- }
- }
- return ret
-}
-
-// ByType transforms the receiving block sequence into a map from type
-// name to block sequences of only that type.
-func (els Blocks) ByType() map[string]Blocks {
- ret := make(map[string]Blocks)
- for _, el := range els {
- ty := el.Type
- if ret[ty] == nil {
- ret[ty] = make(Blocks, 0, 1)
- }
- ret[ty] = append(ret[ty], el)
- }
- return ret
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/structure_at_pos.go b/vendor/github.com/hashicorp/hcl/v2/structure_at_pos.go
deleted file mode 100644
index 62aba139..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/structure_at_pos.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hcl
-
-// -----------------------------------------------------------------------------
-// The methods in this file all have the general pattern of making a best-effort
-// to find one or more constructs that contain a given source position.
-//
-// These all operate by delegating to an optional method of the same name and
-// signature on the file's root body, allowing each syntax to potentially
-// provide its own implementations of these. For syntaxes that don't implement
-// them, the result is always nil.
-// -----------------------------------------------------------------------------
-
-// BlocksAtPos attempts to find all of the blocks that contain the given
-// position, ordered so that the outermost block is first and the innermost
-// block is last. This is a best-effort method that may not be able to produce
-// a complete result for all positions or for all HCL syntaxes.
-//
-// If the returned slice is non-empty, the first element is guaranteed to
-// represent the same block as would be the result of OutermostBlockAtPos and
-// the last element the result of InnermostBlockAtPos. However, the
-// implementation may return two different objects describing the same block,
-// so comparison by pointer identity is not possible.
-//
-// The result is nil if no blocks at all contain the given position.
-func (f *File) BlocksAtPos(pos Pos) []*Block {
- // The root body of the file must implement this interface in order
- // to support BlocksAtPos.
- type Interface interface {
- BlocksAtPos(pos Pos) []*Block
- }
-
- impl, ok := f.Body.(Interface)
- if !ok {
- return nil
- }
- return impl.BlocksAtPos(pos)
-}
-
-// OutermostBlockAtPos attempts to find a top-level block in the receiving file
-// that contains the given position. This is a best-effort method that may not
-// be able to produce a result for all positions or for all HCL syntaxes.
-//
-// The result is nil if no single block could be selected for any reason.
-func (f *File) OutermostBlockAtPos(pos Pos) *Block {
- // The root body of the file must implement this interface in order
- // to support OutermostBlockAtPos.
- type Interface interface {
- OutermostBlockAtPos(pos Pos) *Block
- }
-
- impl, ok := f.Body.(Interface)
- if !ok {
- return nil
- }
- return impl.OutermostBlockAtPos(pos)
-}
-
-// InnermostBlockAtPos attempts to find the most deeply-nested block in the
-// receiving file that contains the given position. This is a best-effort
-// method that may not be able to produce a result for all positions or for
-// all HCL syntaxes.
-//
-// The result is nil if no single block could be selected for any reason.
-func (f *File) InnermostBlockAtPos(pos Pos) *Block {
- // The root body of the file must implement this interface in order
- // to support InnermostBlockAtPos.
- type Interface interface {
- InnermostBlockAtPos(pos Pos) *Block
- }
-
- impl, ok := f.Body.(Interface)
- if !ok {
- return nil
- }
- return impl.InnermostBlockAtPos(pos)
-}
-
-// OutermostExprAtPos attempts to find an expression in the receiving file
-// that contains the given position. This is a best-effort method that may not
-// be able to produce a result for all positions or for all HCL syntaxes.
-//
-// Since expressions are often nested inside one another, this method returns
-// the outermost "root" expression that is not contained by any other.
-//
-// The result is nil if no single expression could be selected for any reason.
-func (f *File) OutermostExprAtPos(pos Pos) Expression {
- // The root body of the file must implement this interface in order
- // to support OutermostExprAtPos.
- type Interface interface {
- OutermostExprAtPos(pos Pos) Expression
- }
-
- impl, ok := f.Body.(Interface)
- if !ok {
- return nil
- }
- return impl.OutermostExprAtPos(pos)
-}
-
-// AttributeAtPos attempts to find an attribute definition in the receiving
-// file that contains the given position. This is a best-effort method that may
-// not be able to produce a result for all positions or for all HCL syntaxes.
-//
-// The result is nil if no single attribute could be selected for any reason.
-func (f *File) AttributeAtPos(pos Pos) *Attribute {
- // The root body of the file must implement this interface in order
- // to support OutermostExprAtPos.
- type Interface interface {
- AttributeAtPos(pos Pos) *Attribute
- }
-
- impl, ok := f.Body.(Interface)
- if !ok {
- return nil
- }
- return impl.AttributeAtPos(pos)
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/tools.go b/vendor/github.com/hashicorp/hcl/v2/tools.go
deleted file mode 100644
index e8c42ad1..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/tools.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-//go:build tools
-// +build tools
-
-package hcl
-
-import (
- _ "golang.org/x/tools/cmd/stringer"
-)
diff --git a/vendor/github.com/hashicorp/hcl/v2/traversal.go b/vendor/github.com/hashicorp/hcl/v2/traversal.go
deleted file mode 100644
index 540dde7e..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/traversal.go
+++ /dev/null
@@ -1,296 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hcl
-
-import (
- "fmt"
-
- "github.com/zclconf/go-cty/cty"
-)
-
-// A Traversal is a description of traversing through a value through a
-// series of operations such as attribute lookup, index lookup, etc.
-//
-// It is used to look up values in scopes, for example.
-//
-// The traversal operations are implementations of interface Traverser.
-// This is a closed set of implementations, so the interface cannot be
-// implemented from outside this package.
-//
-// A traversal can be absolute (its first value is a symbol name) or relative
-// (starts from an existing value).
-type Traversal []Traverser
-
-// TraversalJoin appends a relative traversal to an absolute traversal to
-// produce a new absolute traversal.
-func TraversalJoin(abs Traversal, rel Traversal) Traversal {
- if abs.IsRelative() {
- panic("first argument to TraversalJoin must be absolute")
- }
- if !rel.IsRelative() {
- panic("second argument to TraversalJoin must be relative")
- }
-
- ret := make(Traversal, len(abs)+len(rel))
- copy(ret, abs)
- copy(ret[len(abs):], rel)
- return ret
-}
-
-// TraverseRel applies the receiving traversal to the given value, returning
-// the resulting value. This is supported only for relative traversals,
-// and will panic if applied to an absolute traversal.
-func (t Traversal) TraverseRel(val cty.Value) (cty.Value, Diagnostics) {
- if !t.IsRelative() {
- panic("can't use TraverseRel on an absolute traversal")
- }
-
- current := val
- var diags Diagnostics
- for _, tr := range t {
- var newDiags Diagnostics
- current, newDiags = tr.TraversalStep(current)
- diags = append(diags, newDiags...)
- if newDiags.HasErrors() {
- return cty.DynamicVal, diags
- }
- }
- return current, diags
-}
-
-// TraverseAbs applies the receiving traversal to the given eval context,
-// returning the resulting value. This is supported only for absolute
-// traversals, and will panic if applied to a relative traversal.
-func (t Traversal) TraverseAbs(ctx *EvalContext) (cty.Value, Diagnostics) {
- if t.IsRelative() {
- panic("can't use TraverseAbs on a relative traversal")
- }
-
- split := t.SimpleSplit()
- root := split.Abs[0].(TraverseRoot)
- name := root.Name
-
- thisCtx := ctx
- hasNonNil := false
- for thisCtx != nil {
- if thisCtx.Variables == nil {
- thisCtx = thisCtx.parent
- continue
- }
- hasNonNil = true
- val, exists := thisCtx.Variables[name]
- if exists {
- return split.Rel.TraverseRel(val)
- }
- thisCtx = thisCtx.parent
- }
-
- if !hasNonNil {
- return cty.DynamicVal, Diagnostics{
- {
- Severity: DiagError,
- Summary: "Variables not allowed",
- Detail: "Variables may not be used here.",
- Subject: &root.SrcRange,
- },
- }
- }
-
- suggestions := make([]string, 0, len(ctx.Variables))
- thisCtx = ctx
- for thisCtx != nil {
- for k := range thisCtx.Variables {
- suggestions = append(suggestions, k)
- }
- thisCtx = thisCtx.parent
- }
- suggestion := nameSuggestion(name, suggestions)
- if suggestion != "" {
- suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
- }
-
- return cty.DynamicVal, Diagnostics{
- {
- Severity: DiagError,
- Summary: "Unknown variable",
- Detail: fmt.Sprintf("There is no variable named %q.%s", name, suggestion),
- Subject: &root.SrcRange,
- },
- }
-}
-
-// IsRelative returns true if the receiver is a relative traversal, or false
-// otherwise.
-func (t Traversal) IsRelative() bool {
- if len(t) == 0 {
- return true
- }
- if _, firstIsRoot := t[0].(TraverseRoot); firstIsRoot {
- return false
- }
- return true
-}
-
-// SimpleSplit returns a TraversalSplit where the name lookup is the absolute
-// part and the remainder is the relative part. Supported only for
-// absolute traversals, and will panic if applied to a relative traversal.
-//
-// This can be used by applications that have a relatively-simple variable
-// namespace where only the top-level is directly populated in the scope, with
-// everything else handled by relative lookups from those initial values.
-func (t Traversal) SimpleSplit() TraversalSplit {
- if t.IsRelative() {
- panic("can't use SimpleSplit on a relative traversal")
- }
- return TraversalSplit{
- Abs: t[0:1],
- Rel: t[1:],
- }
-}
-
-// RootName returns the root name for a absolute traversal. Will panic if
-// called on a relative traversal.
-func (t Traversal) RootName() string {
- if t.IsRelative() {
- panic("can't use RootName on a relative traversal")
-
- }
- return t[0].(TraverseRoot).Name
-}
-
-// SourceRange returns the source range for the traversal.
-func (t Traversal) SourceRange() Range {
- if len(t) == 0 {
- // Nothing useful to return here, but we'll return something
- // that's correctly-typed at least.
- return Range{}
- }
-
- return RangeBetween(t[0].SourceRange(), t[len(t)-1].SourceRange())
-}
-
-// TraversalSplit represents a pair of traversals, the first of which is
-// an absolute traversal and the second of which is relative to the first.
-//
-// This is used by calling applications that only populate prefixes of the
-// traversals in the scope, with Abs representing the part coming from the
-// scope and Rel representing the remaining steps once that part is
-// retrieved.
-type TraversalSplit struct {
- Abs Traversal
- Rel Traversal
-}
-
-// TraverseAbs traverses from a scope to the value resulting from the
-// absolute traversal.
-func (t TraversalSplit) TraverseAbs(ctx *EvalContext) (cty.Value, Diagnostics) {
- return t.Abs.TraverseAbs(ctx)
-}
-
-// TraverseRel traverses from a given value, assumed to be the result of
-// TraverseAbs on some scope, to a final result for the entire split traversal.
-func (t TraversalSplit) TraverseRel(val cty.Value) (cty.Value, Diagnostics) {
- return t.Rel.TraverseRel(val)
-}
-
-// Traverse is a convenience function to apply TraverseAbs followed by
-// TraverseRel.
-func (t TraversalSplit) Traverse(ctx *EvalContext) (cty.Value, Diagnostics) {
- v1, diags := t.TraverseAbs(ctx)
- if diags.HasErrors() {
- return cty.DynamicVal, diags
- }
- v2, newDiags := t.TraverseRel(v1)
- diags = append(diags, newDiags...)
- return v2, diags
-}
-
-// Join concatenates together the Abs and Rel parts to produce a single
-// absolute traversal.
-func (t TraversalSplit) Join() Traversal {
- return TraversalJoin(t.Abs, t.Rel)
-}
-
-// RootName returns the root name for the absolute part of the split.
-func (t TraversalSplit) RootName() string {
- return t.Abs.RootName()
-}
-
-// A Traverser is a step within a Traversal.
-type Traverser interface {
- TraversalStep(cty.Value) (cty.Value, Diagnostics)
- SourceRange() Range
- isTraverserSigil() isTraverser
-}
-
-// Embed this in a struct to declare it as a Traverser
-type isTraverser struct {
-}
-
-func (tr isTraverser) isTraverserSigil() isTraverser {
- return isTraverser{}
-}
-
-// TraverseRoot looks up a root name in a scope. It is used as the first step
-// of an absolute Traversal, and cannot itself be traversed directly.
-type TraverseRoot struct {
- isTraverser
- Name string
- SrcRange Range
-}
-
-// TraversalStep on a TraverseName immediately panics, because absolute
-// traversals cannot be directly traversed.
-func (tn TraverseRoot) TraversalStep(cty.Value) (cty.Value, Diagnostics) {
- panic("Cannot traverse an absolute traversal")
-}
-
-func (tn TraverseRoot) SourceRange() Range {
- return tn.SrcRange
-}
-
-// TraverseAttr looks up an attribute in its initial value.
-type TraverseAttr struct {
- isTraverser
- Name string
- SrcRange Range
-}
-
-func (tn TraverseAttr) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
- return GetAttr(val, tn.Name, &tn.SrcRange)
-}
-
-func (tn TraverseAttr) SourceRange() Range {
- return tn.SrcRange
-}
-
-// TraverseIndex applies the index operation to its initial value.
-type TraverseIndex struct {
- isTraverser
- Key cty.Value
- SrcRange Range
-}
-
-func (tn TraverseIndex) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
- return Index(val, tn.Key, &tn.SrcRange)
-}
-
-func (tn TraverseIndex) SourceRange() Range {
- return tn.SrcRange
-}
-
-// TraverseSplat applies the splat operation to its initial value.
-type TraverseSplat struct {
- isTraverser
- Each Traversal
- SrcRange Range
-}
-
-func (tn TraverseSplat) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
- panic("TraverseSplat not yet implemented")
-}
-
-func (tn TraverseSplat) SourceRange() Range {
- return tn.SrcRange
-}
diff --git a/vendor/github.com/hashicorp/hcl/v2/traversal_for_expr.go b/vendor/github.com/hashicorp/hcl/v2/traversal_for_expr.go
deleted file mode 100644
index 87eeb159..00000000
--- a/vendor/github.com/hashicorp/hcl/v2/traversal_for_expr.go
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package hcl
-
-// AbsTraversalForExpr attempts to interpret the given expression as
-// an absolute traversal, or returns error diagnostic(s) if that is
-// not possible for the given expression.
-//
-// A particular Expression implementation can support this function by
-// offering a method called AsTraversal that takes no arguments and
-// returns either a valid absolute traversal or nil to indicate that
-// no traversal is possible. Alternatively, an implementation can support
-// UnwrapExpression to delegate handling of this function to a wrapped
-// Expression object.
-//
-// In most cases the calling application is interested in the value
-// that results from an expression, but in rarer cases the application
-// needs to see the name of the variable and subsequent
-// attributes/indexes itself, for example to allow users to give references
-// to the variables themselves rather than to their values. An implementer
-// of this function should at least support attribute and index steps.
-func AbsTraversalForExpr(expr Expression) (Traversal, Diagnostics) {
- type asTraversal interface {
- AsTraversal() Traversal
- }
-
- physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool {
- _, supported := expr.(asTraversal)
- return supported
- })
-
- if asT, supported := physExpr.(asTraversal); supported {
- if traversal := asT.AsTraversal(); traversal != nil {
- return traversal, nil
- }
- }
- return nil, Diagnostics{
- &Diagnostic{
- Severity: DiagError,
- Summary: "Invalid expression",
- Detail: "A single static variable reference is required: only attribute access and indexing with constant keys. No calculations, function calls, template expressions, etc are allowed here.",
- Subject: expr.Range().Ptr(),
- },
- }
-}
-
-// RelTraversalForExpr is similar to AbsTraversalForExpr but it returns
-// a relative traversal instead. Due to the nature of HCL expressions, the
-// first element of the returned traversal is always a TraverseAttr, and
-// then it will be followed by zero or more other expressions.
-//
-// Any expression accepted by AbsTraversalForExpr is also accepted by
-// RelTraversalForExpr.
-func RelTraversalForExpr(expr Expression) (Traversal, Diagnostics) {
- traversal, diags := AbsTraversalForExpr(expr)
- if len(traversal) > 0 {
- ret := make(Traversal, len(traversal))
- copy(ret, traversal)
- root := traversal[0].(TraverseRoot)
- ret[0] = TraverseAttr{
- Name: root.Name,
- SrcRange: root.SrcRange,
- }
- return ret, diags
- }
- return traversal, diags
-}
-
-// ExprAsKeyword attempts to interpret the given expression as a static keyword,
-// returning the keyword string if possible, and the empty string if not.
-//
-// A static keyword, for the sake of this function, is a single identifier.
-// For example, the following attribute has an expression that would produce
-// the keyword "foo":
-//
-// example = foo
-//
-// This function is a variant of AbsTraversalForExpr, which uses the same
-// interface on the given expression. This helper constrains the result
-// further by requiring only a single root identifier.
-//
-// This function is intended to be used with the following idiom, to recognize
-// situations where one of a fixed set of keywords is required and arbitrary
-// expressions are not allowed:
-//
-// switch hcl.ExprAsKeyword(expr) {
-// case "allow":
-// // (take suitable action for keyword "allow")
-// case "deny":
-// // (take suitable action for keyword "deny")
-// default:
-// diags = append(diags, &hcl.Diagnostic{
-// // ... "invalid keyword" diagnostic message ...
-// })
-// }
-//
-// The above approach will generate the same message for both the use of an
-// unrecognized keyword and for not using a keyword at all, which is usually
-// reasonable if the message specifies that the given value must be a keyword
-// from that fixed list.
-//
-// Note that in the native syntax the keywords "true", "false", and "null" are
-// recognized as literal values during parsing and so these reserved words
-// cannot not be accepted as keywords by this function.
-//
-// Since interpreting an expression as a keyword bypasses usual expression
-// evaluation, it should be used sparingly for situations where e.g. one of
-// a fixed set of keywords is used in a structural way in a special attribute
-// to affect the further processing of a block.
-func ExprAsKeyword(expr Expression) string {
- type asTraversal interface {
- AsTraversal() Traversal
- }
-
- physExpr := UnwrapExpressionUntil(expr, func(expr Expression) bool {
- _, supported := expr.(asTraversal)
- return supported
- })
-
- if asT, supported := physExpr.(asTraversal); supported {
- if traversal := asT.AsTraversal(); len(traversal) == 1 {
- return traversal.RootName()
- }
- }
- return ""
-}
diff --git a/vendor/github.com/hashicorp/logutils/.gitignore b/vendor/github.com/hashicorp/logutils/.gitignore
deleted file mode 100644
index 00268614..00000000
--- a/vendor/github.com/hashicorp/logutils/.gitignore
+++ /dev/null
@@ -1,22 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
diff --git a/vendor/github.com/hashicorp/logutils/LICENSE b/vendor/github.com/hashicorp/logutils/LICENSE
deleted file mode 100644
index c33dcc7c..00000000
--- a/vendor/github.com/hashicorp/logutils/LICENSE
+++ /dev/null
@@ -1,354 +0,0 @@
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. “Contributor”
-
- means each individual or legal entity that creates, contributes to the
- creation of, or owns Covered Software.
-
-1.2. “Contributor Version”
-
- means the combination of the Contributions of others (if any) used by a
- Contributor and that particular Contributor’s Contribution.
-
-1.3. “Contribution”
-
- means Covered Software of a particular Contributor.
-
-1.4. “Covered Software”
-
- means Source Code Form to which the initial Contributor has attached the
- notice in Exhibit A, the Executable Form of such Source Code Form, and
- Modifications of such Source Code Form, in each case including portions
- thereof.
-
-1.5. “Incompatible With Secondary Licenses”
- means
-
- a. that the initial Contributor has attached the notice described in
- Exhibit B to the Covered Software; or
-
- b. that the Covered Software was made available under the terms of version
- 1.1 or earlier of the License, but not also under the terms of a
- Secondary License.
-
-1.6. “Executable Form”
-
- means any form of the work other than Source Code Form.
-
-1.7. “Larger Work”
-
- means a work that combines Covered Software with other material, in a separate
- file or files, that is not Covered Software.
-
-1.8. “License”
-
- means this document.
-
-1.9. “Licensable”
-
- means having the right to grant, to the maximum extent possible, whether at the
- time of the initial grant or subsequently, any and all of the rights conveyed by
- this License.
-
-1.10. “Modifications”
-
- means any of the following:
-
- a. any file in Source Code Form that results from an addition to, deletion
- from, or modification of the contents of Covered Software; or
-
- b. any new file in Source Code Form that contains any Covered Software.
-
-1.11. “Patent Claims” of a Contributor
-
- means any patent claim(s), including without limitation, method, process,
- and apparatus claims, in any patent Licensable by such Contributor that
- would be infringed, but for the grant of the License, by the making,
- using, selling, offering for sale, having made, import, or transfer of
- either its Contributions or its Contributor Version.
-
-1.12. “Secondary License”
-
- means either the GNU General Public License, Version 2.0, the GNU Lesser
- General Public License, Version 2.1, the GNU Affero General Public
- License, Version 3.0, or any later versions of those licenses.
-
-1.13. “Source Code Form”
-
- means the form of the work preferred for making modifications.
-
-1.14. “You” (or “Your”)
-
- means an individual or a legal entity exercising rights under this
- License. For legal entities, “You” includes any entity that controls, is
- controlled by, or is under common control with You. For purposes of this
- definition, “control” means (a) the power, direct or indirect, to cause
- the direction or management of such entity, whether by contract or
- otherwise, or (b) ownership of more than fifty percent (50%) of the
- outstanding shares or beneficial ownership of such entity.
-
-
-2. License Grants and Conditions
-
-2.1. Grants
-
- Each Contributor hereby grants You a world-wide, royalty-free,
- non-exclusive license:
-
- a. under intellectual property rights (other than patent or trademark)
- Licensable by such Contributor to use, reproduce, make available,
- modify, display, perform, distribute, and otherwise exploit its
- Contributions, either on an unmodified basis, with Modifications, or as
- part of a Larger Work; and
-
- b. under Patent Claims of such Contributor to make, use, sell, offer for
- sale, have made, import, and otherwise transfer either its Contributions
- or its Contributor Version.
-
-2.2. Effective Date
-
- The licenses granted in Section 2.1 with respect to any Contribution become
- effective for each Contribution on the date the Contributor first distributes
- such Contribution.
-
-2.3. Limitations on Grant Scope
-
- The licenses granted in this Section 2 are the only rights granted under this
- License. No additional rights or licenses will be implied from the distribution
- or licensing of Covered Software under this License. Notwithstanding Section
- 2.1(b) above, no patent license is granted by a Contributor:
-
- a. for any code that a Contributor has removed from Covered Software; or
-
- b. for infringements caused by: (i) Your and any other third party’s
- modifications of Covered Software, or (ii) the combination of its
- Contributions with other software (except as part of its Contributor
- Version); or
-
- c. under Patent Claims infringed by Covered Software in the absence of its
- Contributions.
-
- This License does not grant any rights in the trademarks, service marks, or
- logos of any Contributor (except as may be necessary to comply with the
- notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
- No Contributor makes additional grants as a result of Your choice to
- distribute the Covered Software under a subsequent version of this License
- (see Section 10.2) or under the terms of a Secondary License (if permitted
- under the terms of Section 3.3).
-
-2.5. Representation
-
- Each Contributor represents that the Contributor believes its Contributions
- are its original creation(s) or it has sufficient rights to grant the
- rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
- This License is not intended to limit any rights You have under applicable
- copyright doctrines of fair use, fair dealing, or other equivalents.
-
-2.7. Conditions
-
- Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
- Section 2.1.
-
-
-3. Responsibilities
-
-3.1. Distribution of Source Form
-
- All distribution of Covered Software in Source Code Form, including any
- Modifications that You create or to which You contribute, must be under the
- terms of this License. You must inform recipients that the Source Code Form
- of the Covered Software is governed by the terms of this License, and how
- they can obtain a copy of this License. You may not attempt to alter or
- restrict the recipients’ rights in the Source Code Form.
-
-3.2. Distribution of Executable Form
-
- If You distribute Covered Software in Executable Form then:
-
- a. such Covered Software must also be made available in Source Code Form,
- as described in Section 3.1, and You must inform recipients of the
- Executable Form how they can obtain a copy of such Source Code Form by
- reasonable means in a timely manner, at a charge no more than the cost
- of distribution to the recipient; and
-
- b. You may distribute such Executable Form under the terms of this License,
- or sublicense it under different terms, provided that the license for
- the Executable Form does not attempt to limit or alter the recipients’
- rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
- You may create and distribute a Larger Work under terms of Your choice,
- provided that You also comply with the requirements of this License for the
- Covered Software. If the Larger Work is a combination of Covered Software
- with a work governed by one or more Secondary Licenses, and the Covered
- Software is not Incompatible With Secondary Licenses, this License permits
- You to additionally distribute such Covered Software under the terms of
- such Secondary License(s), so that the recipient of the Larger Work may, at
- their option, further distribute the Covered Software under the terms of
- either this License or such Secondary License(s).
-
-3.4. Notices
-
- You may not remove or alter the substance of any license notices (including
- copyright notices, patent notices, disclaimers of warranty, or limitations
- of liability) contained within the Source Code Form of the Covered
- Software, except that You may alter any license notices to the extent
- required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
- You may choose to offer, and to charge a fee for, warranty, support,
- indemnity or liability obligations to one or more recipients of Covered
- Software. However, You may do so only on Your own behalf, and not on behalf
- of any Contributor. You must make it absolutely clear that any such
- warranty, support, indemnity, or liability obligation is offered by You
- alone, and You hereby agree to indemnify every Contributor for any
- liability incurred by such Contributor as a result of warranty, support,
- indemnity or liability terms You offer. You may include additional
- disclaimers of warranty and limitations of liability specific to any
- jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
-
- If it is impossible for You to comply with any of the terms of this License
- with respect to some or all of the Covered Software due to statute, judicial
- order, or regulation then You must: (a) comply with the terms of this License
- to the maximum extent possible; and (b) describe the limitations and the code
- they affect. Such description must be placed in a text file included with all
- distributions of the Covered Software under this License. Except to the
- extent prohibited by statute or regulation, such description must be
- sufficiently detailed for a recipient of ordinary skill to be able to
- understand it.
-
-5. Termination
-
-5.1. The rights granted under this License will terminate automatically if You
- fail to comply with any of its terms. However, if You become compliant,
- then the rights granted under this License from a particular Contributor
- are reinstated (a) provisionally, unless and until such Contributor
- explicitly and finally terminates Your grants, and (b) on an ongoing basis,
- if such Contributor fails to notify You of the non-compliance by some
- reasonable means prior to 60 days after You have come back into compliance.
- Moreover, Your grants from a particular Contributor are reinstated on an
- ongoing basis if such Contributor notifies You of the non-compliance by
- some reasonable means, this is the first time You have received notice of
- non-compliance with this License from such Contributor, and You become
- compliant prior to 30 days after Your receipt of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
- infringement claim (excluding declaratory judgment actions, counter-claims,
- and cross-claims) alleging that a Contributor Version directly or
- indirectly infringes any patent, then the rights granted to You by any and
- all Contributors for the Covered Software under Section 2.1 of this License
- shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
- license agreements (excluding distributors and resellers) which have been
- validly granted by You or Your distributors under this License prior to
- termination shall survive termination.
-
-6. Disclaimer of Warranty
-
- Covered Software is provided under this License on an “as is” basis, without
- warranty of any kind, either expressed, implied, or statutory, including,
- without limitation, warranties that the Covered Software is free of defects,
- merchantable, fit for a particular purpose or non-infringing. The entire
- risk as to the quality and performance of the Covered Software is with You.
- Should any Covered Software prove defective in any respect, You (not any
- Contributor) assume the cost of any necessary servicing, repair, or
- correction. This disclaimer of warranty constitutes an essential part of this
- License. No use of any Covered Software is authorized under this License
- except under this disclaimer.
-
-7. Limitation of Liability
-
- Under no circumstances and under no legal theory, whether tort (including
- negligence), contract, or otherwise, shall any Contributor, or anyone who
- distributes Covered Software as permitted above, be liable to You for any
- direct, indirect, special, incidental, or consequential damages of any
- character including, without limitation, damages for lost profits, loss of
- goodwill, work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses, even if such party shall have been
- informed of the possibility of such damages. This limitation of liability
- shall not apply to liability for death or personal injury resulting from such
- party’s negligence to the extent applicable law prohibits such limitation.
- Some jurisdictions do not allow the exclusion or limitation of incidental or
- consequential damages, so this exclusion and limitation may not apply to You.
-
-8. Litigation
-
- Any litigation relating to this License may be brought only in the courts of
- a jurisdiction where the defendant maintains its principal place of business
- and such litigation shall be governed by laws of that jurisdiction, without
- reference to its conflict-of-law provisions. Nothing in this Section shall
- prevent a party’s ability to bring cross-claims or counter-claims.
-
-9. Miscellaneous
-
- This License represents the complete agreement concerning the subject matter
- hereof. If any provision of this License is held to be unenforceable, such
- provision shall be reformed only to the extent necessary to make it
- enforceable. Any law or regulation which provides that the language of a
- contract shall be construed against the drafter shall not be used to construe
- this License against a Contributor.
-
-
-10. Versions of the License
-
-10.1. New Versions
-
- Mozilla Foundation is the license steward. Except as provided in Section
- 10.3, no one other than the license steward has the right to modify or
- publish new versions of this License. Each version will be given a
- distinguishing version number.
-
-10.2. Effect of New Versions
-
- You may distribute the Covered Software under the terms of the version of
- the License under which You originally received the Covered Software, or
- under the terms of any subsequent version published by the license
- steward.
-
-10.3. Modified Versions
-
- If you create software not governed by this License, and you want to
- create a new license for such software, you may create and use a modified
- version of this License if you rename the license and remove any
- references to the name of the license steward (except to note that such
- modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
- If You choose to distribute Source Code Form that is Incompatible With
- Secondary Licenses under the terms of this version of the License, the
- notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
-
- This Source Code Form is subject to the
- terms of the Mozilla Public License, v.
- 2.0. If a copy of the MPL was not
- distributed with this file, You can
- obtain one at
- http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file, then
-You may include the notice in a location (such as a LICENSE file in a relevant
-directory) where a recipient would be likely to look for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - “Incompatible With Secondary Licenses” Notice
-
- This Source Code Form is “Incompatible
- With Secondary Licenses”, as defined by
- the Mozilla Public License, v. 2.0.
-
diff --git a/vendor/github.com/hashicorp/logutils/README.md b/vendor/github.com/hashicorp/logutils/README.md
deleted file mode 100644
index 49490eae..00000000
--- a/vendor/github.com/hashicorp/logutils/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# logutils
-
-logutils is a Go package that augments the standard library "log" package
-to make logging a bit more modern, without fragmenting the Go ecosystem
-with new logging packages.
-
-## The simplest thing that could possibly work
-
-Presumably your application already uses the default `log` package. To switch, you'll want your code to look like the following:
-
-```go
-package main
-
-import (
- "log"
- "os"
-
- "github.com/hashicorp/logutils"
-)
-
-func main() {
- filter := &logutils.LevelFilter{
- Levels: []logutils.LogLevel{"DEBUG", "WARN", "ERROR"},
- MinLevel: logutils.LogLevel("WARN"),
- Writer: os.Stderr,
- }
- log.SetOutput(filter)
-
- log.Print("[DEBUG] Debugging") // this will not print
- log.Print("[WARN] Warning") // this will
- log.Print("[ERROR] Erring") // and so will this
- log.Print("Message I haven't updated") // and so will this
-}
-```
-
-This logs to standard error exactly like go's standard logger. Any log messages you haven't converted to have a level will continue to print as before.
diff --git a/vendor/github.com/hashicorp/logutils/level.go b/vendor/github.com/hashicorp/logutils/level.go
deleted file mode 100644
index 6381bf16..00000000
--- a/vendor/github.com/hashicorp/logutils/level.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Package logutils augments the standard log package with levels.
-package logutils
-
-import (
- "bytes"
- "io"
- "sync"
-)
-
-type LogLevel string
-
-// LevelFilter is an io.Writer that can be used with a logger that
-// will filter out log messages that aren't at least a certain level.
-//
-// Once the filter is in use somewhere, it is not safe to modify
-// the structure.
-type LevelFilter struct {
- // Levels is the list of log levels, in increasing order of
- // severity. Example might be: {"DEBUG", "WARN", "ERROR"}.
- Levels []LogLevel
-
- // MinLevel is the minimum level allowed through
- MinLevel LogLevel
-
- // The underlying io.Writer where log messages that pass the filter
- // will be set.
- Writer io.Writer
-
- badLevels map[LogLevel]struct{}
- once sync.Once
-}
-
-// Check will check a given line if it would be included in the level
-// filter.
-func (f *LevelFilter) Check(line []byte) bool {
- f.once.Do(f.init)
-
- // Check for a log level
- var level LogLevel
- x := bytes.IndexByte(line, '[')
- if x >= 0 {
- y := bytes.IndexByte(line[x:], ']')
- if y >= 0 {
- level = LogLevel(line[x+1 : x+y])
- }
- }
-
- _, ok := f.badLevels[level]
- return !ok
-}
-
-func (f *LevelFilter) Write(p []byte) (n int, err error) {
- // Note in general that io.Writer can receive any byte sequence
- // to write, but the "log" package always guarantees that we only
- // get a single line. We use that as a slight optimization within
- // this method, assuming we're dealing with a single, complete line
- // of log data.
-
- if !f.Check(p) {
- return len(p), nil
- }
-
- return f.Writer.Write(p)
-}
-
-// SetMinLevel is used to update the minimum log level
-func (f *LevelFilter) SetMinLevel(min LogLevel) {
- f.MinLevel = min
- f.init()
-}
-
-func (f *LevelFilter) init() {
- badLevels := make(map[LogLevel]struct{})
- for _, level := range f.Levels {
- if level == f.MinLevel {
- break
- }
- badLevels[level] = struct{}{}
- }
- f.badLevels = badLevels
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/LICENSE b/vendor/github.com/hashicorp/terraform-exec/LICENSE
deleted file mode 100644
index c121cee6..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/LICENSE
+++ /dev/null
@@ -1,375 +0,0 @@
-Copyright (c) 2020 HashiCorp, Inc.
-
-Mozilla Public License Version 2.0
-==================================
-
-1. Definitions
---------------
-
-1.1. "Contributor"
- means each individual or legal entity that creates, contributes to
- the creation of, or owns Covered Software.
-
-1.2. "Contributor Version"
- means the combination of the Contributions of others (if any) used
- by a Contributor and that particular Contributor's Contribution.
-
-1.3. "Contribution"
- means Covered Software of a particular Contributor.
-
-1.4. "Covered Software"
- means Source Code Form to which the initial Contributor has attached
- the notice in Exhibit A, the Executable Form of such Source Code
- Form, and Modifications of such Source Code Form, in each case
- including portions thereof.
-
-1.5. "Incompatible With Secondary Licenses"
- means
-
- (a) that the initial Contributor has attached the notice described
- in Exhibit B to the Covered Software; or
-
- (b) that the Covered Software was made available under the terms of
- version 1.1 or earlier of the License, but not also under the
- terms of a Secondary License.
-
-1.6. "Executable Form"
- means any form of the work other than Source Code Form.
-
-1.7. "Larger Work"
- means a work that combines Covered Software with other material, in
- a separate file or files, that is not Covered Software.
-
-1.8. "License"
- means this document.
-
-1.9. "Licensable"
- means having the right to grant, to the maximum extent possible,
- whether at the time of the initial grant or subsequently, any and
- all of the rights conveyed by this License.
-
-1.10. "Modifications"
- means any of the following:
-
- (a) any file in Source Code Form that results from an addition to,
- deletion from, or modification of the contents of Covered
- Software; or
-
- (b) any new file in Source Code Form that contains any Covered
- Software.
-
-1.11. "Patent Claims" of a Contributor
- means any patent claim(s), including without limitation, method,
- process, and apparatus claims, in any patent Licensable by such
- Contributor that would be infringed, but for the grant of the
- License, by the making, using, selling, offering for sale, having
- made, import, or transfer of either its Contributions or its
- Contributor Version.
-
-1.12. "Secondary License"
- means either the GNU General Public License, Version 2.0, the GNU
- Lesser General Public License, Version 2.1, the GNU Affero General
- Public License, Version 3.0, or any later versions of those
- licenses.
-
-1.13. "Source Code Form"
- means the form of the work preferred for making modifications.
-
-1.14. "You" (or "Your")
- means an individual or a legal entity exercising rights under this
- License. For legal entities, "You" includes any entity that
- controls, is controlled by, or is under common control with You. For
- purposes of this definition, "control" means (a) the power, direct
- or indirect, to cause the direction or management of such entity,
- whether by contract or otherwise, or (b) ownership of more than
- fifty percent (50%) of the outstanding shares or beneficial
- ownership of such entity.
-
-2. License Grants and Conditions
---------------------------------
-
-2.1. Grants
-
-Each Contributor hereby grants You a world-wide, royalty-free,
-non-exclusive license:
-
-(a) under intellectual property rights (other than patent or trademark)
- Licensable by such Contributor to use, reproduce, make available,
- modify, display, perform, distribute, and otherwise exploit its
- Contributions, either on an unmodified basis, with Modifications, or
- as part of a Larger Work; and
-
-(b) under Patent Claims of such Contributor to make, use, sell, offer
- for sale, have made, import, and otherwise transfer either its
- Contributions or its Contributor Version.
-
-2.2. Effective Date
-
-The licenses granted in Section 2.1 with respect to any Contribution
-become effective for each Contribution on the date the Contributor first
-distributes such Contribution.
-
-2.3. Limitations on Grant Scope
-
-The licenses granted in this Section 2 are the only rights granted under
-this License. No additional rights or licenses will be implied from the
-distribution or licensing of Covered Software under this License.
-Notwithstanding Section 2.1(b) above, no patent license is granted by a
-Contributor:
-
-(a) for any code that a Contributor has removed from Covered Software;
- or
-
-(b) for infringements caused by: (i) Your and any other third party's
- modifications of Covered Software, or (ii) the combination of its
- Contributions with other software (except as part of its Contributor
- Version); or
-
-(c) under Patent Claims infringed by Covered Software in the absence of
- its Contributions.
-
-This License does not grant any rights in the trademarks, service marks,
-or logos of any Contributor (except as may be necessary to comply with
-the notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
-No Contributor makes additional grants as a result of Your choice to
-distribute the Covered Software under a subsequent version of this
-License (see Section 10.2) or under the terms of a Secondary License (if
-permitted under the terms of Section 3.3).
-
-2.5. Representation
-
-Each Contributor represents that the Contributor believes its
-Contributions are its original creation(s) or it has sufficient rights
-to grant the rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
-This License is not intended to limit any rights You have under
-applicable copyright doctrines of fair use, fair dealing, or other
-equivalents.
-
-2.7. Conditions
-
-Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
-in Section 2.1.
-
-3. Responsibilities
--------------------
-
-3.1. Distribution of Source Form
-
-All distribution of Covered Software in Source Code Form, including any
-Modifications that You create or to which You contribute, must be under
-the terms of this License. You must inform recipients that the Source
-Code Form of the Covered Software is governed by the terms of this
-License, and how they can obtain a copy of this License. You may not
-attempt to alter or restrict the recipients' rights in the Source Code
-Form.
-
-3.2. Distribution of Executable Form
-
-If You distribute Covered Software in Executable Form then:
-
-(a) such Covered Software must also be made available in Source Code
- Form, as described in Section 3.1, and You must inform recipients of
- the Executable Form how they can obtain a copy of such Source Code
- Form by reasonable means in a timely manner, at a charge no more
- than the cost of distribution to the recipient; and
-
-(b) You may distribute such Executable Form under the terms of this
- License, or sublicense it under different terms, provided that the
- license for the Executable Form does not attempt to limit or alter
- the recipients' rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
-You may create and distribute a Larger Work under terms of Your choice,
-provided that You also comply with the requirements of this License for
-the Covered Software. If the Larger Work is a combination of Covered
-Software with a work governed by one or more Secondary Licenses, and the
-Covered Software is not Incompatible With Secondary Licenses, this
-License permits You to additionally distribute such Covered Software
-under the terms of such Secondary License(s), so that the recipient of
-the Larger Work may, at their option, further distribute the Covered
-Software under the terms of either this License or such Secondary
-License(s).
-
-3.4. Notices
-
-You may not remove or alter the substance of any license notices
-(including copyright notices, patent notices, disclaimers of warranty,
-or limitations of liability) contained within the Source Code Form of
-the Covered Software, except that You may alter any license notices to
-the extent required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
-You may choose to offer, and to charge a fee for, warranty, support,
-indemnity or liability obligations to one or more recipients of Covered
-Software. However, You may do so only on Your own behalf, and not on
-behalf of any Contributor. You must make it absolutely clear that any
-such warranty, support, indemnity, or liability obligation is offered by
-You alone, and You hereby agree to indemnify every Contributor for any
-liability incurred by such Contributor as a result of warranty, support,
-indemnity or liability terms You offer. You may include additional
-disclaimers of warranty and limitations of liability specific to any
-jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
----------------------------------------------------
-
-If it is impossible for You to comply with any of the terms of this
-License with respect to some or all of the Covered Software due to
-statute, judicial order, or regulation then You must: (a) comply with
-the terms of this License to the maximum extent possible; and (b)
-describe the limitations and the code they affect. Such description must
-be placed in a text file included with all distributions of the Covered
-Software under this License. Except to the extent prohibited by statute
-or regulation, such description must be sufficiently detailed for a
-recipient of ordinary skill to be able to understand it.
-
-5. Termination
---------------
-
-5.1. The rights granted under this License will terminate automatically
-if You fail to comply with any of its terms. However, if You become
-compliant, then the rights granted under this License from a particular
-Contributor are reinstated (a) provisionally, unless and until such
-Contributor explicitly and finally terminates Your grants, and (b) on an
-ongoing basis, if such Contributor fails to notify You of the
-non-compliance by some reasonable means prior to 60 days after You have
-come back into compliance. Moreover, Your grants from a particular
-Contributor are reinstated on an ongoing basis if such Contributor
-notifies You of the non-compliance by some reasonable means, this is the
-first time You have received notice of non-compliance with this License
-from such Contributor, and You become compliant prior to 30 days after
-Your receipt of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
-infringement claim (excluding declaratory judgment actions,
-counter-claims, and cross-claims) alleging that a Contributor Version
-directly or indirectly infringes any patent, then the rights granted to
-You by any and all Contributors for the Covered Software under Section
-2.1 of this License shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all
-end user license agreements (excluding distributors and resellers) which
-have been validly granted by You or Your distributors under this License
-prior to termination shall survive termination.
-
-************************************************************************
-* *
-* 6. Disclaimer of Warranty *
-* ------------------------- *
-* *
-* Covered Software is provided under this License on an "as is" *
-* basis, without warranty of any kind, either expressed, implied, or *
-* statutory, including, without limitation, warranties that the *
-* Covered Software is free of defects, merchantable, fit for a *
-* particular purpose or non-infringing. The entire risk as to the *
-* quality and performance of the Covered Software is with You. *
-* Should any Covered Software prove defective in any respect, You *
-* (not any Contributor) assume the cost of any necessary servicing, *
-* repair, or correction. This disclaimer of warranty constitutes an *
-* essential part of this License. No use of any Covered Software is *
-* authorized under this License except under this disclaimer. *
-* *
-************************************************************************
-
-************************************************************************
-* *
-* 7. Limitation of Liability *
-* -------------------------- *
-* *
-* Under no circumstances and under no legal theory, whether tort *
-* (including negligence), contract, or otherwise, shall any *
-* Contributor, or anyone who distributes Covered Software as *
-* permitted above, be liable to You for any direct, indirect, *
-* special, incidental, or consequential damages of any character *
-* including, without limitation, damages for lost profits, loss of *
-* goodwill, work stoppage, computer failure or malfunction, or any *
-* and all other commercial damages or losses, even if such party *
-* shall have been informed of the possibility of such damages. This *
-* limitation of liability shall not apply to liability for death or *
-* personal injury resulting from such party's negligence to the *
-* extent applicable law prohibits such limitation. Some *
-* jurisdictions do not allow the exclusion or limitation of *
-* incidental or consequential damages, so this exclusion and *
-* limitation may not apply to You. *
-* *
-************************************************************************
-
-8. Litigation
--------------
-
-Any litigation relating to this License may be brought only in the
-courts of a jurisdiction where the defendant maintains its principal
-place of business and such litigation shall be governed by laws of that
-jurisdiction, without reference to its conflict-of-law provisions.
-Nothing in this Section shall prevent a party's ability to bring
-cross-claims or counter-claims.
-
-9. Miscellaneous
-----------------
-
-This License represents the complete agreement concerning the subject
-matter hereof. If any provision of this License is held to be
-unenforceable, such provision shall be reformed only to the extent
-necessary to make it enforceable. Any law or regulation which provides
-that the language of a contract shall be construed against the drafter
-shall not be used to construe this License against a Contributor.
-
-10. Versions of the License
----------------------------
-
-10.1. New Versions
-
-Mozilla Foundation is the license steward. Except as provided in Section
-10.3, no one other than the license steward has the right to modify or
-publish new versions of this License. Each version will be given a
-distinguishing version number.
-
-10.2. Effect of New Versions
-
-You may distribute the Covered Software under the terms of the version
-of the License under which You originally received the Covered Software,
-or under the terms of any subsequent version published by the license
-steward.
-
-10.3. Modified Versions
-
-If you create software not governed by this License, and you want to
-create a new license for such software, you may create and use a
-modified version of this License if you rename the license and remove
-any references to the name of the license steward (except to note that
-such modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary
-Licenses
-
-If You choose to distribute Source Code Form that is Incompatible With
-Secondary Licenses under the terms of this version of the License, the
-notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
--------------------------------------------
-
- This Source Code Form is subject to the terms of the Mozilla Public
- License, v. 2.0. If a copy of the MPL was not distributed with this
- file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular
-file, then You may include the notice in a location (such as a LICENSE
-file in a relevant directory) where a recipient would be likely to look
-for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - "Incompatible With Secondary Licenses" Notice
----------------------------------------------------------
-
- This Source Code Form is "Incompatible With Secondary Licenses", as
- defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go b/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go
deleted file mode 100644
index 235d5612..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/internal/version/version.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package version
-
-const version = "0.21.0"
-
-// ModuleVersion returns the current version of the github.com/hashicorp/terraform-exec Go module.
-// This is a function to allow for future possible enhancement using debug.BuildInfo.
-func ModuleVersion() string {
- return version
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go
deleted file mode 100644
index 7a6ea923..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/apply.go
+++ /dev/null
@@ -1,275 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "io"
- "os/exec"
- "strconv"
-)
-
-type applyConfig struct {
- allowDeferral bool
- backup string
- destroy bool
- dirOrPlan string
- lock bool
-
- // LockTimeout must be a string with time unit, e.g. '10s'
- lockTimeout string
- parallelism int
- reattachInfo ReattachInfo
- refresh bool
- refreshOnly bool
- replaceAddrs []string
- state string
- stateOut string
- targets []string
-
- // Vars: each var must be supplied as a single string, e.g. 'foo=bar'
- vars []string
- varFiles []string
-}
-
-var defaultApplyOptions = applyConfig{
- destroy: false,
- lock: true,
- parallelism: 10,
- refresh: true,
-}
-
-// ApplyOption represents options used in the Apply method.
-type ApplyOption interface {
- configureApply(*applyConfig)
-}
-
-func (opt *ParallelismOption) configureApply(conf *applyConfig) {
- conf.parallelism = opt.parallelism
-}
-
-func (opt *BackupOption) configureApply(conf *applyConfig) {
- conf.backup = opt.path
-}
-
-func (opt *TargetOption) configureApply(conf *applyConfig) {
- conf.targets = append(conf.targets, opt.target)
-}
-
-func (opt *LockTimeoutOption) configureApply(conf *applyConfig) {
- conf.lockTimeout = opt.timeout
-}
-
-func (opt *StateOption) configureApply(conf *applyConfig) {
- conf.state = opt.path
-}
-
-func (opt *StateOutOption) configureApply(conf *applyConfig) {
- conf.stateOut = opt.path
-}
-
-func (opt *VarFileOption) configureApply(conf *applyConfig) {
- conf.varFiles = append(conf.varFiles, opt.path)
-}
-
-func (opt *LockOption) configureApply(conf *applyConfig) {
- conf.lock = opt.lock
-}
-
-func (opt *RefreshOption) configureApply(conf *applyConfig) {
- conf.refresh = opt.refresh
-}
-
-func (opt *RefreshOnlyOption) configureApply(conf *applyConfig) {
- conf.refreshOnly = opt.refreshOnly
-}
-
-func (opt *ReplaceOption) configureApply(conf *applyConfig) {
- conf.replaceAddrs = append(conf.replaceAddrs, opt.address)
-}
-
-func (opt *VarOption) configureApply(conf *applyConfig) {
- conf.vars = append(conf.vars, opt.assignment)
-}
-
-func (opt *DirOrPlanOption) configureApply(conf *applyConfig) {
- conf.dirOrPlan = opt.path
-}
-
-func (opt *ReattachOption) configureApply(conf *applyConfig) {
- conf.reattachInfo = opt.info
-}
-
-func (opt *DestroyFlagOption) configureApply(conf *applyConfig) {
- conf.destroy = opt.destroy
-}
-
-func (opt *AllowDeferralOption) configureApply(conf *applyConfig) {
- conf.allowDeferral = opt.allowDeferral
-}
-
-// Apply represents the terraform apply subcommand.
-func (tf *Terraform) Apply(ctx context.Context, opts ...ApplyOption) error {
- cmd, err := tf.applyCmd(ctx, opts...)
- if err != nil {
- return err
- }
- return tf.runTerraformCmd(ctx, cmd)
-}
-
-// ApplyJSON represents the terraform apply subcommand with the `-json` flag.
-// Using the `-json` flag will result in
-// [machine-readable](https://developer.hashicorp.com/terraform/internals/machine-readable-ui)
-// JSON being written to the supplied `io.Writer`. ApplyJSON is likely to be
-// removed in a future major version in favour of Apply returning JSON by default.
-func (tf *Terraform) ApplyJSON(ctx context.Context, w io.Writer, opts ...ApplyOption) error {
- err := tf.compatible(ctx, tf0_15_3, nil)
- if err != nil {
- return fmt.Errorf("terraform apply -json was added in 0.15.3: %w", err)
- }
-
- tf.SetStdout(w)
-
- cmd, err := tf.applyJSONCmd(ctx, opts...)
- if err != nil {
- return err
- }
-
- return tf.runTerraformCmd(ctx, cmd)
-}
-
-func (tf *Terraform) applyCmd(ctx context.Context, opts ...ApplyOption) (*exec.Cmd, error) {
- c := defaultApplyOptions
-
- for _, o := range opts {
- o.configureApply(&c)
- }
-
- args, err := tf.buildApplyArgs(ctx, c)
- if err != nil {
- return nil, err
- }
-
- return tf.buildApplyCmd(ctx, c, args)
-}
-
-func (tf *Terraform) applyJSONCmd(ctx context.Context, opts ...ApplyOption) (*exec.Cmd, error) {
- c := defaultApplyOptions
-
- for _, o := range opts {
- o.configureApply(&c)
- }
-
- args, err := tf.buildApplyArgs(ctx, c)
- if err != nil {
- return nil, err
- }
-
- args = append(args, "-json")
-
- return tf.buildApplyCmd(ctx, c, args)
-}
-
-func (tf *Terraform) buildApplyArgs(ctx context.Context, c applyConfig) ([]string, error) {
- args := []string{"apply", "-no-color", "-auto-approve", "-input=false"}
-
- // string opts: only pass if set
- if c.backup != "" {
- args = append(args, "-backup="+c.backup)
- }
- if c.lockTimeout != "" {
- args = append(args, "-lock-timeout="+c.lockTimeout)
- }
- if c.state != "" {
- args = append(args, "-state="+c.state)
- }
- if c.stateOut != "" {
- args = append(args, "-state-out="+c.stateOut)
- }
- for _, vf := range c.varFiles {
- args = append(args, "-var-file="+vf)
- }
-
- // boolean and numerical opts: always pass
- args = append(args, "-lock="+strconv.FormatBool(c.lock))
- args = append(args, "-parallelism="+fmt.Sprint(c.parallelism))
- args = append(args, "-refresh="+strconv.FormatBool(c.refresh))
-
- if c.refreshOnly {
- err := tf.compatible(ctx, tf0_15_4, nil)
- if err != nil {
- return nil, fmt.Errorf("refresh-only option was introduced in Terraform 0.15.4: %w", err)
- }
- if !c.refresh {
- return nil, fmt.Errorf("you cannot use refresh=false in refresh-only planning mode")
- }
- args = append(args, "-refresh-only")
- }
-
- // string slice opts: split into separate args
- if c.replaceAddrs != nil {
- err := tf.compatible(ctx, tf0_15_2, nil)
- if err != nil {
- return nil, fmt.Errorf("replace option was introduced in Terraform 0.15.2: %w", err)
- }
- for _, addr := range c.replaceAddrs {
- args = append(args, "-replace="+addr)
- }
- }
- if c.destroy {
- err := tf.compatible(ctx, tf0_15_2, nil)
- if err != nil {
- return nil, fmt.Errorf("-destroy option was introduced in Terraform 0.15.2: %w", err)
- }
- args = append(args, "-destroy")
- }
-
- if c.targets != nil {
- for _, ta := range c.targets {
- args = append(args, "-target="+ta)
- }
- }
- if c.vars != nil {
- for _, v := range c.vars {
- args = append(args, "-var", v)
- }
- }
-
- if c.allowDeferral {
- // Ensure the version is later than 1.9.0
- err := tf.compatible(ctx, tf1_9_0, nil)
- if err != nil {
- return nil, fmt.Errorf("-allow-deferral is an experimental option introduced in Terraform 1.9.0: %w", err)
- }
-
- // Ensure the version has experiments enabled (alpha or dev builds)
- err = tf.experimentsEnabled(ctx)
- if err != nil {
- return nil, fmt.Errorf("-allow-deferral is only available in experimental Terraform builds: %w", err)
- }
-
- args = append(args, "-allow-deferral")
- }
-
- return args, nil
-}
-
-func (tf *Terraform) buildApplyCmd(ctx context.Context, c applyConfig, args []string) (*exec.Cmd, error) {
- // string argument: pass if set
- if c.dirOrPlan != "" {
- args = append(args, c.dirOrPlan)
- }
-
- mergeEnv := map[string]string{}
- if c.reattachInfo != nil {
- reattachStr, err := c.reattachInfo.marshalString()
- if err != nil {
- return nil, err
- }
- mergeEnv[reattachEnvVar] = reattachStr
- }
-
- return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go
deleted file mode 100644
index 5e160324..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd.go
+++ /dev/null
@@ -1,277 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "bufio"
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "os/exec"
- "strings"
-
- "github.com/hashicorp/terraform-exec/internal/version"
-)
-
-const (
- checkpointDisableEnvVar = "CHECKPOINT_DISABLE"
- cliArgsEnvVar = "TF_CLI_ARGS"
- inputEnvVar = "TF_INPUT"
- automationEnvVar = "TF_IN_AUTOMATION"
- logEnvVar = "TF_LOG"
- logCoreEnvVar = "TF_LOG_CORE"
- logPathEnvVar = "TF_LOG_PATH"
- logProviderEnvVar = "TF_LOG_PROVIDER"
- reattachEnvVar = "TF_REATTACH_PROVIDERS"
- appendUserAgentEnvVar = "TF_APPEND_USER_AGENT"
- workspaceEnvVar = "TF_WORKSPACE"
- disablePluginTLSEnvVar = "TF_DISABLE_PLUGIN_TLS"
- skipProviderVerifyEnvVar = "TF_SKIP_PROVIDER_VERIFY"
-
- varEnvVarPrefix = "TF_VAR_"
- cliArgEnvVarPrefix = "TF_CLI_ARGS_"
-)
-
-var prohibitedEnvVars = []string{
- cliArgsEnvVar,
- inputEnvVar,
- automationEnvVar,
- logEnvVar,
- logCoreEnvVar,
- logPathEnvVar,
- logProviderEnvVar,
- reattachEnvVar,
- appendUserAgentEnvVar,
- workspaceEnvVar,
- disablePluginTLSEnvVar,
- skipProviderVerifyEnvVar,
-}
-
-var prohibitedEnvVarPrefixes = []string{
- varEnvVarPrefix,
- cliArgEnvVarPrefix,
-}
-
-func manualEnvVars(env map[string]string, cb func(k string)) {
- for k := range env {
- for _, p := range prohibitedEnvVars {
- if p == k {
- cb(k)
- goto NextEnvVar
- }
- }
- for _, prefix := range prohibitedEnvVarPrefixes {
- if strings.HasPrefix(k, prefix) {
- cb(k)
- goto NextEnvVar
- }
- }
- NextEnvVar:
- }
-}
-
-// ProhibitedEnv returns a slice of environment variable keys that are not allowed
-// to be set manually from the passed environment.
-func ProhibitedEnv(env map[string]string) []string {
- var p []string
- manualEnvVars(env, func(k string) {
- p = append(p, k)
- })
- return p
-}
-
-// CleanEnv removes any prohibited environment variables from an environment map.
-func CleanEnv(dirty map[string]string) map[string]string {
- clean := dirty
- manualEnvVars(clean, func(k string) {
- delete(clean, k)
- })
- return clean
-}
-
-func envMap(environ []string) map[string]string {
- env := map[string]string{}
- for _, ev := range environ {
- parts := strings.SplitN(ev, "=", 2)
- if len(parts) == 0 {
- continue
- }
- k := parts[0]
- v := ""
- if len(parts) == 2 {
- v = parts[1]
- }
- env[k] = v
- }
- return env
-}
-
-func envSlice(environ map[string]string) []string {
- env := []string{}
- for k, v := range environ {
- env = append(env, k+"="+v)
- }
- return env
-}
-
-func (tf *Terraform) buildEnv(mergeEnv map[string]string) []string {
- // set Terraform level env, if env is nil, fall back to os.Environ
- var env map[string]string
- if tf.env == nil {
- env = envMap(os.Environ())
- } else {
- env = make(map[string]string, len(tf.env))
- for k, v := range tf.env {
- env[k] = v
- }
- }
-
- // override env with any command specific environment
- for k, v := range mergeEnv {
- env[k] = v
- }
-
- // always propagate CHECKPOINT_DISABLE env var unless it is
- // explicitly overridden with tf.SetEnv or command env
- if _, ok := env[checkpointDisableEnvVar]; !ok {
- env[checkpointDisableEnvVar] = os.Getenv(checkpointDisableEnvVar)
- }
-
- // always override user agent
- ua := mergeUserAgent(
- os.Getenv(appendUserAgentEnvVar),
- tf.appendUserAgent,
- fmt.Sprintf("HashiCorp-terraform-exec/%s", version.ModuleVersion()),
- )
- env[appendUserAgentEnvVar] = ua
-
- // always override logging
- if tf.logPath == "" {
- // so logging can't pollute our stderr output
- env[logEnvVar] = ""
- env[logCoreEnvVar] = ""
- env[logPathEnvVar] = ""
- env[logProviderEnvVar] = ""
- } else {
- env[logEnvVar] = tf.log
- env[logCoreEnvVar] = tf.logCore
- env[logPathEnvVar] = tf.logPath
- env[logProviderEnvVar] = tf.logProvider
- }
-
- // constant automation override env vars
- env[automationEnvVar] = "1"
-
- // force usage of workspace methods for switching
- delete(env, workspaceEnvVar)
-
- if tf.disablePluginTLS {
- env[disablePluginTLSEnvVar] = "1"
- }
-
- if tf.skipProviderVerify {
- env[skipProviderVerifyEnvVar] = "1"
- }
-
- return envSlice(env)
-}
-
-func (tf *Terraform) buildTerraformCmd(ctx context.Context, mergeEnv map[string]string, args ...string) *exec.Cmd {
- cmd := exec.CommandContext(ctx, tf.execPath, args...)
-
- cmd.Env = tf.buildEnv(mergeEnv)
- cmd.Dir = tf.workingDir
-
- tf.logger.Printf("[INFO] running Terraform command: %s", cmd.String())
-
- return cmd
-}
-
-func (tf *Terraform) runTerraformCmdJSON(ctx context.Context, cmd *exec.Cmd, v interface{}) error {
- var outbuf = bytes.Buffer{}
- cmd.Stdout = mergeWriters(cmd.Stdout, &outbuf)
-
- err := tf.runTerraformCmd(ctx, cmd)
- if err != nil {
- return err
- }
-
- dec := json.NewDecoder(&outbuf)
- dec.UseNumber()
- return dec.Decode(v)
-}
-
-// mergeUserAgent does some minor deduplication to ensure we aren't
-// just using the same append string over and over.
-func mergeUserAgent(uas ...string) string {
- included := map[string]bool{}
- merged := []string{}
- for _, ua := range uas {
- ua = strings.TrimSpace(ua)
-
- if ua == "" {
- continue
- }
- if included[ua] {
- continue
- }
- included[ua] = true
- merged = append(merged, ua)
- }
- return strings.Join(merged, " ")
-}
-
-func mergeWriters(writers ...io.Writer) io.Writer {
- compact := []io.Writer{}
- for _, w := range writers {
- if w != nil {
- compact = append(compact, w)
- }
- }
- if len(compact) == 0 {
- return ioutil.Discard
- }
- if len(compact) == 1 {
- return compact[0]
- }
- return io.MultiWriter(compact...)
-}
-
-func writeOutput(ctx context.Context, r io.ReadCloser, w io.Writer) error {
- // ReadBytes will block until bytes are read, which can cause a delay in
- // returning even if the command's context has been canceled. Use a separate
- // goroutine to prompt ReadBytes to return on cancel
- closeCtx, closeCancel := context.WithCancel(ctx)
- defer closeCancel()
- go func() {
- select {
- case <-ctx.Done():
- r.Close()
- case <-closeCtx.Done():
- return
- }
- }()
-
- buf := bufio.NewReader(r)
- for {
- line, err := buf.ReadBytes('\n')
- if len(line) > 0 {
- if _, err := w.Write(line); err != nil {
- return err
- }
- }
- if err != nil {
- if errors.Is(err, io.EOF) {
- return nil
- }
-
- return err
- }
- }
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go
deleted file mode 100644
index 3af11c81..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_default.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-//go:build !linux
-// +build !linux
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "os/exec"
- "strings"
- "sync"
-)
-
-func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error {
- var errBuf strings.Builder
-
- // check for early cancellation
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- // Read stdout / stderr logs from pipe instead of setting cmd.Stdout and
- // cmd.Stderr because it can cause hanging when killing the command
- // https://github.com/golang/go/issues/23019
- stdoutWriter := mergeWriters(cmd.Stdout, tf.stdout)
- stderrWriter := mergeWriters(tf.stderr, &errBuf)
-
- cmd.Stderr = nil
- cmd.Stdout = nil
-
- stdoutPipe, err := cmd.StdoutPipe()
- if err != nil {
- return err
- }
-
- stderrPipe, err := cmd.StderrPipe()
- if err != nil {
- return err
- }
-
- err = cmd.Start()
- if ctx.Err() != nil {
- return cmdErr{
- err: err,
- ctxErr: ctx.Err(),
- }
- }
- if err != nil {
- return err
- }
-
- var errStdout, errStderr error
- var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- defer wg.Done()
- errStdout = writeOutput(ctx, stdoutPipe, stdoutWriter)
- }()
-
- wg.Add(1)
- go func() {
- defer wg.Done()
- errStderr = writeOutput(ctx, stderrPipe, stderrWriter)
- }()
-
- // Reads from pipes must be completed before calling cmd.Wait(). Otherwise
- // can cause a race condition
- wg.Wait()
-
- err = cmd.Wait()
- if ctx.Err() != nil {
- return cmdErr{
- err: err,
- ctxErr: ctx.Err(),
- }
- }
- if err != nil {
- return fmt.Errorf("%w\n%s", err, errBuf.String())
- }
-
- // Return error if there was an issue reading the std out/err
- if errStdout != nil && ctx.Err() != nil {
- return fmt.Errorf("%w\n%s", errStdout, errBuf.String())
- }
- if errStderr != nil && ctx.Err() != nil {
- return fmt.Errorf("%w\n%s", errStderr, errBuf.String())
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go
deleted file mode 100644
index 0565372c..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/cmd_linux.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "os/exec"
- "strings"
- "sync"
- "syscall"
-)
-
-func (tf *Terraform) runTerraformCmd(ctx context.Context, cmd *exec.Cmd) error {
- var errBuf strings.Builder
-
- cmd.SysProcAttr = &syscall.SysProcAttr{
- // kill children if parent is dead
- Pdeathsig: syscall.SIGKILL,
- // set process group ID
- Setpgid: true,
- }
-
- // check for early cancellation
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- // Read stdout / stderr logs from pipe instead of setting cmd.Stdout and
- // cmd.Stderr because it can cause hanging when killing the command
- // https://github.com/golang/go/issues/23019
- stdoutWriter := mergeWriters(cmd.Stdout, tf.stdout)
- stderrWriter := mergeWriters(tf.stderr, &errBuf)
-
- cmd.Stderr = nil
- cmd.Stdout = nil
-
- stdoutPipe, err := cmd.StdoutPipe()
- if err != nil {
- return err
- }
-
- stderrPipe, err := cmd.StderrPipe()
- if err != nil {
- return err
- }
-
- err = cmd.Start()
- if ctx.Err() != nil {
- return cmdErr{
- err: err,
- ctxErr: ctx.Err(),
- }
- }
- if err != nil {
- return err
- }
-
- var errStdout, errStderr error
- var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- defer wg.Done()
- errStdout = writeOutput(ctx, stdoutPipe, stdoutWriter)
- }()
-
- wg.Add(1)
- go func() {
- defer wg.Done()
- errStderr = writeOutput(ctx, stderrPipe, stderrWriter)
- }()
-
- // Reads from pipes must be completed before calling cmd.Wait(). Otherwise
- // can cause a race condition
- wg.Wait()
-
- err = cmd.Wait()
- if ctx.Err() != nil {
- return cmdErr{
- err: err,
- ctxErr: ctx.Err(),
- }
- }
- if err != nil {
- return fmt.Errorf("%w\n%s", err, errBuf.String())
- }
-
- // Return error if there was an issue reading the std out/err
- if errStdout != nil && ctx.Err() != nil {
- return fmt.Errorf("%w\n%s", errStdout, errBuf.String())
- }
- if errStderr != nil && ctx.Err() != nil {
- return fmt.Errorf("%w\n%s", errStderr, errBuf.String())
- }
-
- return nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/destroy.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/destroy.go
deleted file mode 100644
index dbef8b37..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/destroy.go
+++ /dev/null
@@ -1,204 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "io"
- "os/exec"
- "strconv"
-)
-
-type destroyConfig struct {
- backup string
- dir string
- lock bool
-
- // LockTimeout must be a string with time unit, e.g. '10s'
- lockTimeout string
- parallelism int
- reattachInfo ReattachInfo
- refresh bool
- state string
- stateOut string
- targets []string
-
- // Vars: each var must be supplied as a single string, e.g. 'foo=bar'
- vars []string
- varFiles []string
-}
-
-var defaultDestroyOptions = destroyConfig{
- lock: true,
- lockTimeout: "0s",
- parallelism: 10,
- refresh: true,
-}
-
-// DestroyOption represents options used in the Destroy method.
-type DestroyOption interface {
- configureDestroy(*destroyConfig)
-}
-
-func (opt *DirOption) configureDestroy(conf *destroyConfig) {
- conf.dir = opt.path
-}
-
-func (opt *ParallelismOption) configureDestroy(conf *destroyConfig) {
- conf.parallelism = opt.parallelism
-}
-
-func (opt *BackupOption) configureDestroy(conf *destroyConfig) {
- conf.backup = opt.path
-}
-
-func (opt *TargetOption) configureDestroy(conf *destroyConfig) {
- conf.targets = append(conf.targets, opt.target)
-}
-
-func (opt *LockTimeoutOption) configureDestroy(conf *destroyConfig) {
- conf.lockTimeout = opt.timeout
-}
-
-func (opt *StateOption) configureDestroy(conf *destroyConfig) {
- conf.state = opt.path
-}
-
-func (opt *StateOutOption) configureDestroy(conf *destroyConfig) {
- conf.stateOut = opt.path
-}
-
-func (opt *VarFileOption) configureDestroy(conf *destroyConfig) {
- conf.varFiles = append(conf.varFiles, opt.path)
-}
-
-func (opt *LockOption) configureDestroy(conf *destroyConfig) {
- conf.lock = opt.lock
-}
-
-func (opt *RefreshOption) configureDestroy(conf *destroyConfig) {
- conf.refresh = opt.refresh
-}
-
-func (opt *VarOption) configureDestroy(conf *destroyConfig) {
- conf.vars = append(conf.vars, opt.assignment)
-}
-
-func (opt *ReattachOption) configureDestroy(conf *destroyConfig) {
- conf.reattachInfo = opt.info
-}
-
-// Destroy represents the terraform destroy subcommand.
-func (tf *Terraform) Destroy(ctx context.Context, opts ...DestroyOption) error {
- cmd, err := tf.destroyCmd(ctx, opts...)
- if err != nil {
- return err
- }
- return tf.runTerraformCmd(ctx, cmd)
-}
-
-// DestroyJSON represents the terraform destroy subcommand with the `-json` flag.
-// Using the `-json` flag will result in
-// [machine-readable](https://developer.hashicorp.com/terraform/internals/machine-readable-ui)
-// JSON being written to the supplied `io.Writer`. DestroyJSON is likely to be
-// removed in a future major version in favour of Destroy returning JSON by default.
-func (tf *Terraform) DestroyJSON(ctx context.Context, w io.Writer, opts ...DestroyOption) error {
- err := tf.compatible(ctx, tf0_15_3, nil)
- if err != nil {
- return fmt.Errorf("terraform destroy -json was added in 0.15.3: %w", err)
- }
-
- tf.SetStdout(w)
-
- cmd, err := tf.destroyJSONCmd(ctx, opts...)
- if err != nil {
- return err
- }
-
- return tf.runTerraformCmd(ctx, cmd)
-}
-
-func (tf *Terraform) destroyCmd(ctx context.Context, opts ...DestroyOption) (*exec.Cmd, error) {
- c := defaultDestroyOptions
-
- for _, o := range opts {
- o.configureDestroy(&c)
- }
-
- args := tf.buildDestroyArgs(c)
-
- return tf.buildDestroyCmd(ctx, c, args)
-}
-
-func (tf *Terraform) destroyJSONCmd(ctx context.Context, opts ...DestroyOption) (*exec.Cmd, error) {
- c := defaultDestroyOptions
-
- for _, o := range opts {
- o.configureDestroy(&c)
- }
-
- args := tf.buildDestroyArgs(c)
- args = append(args, "-json")
-
- return tf.buildDestroyCmd(ctx, c, args)
-}
-
-func (tf *Terraform) buildDestroyArgs(c destroyConfig) []string {
- args := []string{"destroy", "-no-color", "-auto-approve", "-input=false"}
-
- // string opts: only pass if set
- if c.backup != "" {
- args = append(args, "-backup="+c.backup)
- }
- if c.lockTimeout != "" {
- args = append(args, "-lock-timeout="+c.lockTimeout)
- }
- if c.state != "" {
- args = append(args, "-state="+c.state)
- }
- if c.stateOut != "" {
- args = append(args, "-state-out="+c.stateOut)
- }
- for _, vf := range c.varFiles {
- args = append(args, "-var-file="+vf)
- }
-
- // boolean and numerical opts: always pass
- args = append(args, "-lock="+strconv.FormatBool(c.lock))
- args = append(args, "-parallelism="+fmt.Sprint(c.parallelism))
- args = append(args, "-refresh="+strconv.FormatBool(c.refresh))
-
- // string slice opts: split into separate args
- if c.targets != nil {
- for _, ta := range c.targets {
- args = append(args, "-target="+ta)
- }
- }
- if c.vars != nil {
- for _, v := range c.vars {
- args = append(args, "-var", v)
- }
- }
-
- return args
-}
-
-func (tf *Terraform) buildDestroyCmd(ctx context.Context, c destroyConfig, args []string) (*exec.Cmd, error) {
- // optional positional argument
- if c.dir != "" {
- args = append(args, c.dir)
- }
-
- mergeEnv := map[string]string{}
- if c.reattachInfo != nil {
- reattachStr, err := c.reattachInfo.marshalString()
- if err != nil {
- return nil, err
- }
- mergeEnv[reattachEnvVar] = reattachStr
- }
-
- return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/doc.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/doc.go
deleted file mode 100644
index 288476f5..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/doc.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-// Package tfexec exposes functionality for constructing and running Terraform
-// CLI commands. Structured return values use the data types defined in the
-// github.com/hashicorp/terraform-json package.
-package tfexec
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go
deleted file mode 100644
index c6645e8b..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/errors.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
-)
-
-// this file contains non-parsed exported errors
-
-type ErrNoSuitableBinary struct {
- err error
-}
-
-func (e *ErrNoSuitableBinary) Error() string {
- return fmt.Sprintf("no suitable terraform binary could be found: %s", e.err.Error())
-}
-
-func (e *ErrNoSuitableBinary) Unwrap() error {
- return e.err
-}
-
-// ErrVersionMismatch is returned when the detected Terraform version is not compatible with the
-// command or flags being used in this invocation.
-type ErrVersionMismatch struct {
- MinInclusive string
- MaxExclusive string
- Actual string
-}
-
-func (e *ErrVersionMismatch) Error() string {
- return fmt.Sprintf("unexpected version %s (min: %s, max: %s)", e.Actual, e.MinInclusive, e.MaxExclusive)
-}
-
-// ErrManualEnvVar is returned when an env var that should be set programatically via an option or method
-// is set via the manual environment passing functions.
-type ErrManualEnvVar struct {
- Name string
-}
-
-func (err *ErrManualEnvVar) Error() string {
- return fmt.Sprintf("manual setting of env var %q detected", err.Name)
-}
-
-// cmdErr is a custom error type to be returned when a cmd exits with a context
-// error such as context.Canceled or context.DeadlineExceeded.
-// The type is specifically designed to respond true to errors.Is for these two
-// errors.
-// See https://github.com/golang/go/issues/21880 for why this is necessary.
-type cmdErr struct {
- err error
- ctxErr error
-}
-
-func (e cmdErr) Is(target error) bool {
- switch target {
- case context.DeadlineExceeded, context.Canceled:
- return e.ctxErr == context.DeadlineExceeded || e.ctxErr == context.Canceled
- }
- return false
-}
-
-func (e cmdErr) Error() string {
- return e.err.Error()
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/fmt.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/fmt.go
deleted file mode 100644
index 09794923..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/fmt.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "io"
- "os/exec"
- "path/filepath"
- "strings"
-)
-
-type formatConfig struct {
- recursive bool
- dir string
-}
-
-var defaultFormatConfig = formatConfig{
- recursive: false,
-}
-
-type FormatOption interface {
- configureFormat(*formatConfig)
-}
-
-func (opt *RecursiveOption) configureFormat(conf *formatConfig) {
- conf.recursive = opt.recursive
-}
-
-func (opt *DirOption) configureFormat(conf *formatConfig) {
- conf.dir = opt.path
-}
-
-// FormatString formats a passed string, given a path to Terraform.
-func FormatString(ctx context.Context, execPath string, content string) (string, error) {
- tf, err := NewTerraform(filepath.Dir(execPath), execPath)
- if err != nil {
- return "", err
- }
-
- return tf.FormatString(ctx, content)
-}
-
-// FormatString formats a passed string.
-func (tf *Terraform) FormatString(ctx context.Context, content string) (string, error) {
- in := strings.NewReader(content)
- var outBuf strings.Builder
- err := tf.Format(ctx, in, &outBuf)
- if err != nil {
- return "", err
- }
- return outBuf.String(), nil
-}
-
-// Format performs formatting on the unformatted io.Reader (as stdin to the CLI) and returns
-// the formatted result on the formatted io.Writer.
-func (tf *Terraform) Format(ctx context.Context, unformatted io.Reader, formatted io.Writer) error {
- cmd, err := tf.formatCmd(ctx, nil, Dir("-"))
- if err != nil {
- return err
- }
-
- cmd.Stdin = unformatted
- cmd.Stdout = mergeWriters(cmd.Stdout, formatted)
-
- return tf.runTerraformCmd(ctx, cmd)
-}
-
-// FormatWrite attempts to format and modify all config files in the working or selected (via DirOption) directory.
-func (tf *Terraform) FormatWrite(ctx context.Context, opts ...FormatOption) error {
- for _, o := range opts {
- switch o := o.(type) {
- case *DirOption:
- if o.path == "-" {
- return fmt.Errorf("a path of \"-\" is not supported for this method, please use FormatString")
- }
- }
- }
-
- cmd, err := tf.formatCmd(ctx, []string{"-write=true", "-list=false", "-diff=false"}, opts...)
- if err != nil {
- return err
- }
-
- return tf.runTerraformCmd(ctx, cmd)
-}
-
-// FormatCheck returns true if the config files in the working or selected (via DirOption) directory are already formatted.
-func (tf *Terraform) FormatCheck(ctx context.Context, opts ...FormatOption) (bool, []string, error) {
- for _, o := range opts {
- switch o := o.(type) {
- case *DirOption:
- if o.path == "-" {
- return false, nil, fmt.Errorf("a path of \"-\" is not supported for this method, please use FormatString")
- }
- }
- }
-
- cmd, err := tf.formatCmd(ctx, []string{"-write=false", "-list=true", "-diff=false", "-check=true"}, opts...)
- if err != nil {
- return false, nil, err
- }
-
- var outBuf strings.Builder
- cmd.Stdout = mergeWriters(cmd.Stdout, &outBuf)
-
- err = tf.runTerraformCmd(ctx, cmd)
- if err == nil {
- return true, nil, nil
- }
- if cmd.ProcessState.ExitCode() == 3 {
- // unformatted, parse the file list
-
- files := []string{}
- lines := strings.Split(strings.Replace(outBuf.String(), "\r\n", "\n", -1), "\n")
- for _, l := range lines {
- l = strings.TrimSpace(l)
- if l == "" {
- continue
- }
- files = append(files, l)
- }
-
- return false, files, nil
- }
- return false, nil, err
-}
-
-func (tf *Terraform) formatCmd(ctx context.Context, args []string, opts ...FormatOption) (*exec.Cmd, error) {
- err := tf.compatible(ctx, tf0_7_7, nil)
- if err != nil {
- return nil, fmt.Errorf("fmt was first introduced in Terraform 0.7.7: %w", err)
- }
-
- c := defaultFormatConfig
-
- for _, o := range opts {
- switch o.(type) {
- case *RecursiveOption:
- err := tf.compatible(ctx, tf0_12_0, nil)
- if err != nil {
- return nil, fmt.Errorf("-recursive was added to fmt in Terraform 0.12: %w", err)
- }
- }
-
- o.configureFormat(&c)
- }
-
- args = append([]string{"fmt", "-no-color"}, args...)
-
- if c.recursive {
- args = append(args, "-recursive")
- }
-
- if c.dir != "" {
- args = append(args, c.dir)
- }
-
- return tf.buildTerraformCmd(ctx, nil, args...), nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/force_unlock.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/force_unlock.go
deleted file mode 100644
index 7d74a12f..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/force_unlock.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "os/exec"
-)
-
-type forceUnlockConfig struct {
- dir string
-}
-
-var defaultForceUnlockOptions = forceUnlockConfig{}
-
-type ForceUnlockOption interface {
- configureForceUnlock(*forceUnlockConfig)
-}
-
-func (opt *DirOption) configureForceUnlock(conf *forceUnlockConfig) {
- conf.dir = opt.path
-}
-
-// ForceUnlock represents the `terraform force-unlock` command
-func (tf *Terraform) ForceUnlock(ctx context.Context, lockID string, opts ...ForceUnlockOption) error {
- unlockCmd, err := tf.forceUnlockCmd(ctx, lockID, opts...)
- if err != nil {
- return err
- }
-
- if err := tf.runTerraformCmd(ctx, unlockCmd); err != nil {
- return err
- }
-
- return nil
-}
-
-func (tf *Terraform) forceUnlockCmd(ctx context.Context, lockID string, opts ...ForceUnlockOption) (*exec.Cmd, error) {
- c := defaultForceUnlockOptions
-
- for _, o := range opts {
- o.configureForceUnlock(&c)
- }
- args := []string{"force-unlock", "-no-color", "-force"}
-
- // positional arguments
- args = append(args, lockID)
-
- // optional positional arguments
- if c.dir != "" {
- err := tf.compatible(ctx, nil, tf0_15_0)
- if err != nil {
- return nil, fmt.Errorf("[DIR] option was removed in Terraform v0.15.0")
- }
- args = append(args, c.dir)
- }
-
- return tf.buildTerraformCmd(ctx, nil, args...), nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/get.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/get.go
deleted file mode 100644
index 8a1363b5..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/get.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "os/exec"
-)
-
-type getCmdConfig struct {
- dir string
- update bool
-}
-
-// GetCmdOption represents options used in the Get method.
-type GetCmdOption interface {
- configureGet(*getCmdConfig)
-}
-
-func (opt *DirOption) configureGet(conf *getCmdConfig) {
- conf.dir = opt.path
-}
-
-func (opt *UpdateOption) configureGet(conf *getCmdConfig) {
- conf.update = opt.update
-}
-
-// Get represents the terraform get subcommand.
-func (tf *Terraform) Get(ctx context.Context, opts ...GetCmdOption) error {
- cmd, err := tf.getCmd(ctx, opts...)
- if err != nil {
- return err
- }
- return tf.runTerraformCmd(ctx, cmd)
-}
-
-func (tf *Terraform) getCmd(ctx context.Context, opts ...GetCmdOption) (*exec.Cmd, error) {
- c := getCmdConfig{}
-
- for _, o := range opts {
- o.configureGet(&c)
- }
-
- args := []string{"get", "-no-color"}
-
- args = append(args, "-update="+fmt.Sprint(c.update))
-
- if c.dir != "" {
- args = append(args, c.dir)
- }
-
- return tf.buildTerraformCmd(ctx, nil, args...), nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/graph.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/graph.go
deleted file mode 100644
index 0f8b0eee..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/graph.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "os/exec"
- "strings"
-)
-
-type graphConfig struct {
- plan string
- drawCycles bool
- graphType string
-}
-
-var defaultGraphOptions = graphConfig{}
-
-type GraphOption interface {
- configureGraph(*graphConfig)
-}
-
-func (opt *GraphPlanOption) configureGraph(conf *graphConfig) {
- conf.plan = opt.file
-}
-
-func (opt *DrawCyclesOption) configureGraph(conf *graphConfig) {
- conf.drawCycles = opt.drawCycles
-}
-
-func (opt *GraphTypeOption) configureGraph(conf *graphConfig) {
- conf.graphType = opt.graphType
-}
-
-func (tf *Terraform) Graph(ctx context.Context, opts ...GraphOption) (string, error) {
- graphCmd, err := tf.graphCmd(ctx, opts...)
- if err != nil {
- return "", err
- }
- var outBuf strings.Builder
- graphCmd.Stdout = &outBuf
- err = tf.runTerraformCmd(ctx, graphCmd)
- if err != nil {
- return "", err
- }
-
- return outBuf.String(), nil
-
-}
-
-func (tf *Terraform) graphCmd(ctx context.Context, opts ...GraphOption) (*exec.Cmd, error) {
- c := defaultGraphOptions
-
- for _, o := range opts {
- o.configureGraph(&c)
- }
-
- args := []string{"graph"}
-
- if c.plan != "" {
- // plan was a positional argument prior to Terraform 0.15.0. Ensure proper use by checking version.
- if err := tf.compatible(ctx, tf0_15_0, nil); err == nil {
- args = append(args, "-plan="+c.plan)
- } else {
- args = append(args, c.plan)
- }
- }
-
- if c.drawCycles {
- err := tf.compatible(ctx, tf0_5_0, nil)
- if err != nil {
- return nil, fmt.Errorf("-draw-cycles was first introduced in Terraform 0.5.0: %w", err)
- }
- args = append(args, "-draw-cycles")
- }
-
- if c.graphType != "" {
- err := tf.compatible(ctx, tf0_8_0, nil)
- if err != nil {
- return nil, fmt.Errorf("-graph-type was first introduced in Terraform 0.8.0: %w", err)
- }
- args = append(args, "-type="+c.graphType)
- }
-
- return tf.buildTerraformCmd(ctx, nil, args...), nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/import.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/import.go
deleted file mode 100644
index 67275dfa..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/import.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "os/exec"
- "strconv"
-)
-
-type importConfig struct {
- addr string
- id string
- backup string
- config string
- allowMissingConfig bool
- lock bool
- lockTimeout string
- reattachInfo ReattachInfo
- state string
- stateOut string
- vars []string
- varFiles []string
-}
-
-var defaultImportOptions = importConfig{
- allowMissingConfig: false,
- lock: true,
- lockTimeout: "0s",
-}
-
-// ImportOption represents options used in the Import method.
-type ImportOption interface {
- configureImport(*importConfig)
-}
-
-func (opt *BackupOption) configureImport(conf *importConfig) {
- conf.backup = opt.path
-}
-
-func (opt *ConfigOption) configureImport(conf *importConfig) {
- conf.config = opt.path
-}
-
-func (opt *AllowMissingConfigOption) configureImport(conf *importConfig) {
- conf.allowMissingConfig = opt.allowMissingConfig
-}
-
-func (opt *LockOption) configureImport(conf *importConfig) {
- conf.lock = opt.lock
-}
-
-func (opt *LockTimeoutOption) configureImport(conf *importConfig) {
- conf.lockTimeout = opt.timeout
-}
-
-func (opt *ReattachOption) configureImport(conf *importConfig) {
- conf.reattachInfo = opt.info
-}
-
-func (opt *StateOption) configureImport(conf *importConfig) {
- conf.state = opt.path
-}
-
-func (opt *StateOutOption) configureImport(conf *importConfig) {
- conf.stateOut = opt.path
-}
-
-func (opt *VarOption) configureImport(conf *importConfig) {
- conf.vars = append(conf.vars, opt.assignment)
-}
-
-func (opt *VarFileOption) configureImport(conf *importConfig) {
- conf.varFiles = append(conf.varFiles, opt.path)
-}
-
-// Import represents the terraform import subcommand.
-func (tf *Terraform) Import(ctx context.Context, address, id string, opts ...ImportOption) error {
- cmd, err := tf.importCmd(ctx, address, id, opts...)
- if err != nil {
- return err
- }
- return tf.runTerraformCmd(ctx, cmd)
-}
-
-func (tf *Terraform) importCmd(ctx context.Context, address, id string, opts ...ImportOption) (*exec.Cmd, error) {
- c := defaultImportOptions
-
- for _, o := range opts {
- o.configureImport(&c)
- }
-
- args := []string{"import", "-no-color", "-input=false"}
-
- // string opts: only pass if set
- if c.backup != "" {
- args = append(args, "-backup="+c.backup)
- }
- if c.config != "" {
- args = append(args, "-config="+c.config)
- }
- if c.lockTimeout != "" {
- args = append(args, "-lock-timeout="+c.lockTimeout)
- }
- if c.state != "" {
- args = append(args, "-state="+c.state)
- }
- if c.stateOut != "" {
- args = append(args, "-state-out="+c.stateOut)
- }
- for _, vf := range c.varFiles {
- args = append(args, "-var-file="+vf)
- }
-
- // boolean and numerical opts: always pass
- args = append(args, "-lock="+strconv.FormatBool(c.lock))
-
- // unary flags: pass if true
- if c.allowMissingConfig {
- args = append(args, "-allow-missing-config")
- }
-
- // string slice opts: split into separate args
- if c.vars != nil {
- for _, v := range c.vars {
- args = append(args, "-var", v)
- }
- }
-
- // required args, always pass
- args = append(args, address, id)
-
- mergeEnv := map[string]string{}
- if c.reattachInfo != nil {
- reattachStr, err := c.reattachInfo.marshalString()
- if err != nil {
- return nil, err
- }
- mergeEnv[reattachEnvVar] = reattachStr
- }
-
- return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/init.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/init.go
deleted file mode 100644
index c292fdc0..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/init.go
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "os/exec"
-)
-
-type initConfig struct {
- backend bool
- backendConfig []string
- dir string
- forceCopy bool
- fromModule string
- get bool
- getPlugins bool
- lock bool
- lockTimeout string
- pluginDir []string
- reattachInfo ReattachInfo
- reconfigure bool
- upgrade bool
- verifyPlugins bool
-}
-
-var defaultInitOptions = initConfig{
- backend: true,
- forceCopy: false,
- get: true,
- getPlugins: true,
- lock: true,
- lockTimeout: "0s",
- reconfigure: false,
- upgrade: false,
- verifyPlugins: true,
-}
-
-// InitOption represents options used in the Init method.
-type InitOption interface {
- configureInit(*initConfig)
-}
-
-func (opt *BackendOption) configureInit(conf *initConfig) {
- conf.backend = opt.backend
-}
-
-func (opt *BackendConfigOption) configureInit(conf *initConfig) {
- conf.backendConfig = append(conf.backendConfig, opt.path)
-}
-
-func (opt *DirOption) configureInit(conf *initConfig) {
- conf.dir = opt.path
-}
-
-func (opt *ForceCopyOption) configureInit(conf *initConfig) {
- conf.forceCopy = opt.forceCopy
-}
-
-func (opt *FromModuleOption) configureInit(conf *initConfig) {
- conf.fromModule = opt.source
-}
-
-func (opt *GetOption) configureInit(conf *initConfig) {
- conf.get = opt.get
-}
-
-func (opt *GetPluginsOption) configureInit(conf *initConfig) {
- conf.getPlugins = opt.getPlugins
-}
-
-func (opt *LockOption) configureInit(conf *initConfig) {
- conf.lock = opt.lock
-}
-
-func (opt *LockTimeoutOption) configureInit(conf *initConfig) {
- conf.lockTimeout = opt.timeout
-}
-
-func (opt *PluginDirOption) configureInit(conf *initConfig) {
- conf.pluginDir = append(conf.pluginDir, opt.pluginDir)
-}
-
-func (opt *ReattachOption) configureInit(conf *initConfig) {
- conf.reattachInfo = opt.info
-}
-
-func (opt *ReconfigureOption) configureInit(conf *initConfig) {
- conf.reconfigure = opt.reconfigure
-}
-
-func (opt *UpgradeOption) configureInit(conf *initConfig) {
- conf.upgrade = opt.upgrade
-}
-
-func (opt *VerifyPluginsOption) configureInit(conf *initConfig) {
- conf.verifyPlugins = opt.verifyPlugins
-}
-
-// Init represents the terraform init subcommand.
-func (tf *Terraform) Init(ctx context.Context, opts ...InitOption) error {
- cmd, err := tf.initCmd(ctx, opts...)
- if err != nil {
- return err
- }
- return tf.runTerraformCmd(ctx, cmd)
-}
-
-func (tf *Terraform) initCmd(ctx context.Context, opts ...InitOption) (*exec.Cmd, error) {
- c := defaultInitOptions
-
- for _, o := range opts {
- switch o.(type) {
- case *LockOption, *LockTimeoutOption, *VerifyPluginsOption, *GetPluginsOption:
- err := tf.compatible(ctx, nil, tf0_15_0)
- if err != nil {
- return nil, fmt.Errorf("-lock, -lock-timeout, -verify-plugins, and -get-plugins options are no longer available as of Terraform 0.15: %w", err)
- }
- }
-
- o.configureInit(&c)
- }
-
- args := []string{"init", "-no-color", "-input=false"}
-
- // string opts: only pass if set
- if c.fromModule != "" {
- args = append(args, "-from-module="+c.fromModule)
- }
-
- // string opts removed in 0.15: pass if set and <0.15
- err := tf.compatible(ctx, nil, tf0_15_0)
- if err == nil {
- if c.lockTimeout != "" {
- args = append(args, "-lock-timeout="+c.lockTimeout)
- }
- }
-
- // boolean opts: always pass
- args = append(args, "-backend="+fmt.Sprint(c.backend))
- args = append(args, "-get="+fmt.Sprint(c.get))
- args = append(args, "-upgrade="+fmt.Sprint(c.upgrade))
-
- // boolean opts removed in 0.15: pass if <0.15
- err = tf.compatible(ctx, nil, tf0_15_0)
- if err == nil {
- args = append(args, "-lock="+fmt.Sprint(c.lock))
- args = append(args, "-get-plugins="+fmt.Sprint(c.getPlugins))
- args = append(args, "-verify-plugins="+fmt.Sprint(c.verifyPlugins))
- }
-
- if c.forceCopy {
- args = append(args, "-force-copy")
- }
-
- // unary flags: pass if true
- if c.reconfigure {
- args = append(args, "-reconfigure")
- }
-
- // string slice opts: split into separate args
- if c.backendConfig != nil {
- for _, bc := range c.backendConfig {
- args = append(args, "-backend-config="+bc)
- }
- }
- if c.pluginDir != nil {
- for _, pd := range c.pluginDir {
- args = append(args, "-plugin-dir="+pd)
- }
- }
-
- // optional positional argument
- if c.dir != "" {
- args = append(args, c.dir)
- }
-
- mergeEnv := map[string]string{}
- if c.reattachInfo != nil {
- reattachStr, err := c.reattachInfo.marshalString()
- if err != nil {
- return nil, err
- }
- mergeEnv[reattachEnvVar] = reattachStr
- }
-
- return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/metadata_functions.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/metadata_functions.go
deleted file mode 100644
index 0e642b2d..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/metadata_functions.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "os/exec"
-
- tfjson "github.com/hashicorp/terraform-json"
-)
-
-// MetadataFunctions represents the terraform metadata functions -json subcommand.
-func (tf *Terraform) MetadataFunctions(ctx context.Context) (*tfjson.MetadataFunctions, error) {
- err := tf.compatible(ctx, tf1_4_0, nil)
- if err != nil {
- return nil, fmt.Errorf("terraform metadata functions was added in 1.4.0: %w", err)
- }
-
- functionsCmd := tf.metadataFunctionsCmd(ctx)
-
- var ret tfjson.MetadataFunctions
- err = tf.runTerraformCmdJSON(ctx, functionsCmd, &ret)
- if err != nil {
- return nil, err
- }
-
- return &ret, nil
-}
-
-func (tf *Terraform) metadataFunctionsCmd(ctx context.Context, args ...string) *exec.Cmd {
- allArgs := []string{"metadata", "functions", "-json"}
- allArgs = append(allArgs, args...)
-
- return tf.buildTerraformCmd(ctx, nil, allArgs...)
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go
deleted file mode 100644
index 339bf39e..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/options.go
+++ /dev/null
@@ -1,452 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "encoding/json"
-)
-
-// AllowDeferralOption represents the -allow-deferral flag. This flag is only enabled in
-// experimental builds of Terraform. (alpha or built via source with experiments enabled)
-type AllowDeferralOption struct {
- allowDeferral bool
-}
-
-// AllowDeferral represents the -allow-deferral flag. This flag is only enabled in
-// experimental builds of Terraform. (alpha or built via source with experiments enabled)
-func AllowDeferral(allowDeferral bool) *AllowDeferralOption {
- return &AllowDeferralOption{allowDeferral}
-}
-
-// AllowMissingConfigOption represents the -allow-missing-config flag.
-type AllowMissingConfigOption struct {
- allowMissingConfig bool
-}
-
-// AllowMissingConfig represents the -allow-missing-config flag.
-func AllowMissingConfig(allowMissingConfig bool) *AllowMissingConfigOption {
- return &AllowMissingConfigOption{allowMissingConfig}
-}
-
-// AllowMissingOption represents the -allow-missing flag.
-type AllowMissingOption struct {
- allowMissing bool
-}
-
-// AllowMissing represents the -allow-missing flag.
-func AllowMissing(allowMissing bool) *AllowMissingOption {
- return &AllowMissingOption{allowMissing}
-}
-
-// BackendOption represents the -backend flag.
-type BackendOption struct {
- backend bool
-}
-
-// Backend represents the -backend flag.
-func Backend(backend bool) *BackendOption {
- return &BackendOption{backend}
-}
-
-// BackendConfigOption represents the -backend-config flag.
-type BackendConfigOption struct {
- path string
-}
-
-// BackendConfig represents the -backend-config flag.
-func BackendConfig(backendConfig string) *BackendConfigOption {
- return &BackendConfigOption{backendConfig}
-}
-
-type BackupOutOption struct {
- path string
-}
-
-// BackupOutOption represents the -backup-out flag.
-func BackupOut(path string) *BackupOutOption {
- return &BackupOutOption{path}
-}
-
-// BackupOption represents the -backup flag.
-type BackupOption struct {
- path string
-}
-
-// Backup represents the -backup flag.
-func Backup(path string) *BackupOption {
- return &BackupOption{path}
-}
-
-// DisableBackup is a convenience method for Backup("-"), indicating backup state should be disabled.
-func DisableBackup() *BackupOption {
- return &BackupOption{"-"}
-}
-
-// ConfigOption represents the -config flag.
-type ConfigOption struct {
- path string
-}
-
-// Config represents the -config flag.
-func Config(path string) *ConfigOption {
- return &ConfigOption{path}
-}
-
-// CopyStateOption represents the -state flag for terraform workspace new. This flag is used
-// to copy an existing state file in to the new workspace.
-type CopyStateOption struct {
- path string
-}
-
-// CopyState represents the -state flag for terraform workspace new. This flag is used
-// to copy an existing state file in to the new workspace.
-func CopyState(path string) *CopyStateOption {
- return &CopyStateOption{path}
-}
-
-type DirOption struct {
- path string
-}
-
-func Dir(path string) *DirOption {
- return &DirOption{path}
-}
-
-type DirOrPlanOption struct {
- path string
-}
-
-func DirOrPlan(path string) *DirOrPlanOption {
- return &DirOrPlanOption{path}
-}
-
-// DestroyFlagOption represents the -destroy flag.
-type DestroyFlagOption struct {
- // named to prevent conflict with DestroyOption interface
-
- destroy bool
-}
-
-// Destroy represents the -destroy flag.
-func Destroy(destroy bool) *DestroyFlagOption {
- return &DestroyFlagOption{destroy}
-}
-
-type DrawCyclesOption struct {
- drawCycles bool
-}
-
-// DrawCycles represents the -draw-cycles flag.
-func DrawCycles(drawCycles bool) *DrawCyclesOption {
- return &DrawCyclesOption{drawCycles}
-}
-
-type DryRunOption struct {
- dryRun bool
-}
-
-// DryRun represents the -dry-run flag.
-func DryRun(dryRun bool) *DryRunOption {
- return &DryRunOption{dryRun}
-}
-
-type FSMirrorOption struct {
- fsMirror string
-}
-
-// FSMirror represents the -fs-mirror option (path to filesystem mirror directory)
-func FSMirror(fsMirror string) *FSMirrorOption {
- return &FSMirrorOption{fsMirror}
-}
-
-type ForceOption struct {
- force bool
-}
-
-func Force(force bool) *ForceOption {
- return &ForceOption{force}
-}
-
-type ForceCopyOption struct {
- forceCopy bool
-}
-
-func ForceCopy(forceCopy bool) *ForceCopyOption {
- return &ForceCopyOption{forceCopy}
-}
-
-type FromModuleOption struct {
- source string
-}
-
-func FromModule(source string) *FromModuleOption {
- return &FromModuleOption{source}
-}
-
-type GetOption struct {
- get bool
-}
-
-func Get(get bool) *GetOption {
- return &GetOption{get}
-}
-
-type GetPluginsOption struct {
- getPlugins bool
-}
-
-func GetPlugins(getPlugins bool) *GetPluginsOption {
- return &GetPluginsOption{getPlugins}
-}
-
-// LockOption represents the -lock flag.
-type LockOption struct {
- lock bool
-}
-
-// Lock represents the -lock flag.
-func Lock(lock bool) *LockOption {
- return &LockOption{lock}
-}
-
-// LockTimeoutOption represents the -lock-timeout flag.
-type LockTimeoutOption struct {
- timeout string
-}
-
-// LockTimeout represents the -lock-timeout flag.
-func LockTimeout(lockTimeout string) *LockTimeoutOption {
- // TODO: should this just use a duration instead?
- return &LockTimeoutOption{lockTimeout}
-}
-
-type NetMirrorOption struct {
- netMirror string
-}
-
-// NetMirror represents the -net-mirror option (base URL of a network mirror)
-func NetMirror(netMirror string) *NetMirrorOption {
- return &NetMirrorOption{netMirror}
-}
-
-type OutOption struct {
- path string
-}
-
-func Out(path string) *OutOption {
- return &OutOption{path}
-}
-
-type ParallelismOption struct {
- parallelism int
-}
-
-func Parallelism(n int) *ParallelismOption {
- return &ParallelismOption{n}
-}
-
-type GraphPlanOption struct {
- file string
-}
-
-// GraphPlan represents the -plan flag which is a specified plan file string
-func GraphPlan(file string) *GraphPlanOption {
- return &GraphPlanOption{file}
-}
-
-type UseJSONNumberOption struct {
- useJSONNumber bool
-}
-
-// JSONNumber determines how numerical values are handled during JSON decoding.
-func JSONNumber(useJSONNumber bool) *UseJSONNumberOption {
- return &UseJSONNumberOption{useJSONNumber}
-}
-
-type PlatformOption struct {
- platform string
-}
-
-// Platform represents the -platform flag which is an os_arch string
-func Platform(platform string) *PlatformOption {
- return &PlatformOption{platform}
-}
-
-type PluginDirOption struct {
- pluginDir string
-}
-
-func PluginDir(pluginDir string) *PluginDirOption {
- return &PluginDirOption{pluginDir}
-}
-
-type ProviderOption struct {
- provider string
-}
-
-// Provider represents the positional argument (provider source address)
-func Provider(providers string) *ProviderOption {
- return &ProviderOption{providers}
-}
-
-type ReattachInfo map[string]ReattachConfig
-
-// ReattachConfig holds the information Terraform needs to be able to attach
-// itself to a provider process, so it can drive the process.
-type ReattachConfig struct {
- Protocol string
- ProtocolVersion int
- Pid int
- Test bool
- Addr ReattachConfigAddr
-}
-
-// ReattachConfigAddr is a JSON-encoding friendly version of net.Addr.
-type ReattachConfigAddr struct {
- Network string
- String string
-}
-
-type ReattachOption struct {
- info ReattachInfo
-}
-
-func (info ReattachInfo) marshalString() (string, error) {
- reattachStr, err := json.Marshal(info)
- if err != nil {
- return "", err
- }
- return string(reattachStr), nil
-}
-
-func Reattach(info ReattachInfo) *ReattachOption {
- return &ReattachOption{info}
-}
-
-type ReconfigureOption struct {
- reconfigure bool
-}
-
-func Reconfigure(reconfigure bool) *ReconfigureOption {
- return &ReconfigureOption{reconfigure}
-}
-
-type RecursiveOption struct {
- recursive bool
-}
-
-func Recursive(r bool) *RecursiveOption {
- return &RecursiveOption{r}
-}
-
-type RefreshOption struct {
- refresh bool
-}
-
-func Refresh(refresh bool) *RefreshOption {
- return &RefreshOption{refresh}
-}
-
-type RefreshOnlyOption struct {
- refreshOnly bool
-}
-
-func RefreshOnly(refreshOnly bool) *RefreshOnlyOption {
- return &RefreshOnlyOption{refreshOnly}
-}
-
-type ReplaceOption struct {
- address string
-}
-
-func Replace(address string) *ReplaceOption {
- return &ReplaceOption{address}
-}
-
-type StateOption struct {
- path string
-}
-
-// State represents the -state flag.
-//
-// Deprecated: The -state CLI flag is a legacy flag and should not be used.
-// If you need a different state file for every run, you can instead use the
-// local backend.
-// See https://github.com/hashicorp/terraform/issues/25920#issuecomment-676560799
-func State(path string) *StateOption {
- return &StateOption{path}
-}
-
-type StateOutOption struct {
- path string
-}
-
-func StateOut(path string) *StateOutOption {
- return &StateOutOption{path}
-}
-
-type TargetOption struct {
- target string
-}
-
-func Target(resource string) *TargetOption {
- return &TargetOption{resource}
-}
-
-type TestsDirectoryOption struct {
- testsDirectory string
-}
-
-// TestsDirectory represents the -tests-directory option (path to tests files)
-func TestsDirectory(testsDirectory string) *TestsDirectoryOption {
- return &TestsDirectoryOption{testsDirectory}
-}
-
-type GraphTypeOption struct {
- graphType string
-}
-
-func GraphType(graphType string) *GraphTypeOption {
- return &GraphTypeOption{graphType}
-}
-
-type UpdateOption struct {
- update bool
-}
-
-func Update(update bool) *UpdateOption {
- return &UpdateOption{update}
-}
-
-type UpgradeOption struct {
- upgrade bool
-}
-
-func Upgrade(upgrade bool) *UpgradeOption {
- return &UpgradeOption{upgrade}
-}
-
-type VarOption struct {
- assignment string
-}
-
-func Var(assignment string) *VarOption {
- return &VarOption{assignment}
-}
-
-type VarFileOption struct {
- path string
-}
-
-func VarFile(path string) *VarFileOption {
- return &VarFileOption{path}
-}
-
-type VerifyPluginsOption struct {
- verifyPlugins bool
-}
-
-func VerifyPlugins(verifyPlugins bool) *VerifyPluginsOption {
- return &VerifyPluginsOption{verifyPlugins}
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/output.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/output.go
deleted file mode 100644
index b1185e8a..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/output.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "encoding/json"
- "os/exec"
-)
-
-type outputConfig struct {
- state string
- json bool
-}
-
-var defaultOutputOptions = outputConfig{}
-
-// OutputOption represents options used in the Output method.
-type OutputOption interface {
- configureOutput(*outputConfig)
-}
-
-func (opt *StateOption) configureOutput(conf *outputConfig) {
- conf.state = opt.path
-}
-
-// OutputMeta represents the JSON output of 'terraform output -json',
-// which resembles state format version 3 due to a historical accident.
-// Please see hashicorp/terraform/command/output.go.
-// TODO KEM: Should this type be in terraform-json?
-type OutputMeta struct {
- Sensitive bool `json:"sensitive"`
- Type json.RawMessage `json:"type"`
- Value json.RawMessage `json:"value"`
-}
-
-// Output represents the terraform output subcommand.
-func (tf *Terraform) Output(ctx context.Context, opts ...OutputOption) (map[string]OutputMeta, error) {
- outputCmd := tf.outputCmd(ctx, opts...)
-
- outputs := map[string]OutputMeta{}
- err := tf.runTerraformCmdJSON(ctx, outputCmd, &outputs)
- if err != nil {
- return nil, err
- }
-
- return outputs, nil
-}
-
-func (tf *Terraform) outputCmd(ctx context.Context, opts ...OutputOption) *exec.Cmd {
- c := defaultOutputOptions
-
- for _, o := range opts {
- o.configureOutput(&c)
- }
-
- args := []string{"output", "-no-color", "-json"}
-
- // string opts: only pass if set
- if c.state != "" {
- args = append(args, "-state="+c.state)
- }
-
- return tf.buildTerraformCmd(ctx, nil, args...)
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go
deleted file mode 100644
index c2ec1f9e..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/plan.go
+++ /dev/null
@@ -1,286 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "io"
- "os/exec"
- "strconv"
-)
-
-type planConfig struct {
- allowDeferral bool
- destroy bool
- dir string
- lock bool
- lockTimeout string
- out string
- parallelism int
- reattachInfo ReattachInfo
- refresh bool
- refreshOnly bool
- replaceAddrs []string
- state string
- targets []string
- vars []string
- varFiles []string
-}
-
-var defaultPlanOptions = planConfig{
- destroy: false,
- lock: true,
- lockTimeout: "0s",
- parallelism: 10,
- refresh: true,
-}
-
-// PlanOption represents options used in the Plan method.
-type PlanOption interface {
- configurePlan(*planConfig)
-}
-
-func (opt *DirOption) configurePlan(conf *planConfig) {
- conf.dir = opt.path
-}
-
-func (opt *VarFileOption) configurePlan(conf *planConfig) {
- conf.varFiles = append(conf.varFiles, opt.path)
-}
-
-func (opt *VarOption) configurePlan(conf *planConfig) {
- conf.vars = append(conf.vars, opt.assignment)
-}
-
-func (opt *TargetOption) configurePlan(conf *planConfig) {
- conf.targets = append(conf.targets, opt.target)
-}
-
-func (opt *StateOption) configurePlan(conf *planConfig) {
- conf.state = opt.path
-}
-
-func (opt *ReattachOption) configurePlan(conf *planConfig) {
- conf.reattachInfo = opt.info
-}
-
-func (opt *RefreshOption) configurePlan(conf *planConfig) {
- conf.refresh = opt.refresh
-}
-
-func (opt *RefreshOnlyOption) configurePlan(conf *planConfig) {
- conf.refreshOnly = opt.refreshOnly
-}
-
-func (opt *ReplaceOption) configurePlan(conf *planConfig) {
- conf.replaceAddrs = append(conf.replaceAddrs, opt.address)
-}
-
-func (opt *ParallelismOption) configurePlan(conf *planConfig) {
- conf.parallelism = opt.parallelism
-}
-
-func (opt *OutOption) configurePlan(conf *planConfig) {
- conf.out = opt.path
-}
-
-func (opt *LockTimeoutOption) configurePlan(conf *planConfig) {
- conf.lockTimeout = opt.timeout
-}
-
-func (opt *LockOption) configurePlan(conf *planConfig) {
- conf.lock = opt.lock
-}
-
-func (opt *DestroyFlagOption) configurePlan(conf *planConfig) {
- conf.destroy = opt.destroy
-}
-
-func (opt *AllowDeferralOption) configurePlan(conf *planConfig) {
- conf.allowDeferral = opt.allowDeferral
-}
-
-// Plan executes `terraform plan` with the specified options and waits for it
-// to complete.
-//
-// The returned boolean is false when the plan diff is empty (no changes) and
-// true when the plan diff is non-empty (changes present).
-//
-// The returned error is nil if `terraform plan` has been executed and exits
-// with either 0 or 2.
-func (tf *Terraform) Plan(ctx context.Context, opts ...PlanOption) (bool, error) {
- cmd, err := tf.planCmd(ctx, opts...)
- if err != nil {
- return false, err
- }
- err = tf.runTerraformCmd(ctx, cmd)
- if err != nil && cmd.ProcessState.ExitCode() == 2 {
- return true, nil
- }
- return false, err
-}
-
-// PlanJSON executes `terraform plan` with the specified options as well as the
-// `-json` flag and waits for it to complete.
-//
-// Using the `-json` flag will result in
-// [machine-readable](https://developer.hashicorp.com/terraform/internals/machine-readable-ui)
-// JSON being written to the supplied `io.Writer`.
-//
-// The returned boolean is false when the plan diff is empty (no changes) and
-// true when the plan diff is non-empty (changes present).
-//
-// The returned error is nil if `terraform plan` has been executed and exits
-// with either 0 or 2.
-//
-// PlanJSON is likely to be removed in a future major version in favour of
-// Plan returning JSON by default.
-func (tf *Terraform) PlanJSON(ctx context.Context, w io.Writer, opts ...PlanOption) (bool, error) {
- err := tf.compatible(ctx, tf0_15_3, nil)
- if err != nil {
- return false, fmt.Errorf("terraform plan -json was added in 0.15.3: %w", err)
- }
-
- tf.SetStdout(w)
-
- cmd, err := tf.planJSONCmd(ctx, opts...)
- if err != nil {
- return false, err
- }
-
- err = tf.runTerraformCmd(ctx, cmd)
- if err != nil && cmd.ProcessState.ExitCode() == 2 {
- return true, nil
- }
-
- return false, err
-}
-
-func (tf *Terraform) planCmd(ctx context.Context, opts ...PlanOption) (*exec.Cmd, error) {
- c := defaultPlanOptions
-
- for _, o := range opts {
- o.configurePlan(&c)
- }
-
- args, err := tf.buildPlanArgs(ctx, c)
- if err != nil {
- return nil, err
- }
-
- return tf.buildPlanCmd(ctx, c, args)
-}
-
-func (tf *Terraform) planJSONCmd(ctx context.Context, opts ...PlanOption) (*exec.Cmd, error) {
- c := defaultPlanOptions
-
- for _, o := range opts {
- o.configurePlan(&c)
- }
-
- args, err := tf.buildPlanArgs(ctx, c)
- if err != nil {
- return nil, err
- }
-
- args = append(args, "-json")
-
- return tf.buildPlanCmd(ctx, c, args)
-}
-
-func (tf *Terraform) buildPlanArgs(ctx context.Context, c planConfig) ([]string, error) {
- args := []string{"plan", "-no-color", "-input=false", "-detailed-exitcode"}
-
- // string opts: only pass if set
- if c.lockTimeout != "" {
- args = append(args, "-lock-timeout="+c.lockTimeout)
- }
- if c.out != "" {
- args = append(args, "-out="+c.out)
- }
- if c.state != "" {
- args = append(args, "-state="+c.state)
- }
- for _, vf := range c.varFiles {
- args = append(args, "-var-file="+vf)
- }
-
- // boolean and numerical opts: always pass
- args = append(args, "-lock="+strconv.FormatBool(c.lock))
- args = append(args, "-parallelism="+fmt.Sprint(c.parallelism))
- args = append(args, "-refresh="+strconv.FormatBool(c.refresh))
-
- if c.refreshOnly {
- err := tf.compatible(ctx, tf0_15_4, nil)
- if err != nil {
- return nil, fmt.Errorf("refresh-only option was introduced in Terraform 0.15.4: %w", err)
- }
- if !c.refresh {
- return nil, fmt.Errorf("you cannot use refresh=false in refresh-only planning mode")
- }
- args = append(args, "-refresh-only")
- }
-
- // unary flags: pass if true
- if c.replaceAddrs != nil {
- err := tf.compatible(ctx, tf0_15_2, nil)
- if err != nil {
- return nil, fmt.Errorf("replace option was introduced in Terraform 0.15.2: %w", err)
- }
- for _, addr := range c.replaceAddrs {
- args = append(args, "-replace="+addr)
- }
- }
- if c.destroy {
- args = append(args, "-destroy")
- }
-
- // string slice opts: split into separate args
- if c.targets != nil {
- for _, ta := range c.targets {
- args = append(args, "-target="+ta)
- }
- }
- if c.vars != nil {
- for _, v := range c.vars {
- args = append(args, "-var", v)
- }
- }
- if c.allowDeferral {
- // Ensure the version is later than 1.9.0
- err := tf.compatible(ctx, tf1_9_0, nil)
- if err != nil {
- return nil, fmt.Errorf("-allow-deferral is an experimental option introduced in Terraform 1.9.0: %w", err)
- }
-
- // Ensure the version has experiments enabled (alpha or dev builds)
- err = tf.experimentsEnabled(ctx)
- if err != nil {
- return nil, fmt.Errorf("-allow-deferral is only available in experimental Terraform builds: %w", err)
- }
-
- args = append(args, "-allow-deferral")
- }
-
- return args, nil
-}
-
-func (tf *Terraform) buildPlanCmd(ctx context.Context, c planConfig, args []string) (*exec.Cmd, error) {
- // optional positional argument
- if c.dir != "" {
- args = append(args, c.dir)
- }
-
- mergeEnv := map[string]string{}
- if c.reattachInfo != nil {
- reattachStr, err := c.reattachInfo.marshalString()
- if err != nil {
- return nil, err
- }
- mergeEnv[reattachEnvVar] = reattachStr
- }
-
- return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_lock.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_lock.go
deleted file mode 100644
index ef5d995b..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_lock.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "os/exec"
-)
-
-type providersLockConfig struct {
- fsMirror string
- netMirror string
- platforms []string
- providers []string
-}
-
-var defaultProvidersLockOptions = providersLockConfig{}
-
-type ProvidersLockOption interface {
- configureProvidersLock(*providersLockConfig)
-}
-
-func (opt *FSMirrorOption) configureProvidersLock(conf *providersLockConfig) {
- conf.fsMirror = opt.fsMirror
-}
-
-func (opt *NetMirrorOption) configureProvidersLock(conf *providersLockConfig) {
- conf.netMirror = opt.netMirror
-}
-
-func (opt *PlatformOption) configureProvidersLock(conf *providersLockConfig) {
- conf.platforms = append(conf.platforms, opt.platform)
-}
-
-func (opt *ProviderOption) configureProvidersLock(conf *providersLockConfig) {
- conf.providers = append(conf.providers, opt.provider)
-}
-
-// ProvidersLock represents the `terraform providers lock` command
-func (tf *Terraform) ProvidersLock(ctx context.Context, opts ...ProvidersLockOption) error {
- err := tf.compatible(ctx, tf0_14_0, nil)
- if err != nil {
- return fmt.Errorf("terraform providers lock was added in 0.14.0: %w", err)
- }
-
- lockCmd := tf.providersLockCmd(ctx, opts...)
-
- err = tf.runTerraformCmd(ctx, lockCmd)
- if err != nil {
- return err
- }
-
- return err
-}
-
-func (tf *Terraform) providersLockCmd(ctx context.Context, opts ...ProvidersLockOption) *exec.Cmd {
- c := defaultProvidersLockOptions
-
- for _, o := range opts {
- o.configureProvidersLock(&c)
- }
- args := []string{"providers", "lock"}
-
- // string options, only pass if set
- if c.fsMirror != "" {
- args = append(args, "-fs-mirror="+c.fsMirror)
- }
-
- if c.netMirror != "" {
- args = append(args, "-net-mirror="+c.netMirror)
- }
-
- for _, p := range c.platforms {
- args = append(args, "-platform="+p)
- }
-
- // positional providers argument
- for _, p := range c.providers {
- args = append(args, p)
- }
-
- return tf.buildTerraformCmd(ctx, nil, args...)
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_schema.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_schema.go
deleted file mode 100644
index 995dd156..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/providers_schema.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "os/exec"
-
- tfjson "github.com/hashicorp/terraform-json"
-)
-
-// ProvidersSchema represents the terraform providers schema -json subcommand.
-func (tf *Terraform) ProvidersSchema(ctx context.Context) (*tfjson.ProviderSchemas, error) {
- schemaCmd := tf.providersSchemaCmd(ctx)
-
- var ret tfjson.ProviderSchemas
- err := tf.runTerraformCmdJSON(ctx, schemaCmd, &ret)
- if err != nil {
- return nil, err
- }
-
- err = ret.Validate()
- if err != nil {
- return nil, err
- }
-
- return &ret, nil
-}
-
-func (tf *Terraform) providersSchemaCmd(ctx context.Context, args ...string) *exec.Cmd {
- allArgs := []string{"providers", "schema", "-json", "-no-color"}
- allArgs = append(allArgs, args...)
-
- return tf.buildTerraformCmd(ctx, nil, allArgs...)
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/refresh.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/refresh.go
deleted file mode 100644
index 16733889..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/refresh.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "io"
- "os/exec"
- "strconv"
-)
-
-type refreshConfig struct {
- backup string
- dir string
- lock bool
- lockTimeout string
- reattachInfo ReattachInfo
- state string
- stateOut string
- targets []string
- vars []string
- varFiles []string
-}
-
-var defaultRefreshOptions = refreshConfig{
- lock: true,
- lockTimeout: "0s",
-}
-
-// RefreshCmdOption represents options used in the Refresh method.
-type RefreshCmdOption interface {
- configureRefresh(*refreshConfig)
-}
-
-func (opt *BackupOption) configureRefresh(conf *refreshConfig) {
- conf.backup = opt.path
-}
-
-func (opt *DirOption) configureRefresh(conf *refreshConfig) {
- conf.dir = opt.path
-}
-
-func (opt *LockOption) configureRefresh(conf *refreshConfig) {
- conf.lock = opt.lock
-}
-
-func (opt *LockTimeoutOption) configureRefresh(conf *refreshConfig) {
- conf.lockTimeout = opt.timeout
-}
-
-func (opt *ReattachOption) configureRefresh(conf *refreshConfig) {
- conf.reattachInfo = opt.info
-}
-
-func (opt *StateOption) configureRefresh(conf *refreshConfig) {
- conf.state = opt.path
-}
-
-func (opt *StateOutOption) configureRefresh(conf *refreshConfig) {
- conf.stateOut = opt.path
-}
-
-func (opt *TargetOption) configureRefresh(conf *refreshConfig) {
- conf.targets = append(conf.targets, opt.target)
-}
-
-func (opt *VarOption) configureRefresh(conf *refreshConfig) {
- conf.vars = append(conf.vars, opt.assignment)
-}
-
-func (opt *VarFileOption) configureRefresh(conf *refreshConfig) {
- conf.varFiles = append(conf.varFiles, opt.path)
-}
-
-// Refresh represents the terraform refresh subcommand.
-func (tf *Terraform) Refresh(ctx context.Context, opts ...RefreshCmdOption) error {
- cmd, err := tf.refreshCmd(ctx, opts...)
- if err != nil {
- return err
- }
- return tf.runTerraformCmd(ctx, cmd)
-}
-
-// RefreshJSON represents the terraform refresh subcommand with the `-json` flag.
-// Using the `-json` flag will result in
-// [machine-readable](https://developer.hashicorp.com/terraform/internals/machine-readable-ui)
-// JSON being written to the supplied `io.Writer`. RefreshJSON is likely to be
-// removed in a future major version in favour of Refresh returning JSON by default.
-func (tf *Terraform) RefreshJSON(ctx context.Context, w io.Writer, opts ...RefreshCmdOption) error {
- err := tf.compatible(ctx, tf0_15_3, nil)
- if err != nil {
- return fmt.Errorf("terraform refresh -json was added in 0.15.3: %w", err)
- }
-
- tf.SetStdout(w)
-
- cmd, err := tf.refreshJSONCmd(ctx, opts...)
- if err != nil {
- return err
- }
-
- return tf.runTerraformCmd(ctx, cmd)
-}
-
-func (tf *Terraform) refreshCmd(ctx context.Context, opts ...RefreshCmdOption) (*exec.Cmd, error) {
- c := defaultRefreshOptions
-
- for _, o := range opts {
- o.configureRefresh(&c)
- }
-
- args := tf.buildRefreshArgs(c)
-
- return tf.buildRefreshCmd(ctx, c, args)
-
-}
-
-func (tf *Terraform) refreshJSONCmd(ctx context.Context, opts ...RefreshCmdOption) (*exec.Cmd, error) {
- c := defaultRefreshOptions
-
- for _, o := range opts {
- o.configureRefresh(&c)
- }
-
- args := tf.buildRefreshArgs(c)
- args = append(args, "-json")
-
- return tf.buildRefreshCmd(ctx, c, args)
-}
-
-func (tf *Terraform) buildRefreshArgs(c refreshConfig) []string {
- args := []string{"refresh", "-no-color", "-input=false"}
-
- // string opts: only pass if set
- if c.backup != "" {
- args = append(args, "-backup="+c.backup)
- }
- if c.lockTimeout != "" {
- args = append(args, "-lock-timeout="+c.lockTimeout)
- }
- if c.state != "" {
- args = append(args, "-state="+c.state)
- }
- if c.stateOut != "" {
- args = append(args, "-state-out="+c.stateOut)
- }
- for _, vf := range c.varFiles {
- args = append(args, "-var-file="+vf)
- }
-
- // boolean and numerical opts: always pass
- args = append(args, "-lock="+strconv.FormatBool(c.lock))
-
- // string slice opts: split into separate args
- if c.targets != nil {
- for _, ta := range c.targets {
- args = append(args, "-target="+ta)
- }
- }
- if c.vars != nil {
- for _, v := range c.vars {
- args = append(args, "-var", v)
- }
- }
-
- return args
-}
-
-func (tf *Terraform) buildRefreshCmd(ctx context.Context, c refreshConfig, args []string) (*exec.Cmd, error) {
- // optional positional argument
- if c.dir != "" {
- args = append(args, c.dir)
- }
-
- mergeEnv := map[string]string{}
- if c.reattachInfo != nil {
- reattachStr, err := c.reattachInfo.marshalString()
- if err != nil {
- return nil, err
- }
- mergeEnv[reattachEnvVar] = reattachStr
- }
-
- return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/show.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/show.go
deleted file mode 100644
index 5854af1d..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/show.go
+++ /dev/null
@@ -1,219 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "os/exec"
- "strings"
-
- tfjson "github.com/hashicorp/terraform-json"
-)
-
-type showConfig struct {
- reattachInfo ReattachInfo
- jsonNumber *UseJSONNumberOption
-}
-
-var defaultShowOptions = showConfig{}
-
-type ShowOption interface {
- configureShow(*showConfig)
-}
-
-func (opt *ReattachOption) configureShow(conf *showConfig) {
- conf.reattachInfo = opt.info
-}
-
-func (opt *UseJSONNumberOption) configureShow(conf *showConfig) {
- conf.jsonNumber = opt
-}
-
-// Show reads the default state path and outputs the state.
-// To read a state or plan file, ShowState or ShowPlan must be used instead.
-func (tf *Terraform) Show(ctx context.Context, opts ...ShowOption) (*tfjson.State, error) {
- err := tf.compatible(ctx, tf0_12_0, nil)
- if err != nil {
- return nil, fmt.Errorf("terraform show -json was added in 0.12.0: %w", err)
- }
-
- c := defaultShowOptions
-
- for _, o := range opts {
- o.configureShow(&c)
- }
-
- mergeEnv := map[string]string{}
- if c.reattachInfo != nil {
- reattachStr, err := c.reattachInfo.marshalString()
- if err != nil {
- return nil, err
- }
- mergeEnv[reattachEnvVar] = reattachStr
- }
-
- showCmd := tf.showCmd(ctx, true, mergeEnv)
-
- var ret tfjson.State
- ret.UseJSONNumber(true)
-
- if c.jsonNumber != nil {
- ret.UseJSONNumber(c.jsonNumber.useJSONNumber)
- }
-
- err = tf.runTerraformCmdJSON(ctx, showCmd, &ret)
- if err != nil {
- return nil, err
- }
-
- err = ret.Validate()
- if err != nil {
- return nil, err
- }
-
- return &ret, nil
-}
-
-// ShowStateFile reads a given state file and outputs the state.
-func (tf *Terraform) ShowStateFile(ctx context.Context, statePath string, opts ...ShowOption) (*tfjson.State, error) {
- err := tf.compatible(ctx, tf0_12_0, nil)
- if err != nil {
- return nil, fmt.Errorf("terraform show -json was added in 0.12.0: %w", err)
- }
-
- if statePath == "" {
- return nil, fmt.Errorf("statePath cannot be blank: use Show() if not passing statePath")
- }
-
- c := defaultShowOptions
-
- for _, o := range opts {
- o.configureShow(&c)
- }
-
- mergeEnv := map[string]string{}
- if c.reattachInfo != nil {
- reattachStr, err := c.reattachInfo.marshalString()
- if err != nil {
- return nil, err
- }
- mergeEnv[reattachEnvVar] = reattachStr
- }
-
- showCmd := tf.showCmd(ctx, true, mergeEnv, statePath)
-
- var ret tfjson.State
- ret.UseJSONNumber(true)
-
- if c.jsonNumber != nil {
- ret.UseJSONNumber(c.jsonNumber.useJSONNumber)
- }
-
- err = tf.runTerraformCmdJSON(ctx, showCmd, &ret)
- if err != nil {
- return nil, err
- }
-
- err = ret.Validate()
- if err != nil {
- return nil, err
- }
-
- return &ret, nil
-}
-
-// ShowPlanFile reads a given plan file and outputs the plan.
-func (tf *Terraform) ShowPlanFile(ctx context.Context, planPath string, opts ...ShowOption) (*tfjson.Plan, error) {
- err := tf.compatible(ctx, tf0_12_0, nil)
- if err != nil {
- return nil, fmt.Errorf("terraform show -json was added in 0.12.0: %w", err)
- }
-
- if planPath == "" {
- return nil, fmt.Errorf("planPath cannot be blank: use Show() if not passing planPath")
- }
-
- c := defaultShowOptions
-
- for _, o := range opts {
- o.configureShow(&c)
- }
-
- mergeEnv := map[string]string{}
- if c.reattachInfo != nil {
- reattachStr, err := c.reattachInfo.marshalString()
- if err != nil {
- return nil, err
- }
- mergeEnv[reattachEnvVar] = reattachStr
- }
-
- showCmd := tf.showCmd(ctx, true, mergeEnv, planPath)
-
- var ret tfjson.Plan
-
- if c.jsonNumber != nil {
- ret.UseJSONNumber(c.jsonNumber.useJSONNumber)
- }
-
- err = tf.runTerraformCmdJSON(ctx, showCmd, &ret)
- if err != nil {
- return nil, err
- }
-
- err = ret.Validate()
- if err != nil {
- return nil, err
- }
-
- return &ret, nil
-
-}
-
-// ShowPlanFileRaw reads a given plan file and outputs the plan in a
-// human-friendly, opaque format.
-func (tf *Terraform) ShowPlanFileRaw(ctx context.Context, planPath string, opts ...ShowOption) (string, error) {
- if planPath == "" {
- return "", fmt.Errorf("planPath cannot be blank: use Show() if not passing planPath")
- }
-
- c := defaultShowOptions
-
- for _, o := range opts {
- o.configureShow(&c)
- }
-
- mergeEnv := map[string]string{}
- if c.reattachInfo != nil {
- reattachStr, err := c.reattachInfo.marshalString()
- if err != nil {
- return "", err
- }
- mergeEnv[reattachEnvVar] = reattachStr
- }
-
- showCmd := tf.showCmd(ctx, false, mergeEnv, planPath)
-
- var outBuf strings.Builder
- showCmd.Stdout = &outBuf
- err := tf.runTerraformCmd(ctx, showCmd)
- if err != nil {
- return "", err
- }
-
- return outBuf.String(), nil
-
-}
-
-func (tf *Terraform) showCmd(ctx context.Context, jsonOutput bool, mergeEnv map[string]string, args ...string) *exec.Cmd {
- allArgs := []string{"show"}
- if jsonOutput {
- allArgs = append(allArgs, "-json")
- }
- allArgs = append(allArgs, "-no-color")
- allArgs = append(allArgs, args...)
-
- return tf.buildTerraformCmd(ctx, mergeEnv, allArgs...)
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/state_mv.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_mv.go
deleted file mode 100644
index ca92e522..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/state_mv.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "os/exec"
- "strconv"
-)
-
-type stateMvConfig struct {
- backup string
- backupOut string
- dryRun bool
- lock bool
- lockTimeout string
- state string
- stateOut string
-}
-
-var defaultStateMvOptions = stateMvConfig{
- lock: true,
- lockTimeout: "0s",
-}
-
-// StateMvCmdOption represents options used in the Refresh method.
-type StateMvCmdOption interface {
- configureStateMv(*stateMvConfig)
-}
-
-func (opt *BackupOption) configureStateMv(conf *stateMvConfig) {
- conf.backup = opt.path
-}
-
-func (opt *BackupOutOption) configureStateMv(conf *stateMvConfig) {
- conf.backupOut = opt.path
-}
-
-func (opt *DryRunOption) configureStateMv(conf *stateMvConfig) {
- conf.dryRun = opt.dryRun
-}
-
-func (opt *LockOption) configureStateMv(conf *stateMvConfig) {
- conf.lock = opt.lock
-}
-
-func (opt *LockTimeoutOption) configureStateMv(conf *stateMvConfig) {
- conf.lockTimeout = opt.timeout
-}
-
-func (opt *StateOption) configureStateMv(conf *stateMvConfig) {
- conf.state = opt.path
-}
-
-func (opt *StateOutOption) configureStateMv(conf *stateMvConfig) {
- conf.stateOut = opt.path
-}
-
-// StateMv represents the terraform state mv subcommand.
-func (tf *Terraform) StateMv(ctx context.Context, source string, destination string, opts ...StateMvCmdOption) error {
- cmd, err := tf.stateMvCmd(ctx, source, destination, opts...)
- if err != nil {
- return err
- }
- return tf.runTerraformCmd(ctx, cmd)
-}
-
-func (tf *Terraform) stateMvCmd(ctx context.Context, source string, destination string, opts ...StateMvCmdOption) (*exec.Cmd, error) {
- c := defaultStateMvOptions
-
- for _, o := range opts {
- o.configureStateMv(&c)
- }
-
- args := []string{"state", "mv", "-no-color"}
-
- // string opts: only pass if set
- if c.backup != "" {
- args = append(args, "-backup="+c.backup)
- }
- if c.backupOut != "" {
- args = append(args, "-backup-out="+c.backupOut)
- }
- if c.lockTimeout != "" {
- args = append(args, "-lock-timeout="+c.lockTimeout)
- }
- if c.state != "" {
- args = append(args, "-state="+c.state)
- }
- if c.stateOut != "" {
- args = append(args, "-state-out="+c.stateOut)
- }
-
- // boolean and numerical opts: always pass
- args = append(args, "-lock="+strconv.FormatBool(c.lock))
-
- // unary flags: pass if true
- if c.dryRun {
- args = append(args, "-dry-run")
- }
-
- // positional arguments
- args = append(args, source)
- args = append(args, destination)
-
- return tf.buildTerraformCmd(ctx, nil, args...), nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/state_pull.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_pull.go
deleted file mode 100644
index 9fa6e5db..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/state_pull.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "bytes"
- "context"
- "os/exec"
-)
-
-type statePullConfig struct {
- reattachInfo ReattachInfo
-}
-
-var defaultStatePullConfig = statePullConfig{}
-
-type StatePullOption interface {
- configureShow(*statePullConfig)
-}
-
-func (opt *ReattachOption) configureStatePull(conf *statePullConfig) {
- conf.reattachInfo = opt.info
-}
-
-func (tf *Terraform) StatePull(ctx context.Context, opts ...StatePullOption) (string, error) {
- c := defaultStatePullConfig
-
- for _, o := range opts {
- o.configureShow(&c)
- }
-
- mergeEnv := map[string]string{}
- if c.reattachInfo != nil {
- reattachStr, err := c.reattachInfo.marshalString()
- if err != nil {
- return "", err
- }
- mergeEnv[reattachEnvVar] = reattachStr
- }
-
- cmd := tf.statePullCmd(ctx, mergeEnv)
-
- var ret bytes.Buffer
- cmd.Stdout = &ret
- err := tf.runTerraformCmd(ctx, cmd)
- if err != nil {
- return "", err
- }
-
- return ret.String(), nil
-}
-
-func (tf *Terraform) statePullCmd(ctx context.Context, mergeEnv map[string]string) *exec.Cmd {
- args := []string{"state", "pull"}
-
- return tf.buildTerraformCmd(ctx, mergeEnv, args...)
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/state_push.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_push.go
deleted file mode 100644
index a0873e96..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/state_push.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "os/exec"
- "strconv"
-)
-
-type statePushConfig struct {
- force bool
- lock bool
- lockTimeout string
-}
-
-var defaultStatePushOptions = statePushConfig{
- lock: false,
- lockTimeout: "0s",
-}
-
-// StatePushCmdOption represents options used in the Refresh method.
-type StatePushCmdOption interface {
- configureStatePush(*statePushConfig)
-}
-
-func (opt *ForceOption) configureStatePush(conf *statePushConfig) {
- conf.force = opt.force
-}
-
-func (opt *LockOption) configureStatePush(conf *statePushConfig) {
- conf.lock = opt.lock
-}
-
-func (opt *LockTimeoutOption) configureStatePush(conf *statePushConfig) {
- conf.lockTimeout = opt.timeout
-}
-
-func (tf *Terraform) StatePush(ctx context.Context, path string, opts ...StatePushCmdOption) error {
- cmd, err := tf.statePushCmd(ctx, path, opts...)
- if err != nil {
- return err
- }
- return tf.runTerraformCmd(ctx, cmd)
-}
-
-func (tf *Terraform) statePushCmd(ctx context.Context, path string, opts ...StatePushCmdOption) (*exec.Cmd, error) {
- c := defaultStatePushOptions
-
- for _, o := range opts {
- o.configureStatePush(&c)
- }
-
- args := []string{"state", "push"}
-
- if c.force {
- args = append(args, "-force")
- }
-
- args = append(args, "-lock="+strconv.FormatBool(c.lock))
-
- if c.lockTimeout != "" {
- args = append(args, "-lock-timeout="+c.lockTimeout)
- }
-
- args = append(args, path)
-
- return tf.buildTerraformCmd(ctx, nil, args...), nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/state_rm.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/state_rm.go
deleted file mode 100644
index 2db18cb7..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/state_rm.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "os/exec"
- "strconv"
-)
-
-type stateRmConfig struct {
- backup string
- backupOut string
- dryRun bool
- lock bool
- lockTimeout string
- state string
- stateOut string
-}
-
-var defaultStateRmOptions = stateRmConfig{
- lock: true,
- lockTimeout: "0s",
-}
-
-// StateRmCmdOption represents options used in the Refresh method.
-type StateRmCmdOption interface {
- configureStateRm(*stateRmConfig)
-}
-
-func (opt *BackupOption) configureStateRm(conf *stateRmConfig) {
- conf.backup = opt.path
-}
-
-func (opt *BackupOutOption) configureStateRm(conf *stateRmConfig) {
- conf.backupOut = opt.path
-}
-
-func (opt *DryRunOption) configureStateRm(conf *stateRmConfig) {
- conf.dryRun = opt.dryRun
-}
-
-func (opt *LockOption) configureStateRm(conf *stateRmConfig) {
- conf.lock = opt.lock
-}
-
-func (opt *LockTimeoutOption) configureStateRm(conf *stateRmConfig) {
- conf.lockTimeout = opt.timeout
-}
-
-func (opt *StateOption) configureStateRm(conf *stateRmConfig) {
- conf.state = opt.path
-}
-
-func (opt *StateOutOption) configureStateRm(conf *stateRmConfig) {
- conf.stateOut = opt.path
-}
-
-// StateRm represents the terraform state rm subcommand.
-func (tf *Terraform) StateRm(ctx context.Context, address string, opts ...StateRmCmdOption) error {
- cmd, err := tf.stateRmCmd(ctx, address, opts...)
- if err != nil {
- return err
- }
- return tf.runTerraformCmd(ctx, cmd)
-}
-
-func (tf *Terraform) stateRmCmd(ctx context.Context, address string, opts ...StateRmCmdOption) (*exec.Cmd, error) {
- c := defaultStateRmOptions
-
- for _, o := range opts {
- o.configureStateRm(&c)
- }
-
- args := []string{"state", "rm", "-no-color"}
-
- // string opts: only pass if set
- if c.backup != "" {
- args = append(args, "-backup="+c.backup)
- }
- if c.backupOut != "" {
- args = append(args, "-backup-out="+c.backupOut)
- }
- if c.lockTimeout != "" {
- args = append(args, "-lock-timeout="+c.lockTimeout)
- }
- if c.state != "" {
- args = append(args, "-state="+c.state)
- }
- if c.stateOut != "" {
- args = append(args, "-state-out="+c.stateOut)
- }
-
- // boolean and numerical opts: always pass
- args = append(args, "-lock="+strconv.FormatBool(c.lock))
-
- // unary flags: pass if true
- if c.dryRun {
- args = append(args, "-dry-run")
- }
-
- // positional arguments
- args = append(args, address)
-
- return tf.buildTerraformCmd(ctx, nil, args...), nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/taint.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/taint.go
deleted file mode 100644
index b6ac955c..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/taint.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "os/exec"
- "strconv"
-)
-
-type taintConfig struct {
- state string
- allowMissing bool
- lock bool
- lockTimeout string
-}
-
-var defaultTaintOptions = taintConfig{
- allowMissing: false,
- lock: true,
-}
-
-// TaintOption represents options used in the Taint method.
-type TaintOption interface {
- configureTaint(*taintConfig)
-}
-
-func (opt *StateOption) configureTaint(conf *taintConfig) {
- conf.state = opt.path
-}
-
-func (opt *AllowMissingOption) configureTaint(conf *taintConfig) {
- conf.allowMissing = opt.allowMissing
-}
-
-func (opt *LockOption) configureTaint(conf *taintConfig) {
- conf.lock = opt.lock
-}
-
-func (opt *LockTimeoutOption) configureTaint(conf *taintConfig) {
- conf.lockTimeout = opt.timeout
-}
-
-// Taint represents the terraform taint subcommand.
-func (tf *Terraform) Taint(ctx context.Context, address string, opts ...TaintOption) error {
- err := tf.compatible(ctx, tf0_4_1, nil)
- if err != nil {
- return fmt.Errorf("taint was first introduced in Terraform 0.4.1: %w", err)
- }
- taintCmd := tf.taintCmd(ctx, address, opts...)
- return tf.runTerraformCmd(ctx, taintCmd)
-}
-
-func (tf *Terraform) taintCmd(ctx context.Context, address string, opts ...TaintOption) *exec.Cmd {
- c := defaultTaintOptions
-
- for _, o := range opts {
- o.configureTaint(&c)
- }
-
- args := []string{"taint", "-no-color"}
-
- if c.lockTimeout != "" {
- args = append(args, "-lock-timeout="+c.lockTimeout)
- }
-
- // string opts: only pass if set
- if c.state != "" {
- args = append(args, "-state="+c.state)
- }
-
- args = append(args, "-lock="+strconv.FormatBool(c.lock))
- if c.allowMissing {
- args = append(args, "-allow-missing")
- }
- args = append(args, address)
-
- return tf.buildTerraformCmd(ctx, nil, args...)
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go
deleted file mode 100644
index 628b733d..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/terraform.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "io"
- "io/ioutil"
- "log"
- "os"
- "sync"
-
- "github.com/hashicorp/go-version"
-)
-
-type printfer interface {
- Printf(format string, v ...interface{})
-}
-
-// Terraform represents the Terraform CLI executable and working directory.
-//
-// Typically this is constructed against the root module of a Terraform configuration
-// but you can override paths used in some commands depending on the available
-// options.
-//
-// All functions that execute CLI commands take a context.Context. It should be noted that
-// exec.Cmd.Run will not return context.DeadlineExceeded or context.Canceled by default, we
-// have augmented our wrapped errors to respond true to errors.Is for context.DeadlineExceeded
-// and context.Canceled if those are present on the context when the error is parsed. See
-// https://github.com/golang/go/issues/21880 for more about the Go limitations.
-//
-// By default, the instance inherits the environment from the calling code (using os.Environ)
-// but it ignores certain environment variables that are managed within the code and prohibits
-// setting them through SetEnv:
-//
-// - TF_APPEND_USER_AGENT
-// - TF_IN_AUTOMATION
-// - TF_INPUT
-// - TF_LOG
-// - TF_LOG_PATH
-// - TF_REATTACH_PROVIDERS
-// - TF_DISABLE_PLUGIN_TLS
-// - TF_SKIP_PROVIDER_VERIFY
-type Terraform struct {
- execPath string
- workingDir string
- appendUserAgent string
- disablePluginTLS bool
- skipProviderVerify bool
- env map[string]string
-
- stdout io.Writer
- stderr io.Writer
- logger printfer
-
- // TF_LOG environment variable, defaults to TRACE if logPath is set.
- log string
-
- // TF_LOG_CORE environment variable
- logCore string
-
- // TF_LOG_PATH environment variable
- logPath string
-
- // TF_LOG_PROVIDER environment variable
- logProvider string
-
- versionLock sync.Mutex
- execVersion *version.Version
- provVersions map[string]*version.Version
-}
-
-// NewTerraform returns a Terraform struct with default values for all fields.
-// If a blank execPath is supplied, NewTerraform will error.
-// Use hc-install or output from os.LookPath to get a desirable execPath.
-func NewTerraform(workingDir string, execPath string) (*Terraform, error) {
- if workingDir == "" {
- return nil, fmt.Errorf("Terraform cannot be initialised with empty workdir")
- }
-
- if _, err := os.Stat(workingDir); err != nil {
- return nil, fmt.Errorf("error initialising Terraform with workdir %s: %s", workingDir, err)
- }
-
- if execPath == "" {
- err := fmt.Errorf("NewTerraform: please supply the path to a Terraform executable using execPath, e.g. using the github.com/hashicorp/hc-install module.")
- return nil, &ErrNoSuitableBinary{
- err: err,
- }
- }
- tf := Terraform{
- execPath: execPath,
- workingDir: workingDir,
- env: nil, // explicit nil means copy os.Environ
- logger: log.New(ioutil.Discard, "", 0),
- }
-
- return &tf, nil
-}
-
-// SetEnv allows you to override environment variables, this should not be used for any well known
-// Terraform environment variables that are already covered in options. Pass nil to copy the values
-// from os.Environ. Attempting to set environment variables that should be managed manually will
-// result in ErrManualEnvVar being returned.
-func (tf *Terraform) SetEnv(env map[string]string) error {
- prohibited := ProhibitedEnv(env)
- if len(prohibited) > 0 {
- // just error on the first instance
- return &ErrManualEnvVar{prohibited[0]}
- }
-
- tf.env = env
- return nil
-}
-
-// SetLogger specifies a logger for tfexec to use.
-func (tf *Terraform) SetLogger(logger printfer) {
- tf.logger = logger
-}
-
-// SetStdout specifies a writer to stream stdout to for every command.
-//
-// This should be used for information or logging purposes only, not control
-// flow. Any parsing necessary should be added as functionality to this package.
-func (tf *Terraform) SetStdout(w io.Writer) {
- tf.stdout = w
-}
-
-// SetStderr specifies a writer to stream stderr to for every command.
-//
-// This should be used for information or logging purposes only, not control
-// flow. Any parsing necessary should be added as functionality to this package.
-func (tf *Terraform) SetStderr(w io.Writer) {
- tf.stderr = w
-}
-
-// SetLog sets the TF_LOG environment variable for Terraform CLI execution.
-// This must be combined with a call to SetLogPath to take effect.
-//
-// This is only compatible with Terraform CLI 0.15.0 or later as setting the
-// log level was unreliable in earlier versions. It will default to TRACE when
-// SetLogPath is called on versions 0.14.11 and earlier, or if SetLogCore and
-// SetLogProvider have not been called before SetLogPath on versions 0.15.0 and
-// later.
-func (tf *Terraform) SetLog(log string) error {
- err := tf.compatible(context.Background(), tf0_15_0, nil)
- if err != nil {
- return err
- }
- tf.log = log
- return nil
-}
-
-// SetLogCore sets the TF_LOG_CORE environment variable for Terraform CLI
-// execution. This must be combined with a call to SetLogPath to take effect.
-//
-// This is only compatible with Terraform CLI 0.15.0 or later.
-func (tf *Terraform) SetLogCore(logCore string) error {
- err := tf.compatible(context.Background(), tf0_15_0, nil)
- if err != nil {
- return err
- }
- tf.logCore = logCore
- return nil
-}
-
-// SetLogPath sets the TF_LOG_PATH environment variable for Terraform CLI
-// execution.
-func (tf *Terraform) SetLogPath(path string) error {
- tf.logPath = path
- // Prevent setting the log path without enabling logging
- if tf.log == "" && tf.logCore == "" && tf.logProvider == "" {
- tf.log = "TRACE"
- }
- return nil
-}
-
-// SetLogProvider sets the TF_LOG_PROVIDER environment variable for Terraform
-// CLI execution. This must be combined with a call to SetLogPath to take
-// effect.
-//
-// This is only compatible with Terraform CLI 0.15.0 or later.
-func (tf *Terraform) SetLogProvider(logProvider string) error {
- err := tf.compatible(context.Background(), tf0_15_0, nil)
- if err != nil {
- return err
- }
- tf.logProvider = logProvider
- return nil
-}
-
-// SetAppendUserAgent sets the TF_APPEND_USER_AGENT environment variable for
-// Terraform CLI execution.
-func (tf *Terraform) SetAppendUserAgent(ua string) error {
- tf.appendUserAgent = ua
- return nil
-}
-
-// SetDisablePluginTLS sets the TF_DISABLE_PLUGIN_TLS environment variable for
-// Terraform CLI execution.
-func (tf *Terraform) SetDisablePluginTLS(disabled bool) error {
- tf.disablePluginTLS = disabled
- return nil
-}
-
-// SetSkipProviderVerify sets the TF_SKIP_PROVIDER_VERIFY environment variable
-// for Terraform CLI execution. This is no longer used in 0.13.0 and greater.
-func (tf *Terraform) SetSkipProviderVerify(skip bool) error {
- err := tf.compatible(context.Background(), nil, tf0_13_0)
- if err != nil {
- return err
- }
- tf.skipProviderVerify = skip
- return nil
-}
-
-// WorkingDir returns the working directory for Terraform.
-func (tf *Terraform) WorkingDir() string {
- return tf.workingDir
-}
-
-// ExecPath returns the path to the Terraform executable.
-func (tf *Terraform) ExecPath() string {
- return tf.execPath
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/test.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/test.go
deleted file mode 100644
index 5e0bb635..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/test.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "io"
- "os/exec"
-)
-
-type testConfig struct {
- testsDirectory string
-}
-
-var defaultTestOptions = testConfig{}
-
-type TestOption interface {
- configureTest(*testConfig)
-}
-
-func (opt *TestsDirectoryOption) configureTest(conf *testConfig) {
- conf.testsDirectory = opt.testsDirectory
-}
-
-// Test represents the terraform test -json subcommand.
-//
-// The given io.Writer, if specified, will receive
-// [machine-readable](https://developer.hashicorp.com/terraform/internals/machine-readable-ui)
-// JSON from Terraform including test results.
-func (tf *Terraform) Test(ctx context.Context, w io.Writer, opts ...TestOption) error {
- err := tf.compatible(ctx, tf1_6_0, nil)
-
- if err != nil {
- return fmt.Errorf("terraform test was added in 1.6.0: %w", err)
- }
-
- tf.SetStdout(w)
-
- testCmd := tf.testCmd(ctx)
-
- err = tf.runTerraformCmd(ctx, testCmd)
-
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (tf *Terraform) testCmd(ctx context.Context, opts ...TestOption) *exec.Cmd {
- c := defaultTestOptions
-
- for _, o := range opts {
- o.configureTest(&c)
- }
-
- args := []string{"test", "-json"}
-
- if c.testsDirectory != "" {
- args = append(args, "-tests-directory="+c.testsDirectory)
- }
-
- return tf.buildTerraformCmd(ctx, nil, args...)
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/untaint.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/untaint.go
deleted file mode 100644
index 5f0bf350..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/untaint.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "os/exec"
- "strconv"
-)
-
-type untaintConfig struct {
- state string
- allowMissing bool
- lock bool
- lockTimeout string
-}
-
-var defaultUntaintOptions = untaintConfig{
- allowMissing: false,
- lock: true,
-}
-
-// OutputOption represents options used in the Output method.
-type UntaintOption interface {
- configureUntaint(*untaintConfig)
-}
-
-func (opt *StateOption) configureUntaint(conf *untaintConfig) {
- conf.state = opt.path
-}
-
-func (opt *AllowMissingOption) configureUntaint(conf *untaintConfig) {
- conf.allowMissing = opt.allowMissing
-}
-
-func (opt *LockOption) configureUntaint(conf *untaintConfig) {
- conf.lock = opt.lock
-}
-
-func (opt *LockTimeoutOption) configureUntaint(conf *untaintConfig) {
- conf.lockTimeout = opt.timeout
-}
-
-// Untaint represents the terraform untaint subcommand.
-func (tf *Terraform) Untaint(ctx context.Context, address string, opts ...UntaintOption) error {
- err := tf.compatible(ctx, tf0_6_13, nil)
- if err != nil {
- return fmt.Errorf("untaint was first introduced in Terraform 0.6.13: %w", err)
- }
- untaintCmd := tf.untaintCmd(ctx, address, opts...)
- return tf.runTerraformCmd(ctx, untaintCmd)
-}
-
-func (tf *Terraform) untaintCmd(ctx context.Context, address string, opts ...UntaintOption) *exec.Cmd {
- c := defaultUntaintOptions
-
- for _, o := range opts {
- o.configureUntaint(&c)
- }
-
- args := []string{"untaint", "-no-color"}
-
- if c.lockTimeout != "" {
- args = append(args, "-lock-timeout="+c.lockTimeout)
- }
-
- // string opts: only pass if set
- if c.state != "" {
- args = append(args, "-state="+c.state)
- }
-
- args = append(args, "-lock="+strconv.FormatBool(c.lock))
- if c.allowMissing {
- args = append(args, "-allow-missing")
- }
- args = append(args, address)
-
- return tf.buildTerraformCmd(ctx, nil, args...)
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade012.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade012.go
deleted file mode 100644
index 34a2c87d..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade012.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "os/exec"
-)
-
-type upgrade012Config struct {
- dir string
- force bool
-
- reattachInfo ReattachInfo
-}
-
-var defaultUpgrade012Options = upgrade012Config{
- force: false,
-}
-
-// Upgrade012Option represents options used in the Destroy method.
-type Upgrade012Option interface {
- configureUpgrade012(*upgrade012Config)
-}
-
-func (opt *DirOption) configureUpgrade012(conf *upgrade012Config) {
- conf.dir = opt.path
-}
-
-func (opt *ForceOption) configureUpgrade012(conf *upgrade012Config) {
- conf.force = opt.force
-}
-
-func (opt *ReattachOption) configureUpgrade012(conf *upgrade012Config) {
- conf.reattachInfo = opt.info
-}
-
-// Upgrade012 represents the terraform 0.12upgrade subcommand.
-func (tf *Terraform) Upgrade012(ctx context.Context, opts ...Upgrade012Option) error {
- cmd, err := tf.upgrade012Cmd(ctx, opts...)
- if err != nil {
- return err
- }
- return tf.runTerraformCmd(ctx, cmd)
-}
-
-func (tf *Terraform) upgrade012Cmd(ctx context.Context, opts ...Upgrade012Option) (*exec.Cmd, error) {
- err := tf.compatible(ctx, tf0_12_0, tf0_13_0)
- if err != nil {
- return nil, fmt.Errorf("terraform 0.12upgrade is only supported in 0.12 releases: %w", err)
- }
-
- c := defaultUpgrade012Options
-
- for _, o := range opts {
- o.configureUpgrade012(&c)
- }
-
- args := []string{"0.12upgrade", "-no-color", "-yes"}
-
- // boolean opts: only pass if set
- if c.force {
- args = append(args, "-force")
- }
-
- // optional positional argument
- if c.dir != "" {
- args = append(args, c.dir)
- }
-
- mergeEnv := map[string]string{}
- if c.reattachInfo != nil {
- reattachStr, err := c.reattachInfo.marshalString()
- if err != nil {
- return nil, err
- }
- mergeEnv[reattachEnvVar] = reattachStr
- }
-
- return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade013.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade013.go
deleted file mode 100644
index 98dc4590..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/upgrade013.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "os/exec"
-)
-
-type upgrade013Config struct {
- dir string
-
- reattachInfo ReattachInfo
-}
-
-var defaultUpgrade013Options = upgrade013Config{}
-
-// Upgrade013Option represents options used in the Destroy method.
-type Upgrade013Option interface {
- configureUpgrade013(*upgrade013Config)
-}
-
-func (opt *DirOption) configureUpgrade013(conf *upgrade013Config) {
- conf.dir = opt.path
-}
-
-func (opt *ReattachOption) configureUpgrade013(conf *upgrade013Config) {
- conf.reattachInfo = opt.info
-}
-
-// Upgrade013 represents the terraform 0.13upgrade subcommand.
-func (tf *Terraform) Upgrade013(ctx context.Context, opts ...Upgrade013Option) error {
- cmd, err := tf.upgrade013Cmd(ctx, opts...)
- if err != nil {
- return err
- }
- return tf.runTerraformCmd(ctx, cmd)
-}
-
-func (tf *Terraform) upgrade013Cmd(ctx context.Context, opts ...Upgrade013Option) (*exec.Cmd, error) {
- err := tf.compatible(ctx, tf0_13_0, tf0_14_0)
- if err != nil {
- return nil, fmt.Errorf("terraform 0.13upgrade is only supported in 0.13 releases: %w", err)
- }
-
- c := defaultUpgrade013Options
-
- for _, o := range opts {
- o.configureUpgrade013(&c)
- }
-
- args := []string{"0.13upgrade", "-no-color", "-yes"}
-
- // optional positional argument
- if c.dir != "" {
- args = append(args, c.dir)
- }
-
- mergeEnv := map[string]string{}
- if c.reattachInfo != nil {
- reattachStr, err := c.reattachInfo.marshalString()
- if err != nil {
- return nil, err
- }
- mergeEnv[reattachEnvVar] = reattachStr
- }
-
- return tf.buildTerraformCmd(ctx, mergeEnv, args...), nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/validate.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/validate.go
deleted file mode 100644
index d995d375..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/validate.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
-
- tfjson "github.com/hashicorp/terraform-json"
-)
-
-// Validate represents the validate subcommand to the Terraform CLI. The -json
-// flag support was added in 0.12.0, so this will not work on earlier versions.
-func (tf *Terraform) Validate(ctx context.Context) (*tfjson.ValidateOutput, error) {
- err := tf.compatible(ctx, tf0_12_0, nil)
- if err != nil {
- return nil, fmt.Errorf("terraform validate -json was added in 0.12.0: %w", err)
- }
-
- cmd := tf.buildTerraformCmd(ctx, nil, "validate", "-no-color", "-json")
-
- var outBuf = bytes.Buffer{}
- cmd.Stdout = &outBuf
-
- err = tf.runTerraformCmd(ctx, cmd)
- // TODO: this command should not exit 1 if you pass -json as its hard to differentiate other errors
- if err != nil && cmd.ProcessState.ExitCode() != 1 {
- return nil, err
- }
-
- var ret tfjson.ValidateOutput
- // TODO: ret.UseJSONNumber(true) validate output should support JSON numbers
- jsonErr := json.Unmarshal(outBuf.Bytes(), &ret)
- if jsonErr != nil {
- // the original call was possibly bad, if it has an error, actually just return that
- if err != nil {
- return nil, err
- }
-
- return nil, jsonErr
- }
-
- return &ret, nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go
deleted file mode 100644
index 87addd1e..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/version.go
+++ /dev/null
@@ -1,232 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "regexp"
- "strconv"
- "strings"
-
- "github.com/hashicorp/go-version"
- tfjson "github.com/hashicorp/terraform-json"
-)
-
-var (
- tf0_4_1 = version.Must(version.NewVersion("0.4.1"))
- tf0_5_0 = version.Must(version.NewVersion("0.5.0"))
- tf0_6_13 = version.Must(version.NewVersion("0.6.13"))
- tf0_7_7 = version.Must(version.NewVersion("0.7.7"))
- tf0_8_0 = version.Must(version.NewVersion("0.8.0"))
- tf0_10_0 = version.Must(version.NewVersion("0.10.0"))
- tf0_12_0 = version.Must(version.NewVersion("0.12.0"))
- tf0_13_0 = version.Must(version.NewVersion("0.13.0"))
- tf0_14_0 = version.Must(version.NewVersion("0.14.0"))
- tf0_15_0 = version.Must(version.NewVersion("0.15.0"))
- tf0_15_2 = version.Must(version.NewVersion("0.15.2"))
- tf0_15_3 = version.Must(version.NewVersion("0.15.3"))
- tf0_15_4 = version.Must(version.NewVersion("0.15.4"))
- tf1_1_0 = version.Must(version.NewVersion("1.1.0"))
- tf1_4_0 = version.Must(version.NewVersion("1.4.0"))
- tf1_6_0 = version.Must(version.NewVersion("1.6.0"))
- tf1_9_0 = version.Must(version.NewVersion("1.9.0"))
-)
-
-// Version returns structured output from the terraform version command including both the Terraform CLI version
-// and any initialized provider versions. This will read cached values when present unless the skipCache parameter
-// is set to true.
-func (tf *Terraform) Version(ctx context.Context, skipCache bool) (tfVersion *version.Version, providerVersions map[string]*version.Version, err error) {
- tf.versionLock.Lock()
- defer tf.versionLock.Unlock()
-
- if tf.execVersion == nil || skipCache {
- tf.execVersion, tf.provVersions, err = tf.version(ctx)
- if err != nil {
- return nil, nil, err
- }
- }
-
- return tf.execVersion, tf.provVersions, nil
-}
-
-// version does not use the locking on the Terraform instance and should probably not be used directly, prefer Version.
-func (tf *Terraform) version(ctx context.Context) (*version.Version, map[string]*version.Version, error) {
- versionCmd := tf.buildTerraformCmd(ctx, nil, "version", "-json")
-
- var outBuf bytes.Buffer
- versionCmd.Stdout = &outBuf
-
- err := tf.runTerraformCmd(ctx, versionCmd)
- if err != nil {
- return nil, nil, err
- }
-
- tfVersion, providerVersions, err := parseJsonVersionOutput(outBuf.Bytes())
- if err != nil {
- if _, ok := err.(*json.SyntaxError); ok {
- return tf.versionFromPlaintext(ctx)
- }
- }
-
- return tfVersion, providerVersions, err
-}
-
-func parseJsonVersionOutput(stdout []byte) (*version.Version, map[string]*version.Version, error) {
- var out tfjson.VersionOutput
- err := json.Unmarshal(stdout, &out)
- if err != nil {
- return nil, nil, err
- }
-
- tfVersion, err := version.NewVersion(out.Version)
- if err != nil {
- return nil, nil, fmt.Errorf("unable to parse version %q: %w", out.Version, err)
- }
-
- providerVersions := make(map[string]*version.Version, 0)
- for provider, versionStr := range out.ProviderSelections {
- v, err := version.NewVersion(versionStr)
- if err != nil {
- return nil, nil, fmt.Errorf("unable to parse %q version %q: %w",
- provider, versionStr, err)
- }
- providerVersions[provider] = v
- }
-
- return tfVersion, providerVersions, nil
-}
-
-func (tf *Terraform) versionFromPlaintext(ctx context.Context) (*version.Version, map[string]*version.Version, error) {
- versionCmd := tf.buildTerraformCmd(ctx, nil, "version")
-
- var outBuf strings.Builder
- versionCmd.Stdout = &outBuf
-
- err := tf.runTerraformCmd(ctx, versionCmd)
- if err != nil {
- return nil, nil, err
- }
-
- tfVersion, providerVersions, err := parsePlaintextVersionOutput(outBuf.String())
- if err != nil {
- return nil, nil, fmt.Errorf("unable to parse version: %w", err)
- }
-
- return tfVersion, providerVersions, nil
-}
-
-var (
- simpleVersionRe = `v?(?P[0-9]+(?:\.[0-9]+)*(?:-[A-Za-z0-9\.]+)?)`
-
- versionOutputRe = regexp.MustCompile(`Terraform ` + simpleVersionRe)
- providerVersionOutputRe = regexp.MustCompile(`(\n\+ provider[\. ](?P\S+) ` + simpleVersionRe + `)`)
-)
-
-func parsePlaintextVersionOutput(stdout string) (*version.Version, map[string]*version.Version, error) {
- stdout = strings.TrimSpace(stdout)
-
- submatches := versionOutputRe.FindStringSubmatch(stdout)
- if len(submatches) != 2 {
- return nil, nil, fmt.Errorf("unexpected number of version matches %d for %s", len(submatches), stdout)
- }
- v, err := version.NewVersion(submatches[1])
- if err != nil {
- return nil, nil, fmt.Errorf("unable to parse version %q: %w", submatches[1], err)
- }
-
- allSubmatches := providerVersionOutputRe.FindAllStringSubmatch(stdout, -1)
- provV := map[string]*version.Version{}
-
- for _, submatches := range allSubmatches {
- if len(submatches) != 4 {
- return nil, nil, fmt.Errorf("unexpected number of provider version matches %d for %s", len(submatches), stdout)
- }
-
- v, err := version.NewVersion(submatches[3])
- if err != nil {
- return nil, nil, fmt.Errorf("unable to parse provider version %q: %w", submatches[3], err)
- }
-
- provV[submatches[2]] = v
- }
-
- return v, provV, err
-}
-
-func errorVersionString(v *version.Version) string {
- if v == nil {
- return "-"
- }
- return v.String()
-}
-
-// compatible asserts compatibility of the cached terraform version with the executable, and returns a well known error if not.
-func (tf *Terraform) compatible(ctx context.Context, minInclusive *version.Version, maxExclusive *version.Version) error {
- tfv, _, err := tf.Version(ctx, false)
- if err != nil {
- return err
- }
- if ok := versionInRange(tfv, minInclusive, maxExclusive); !ok {
- return &ErrVersionMismatch{
- MinInclusive: errorVersionString(minInclusive),
- MaxExclusive: errorVersionString(maxExclusive),
- Actual: errorVersionString(tfv),
- }
- }
-
- return nil
-}
-
-// experimentsEnabled asserts the cached terraform version has experiments enabled in the executable,
-// and returns a well known error if not. Experiments are enabled in alpha and (potentially) dev builds of Terraform.
-func (tf *Terraform) experimentsEnabled(ctx context.Context) error {
- tfv, _, err := tf.Version(ctx, false)
- if err != nil {
- return err
- }
-
- preRelease := tfv.Prerelease()
- if preRelease == "dev" || strings.Contains(preRelease, "alpha") {
- return nil
- }
-
- return fmt.Errorf("experiments are not enabled in version %s, as it's not an alpha or dev build", errorVersionString(tfv))
-}
-
-func stripPrereleaseAndMeta(v *version.Version) *version.Version {
- if v == nil {
- return nil
- }
- segs := []string{}
- for _, s := range v.Segments() {
- segs = append(segs, strconv.Itoa(s))
- }
- vs := strings.Join(segs, ".")
- clean, _ := version.NewVersion(vs)
- return clean
-}
-
-// versionInRange checks compatibility of the Terraform version. The minimum is inclusive and the max
-// is exclusive, equivalent to min <= expected version < max.
-//
-// Pre-release information is ignored for comparison.
-func versionInRange(tfv *version.Version, minInclusive *version.Version, maxExclusive *version.Version) bool {
- if minInclusive == nil && maxExclusive == nil {
- return true
- }
- tfv = stripPrereleaseAndMeta(tfv)
- minInclusive = stripPrereleaseAndMeta(minInclusive)
- maxExclusive = stripPrereleaseAndMeta(maxExclusive)
- if minInclusive != nil && !tfv.GreaterThanOrEqual(minInclusive) {
- return false
- }
- if maxExclusive != nil && !tfv.LessThan(maxExclusive) {
- return false
- }
-
- return true
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_delete.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_delete.go
deleted file mode 100644
index f2a17e65..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_delete.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "os/exec"
- "strconv"
-)
-
-type workspaceDeleteConfig struct {
- lock bool
- lockTimeout string
- force bool
-}
-
-var defaultWorkspaceDeleteOptions = workspaceDeleteConfig{
- lock: true,
- lockTimeout: "0s",
-}
-
-// WorkspaceDeleteCmdOption represents options that are applicable to the WorkspaceDelete method.
-type WorkspaceDeleteCmdOption interface {
- configureWorkspaceDelete(*workspaceDeleteConfig)
-}
-
-func (opt *LockOption) configureWorkspaceDelete(conf *workspaceDeleteConfig) {
- conf.lock = opt.lock
-}
-
-func (opt *LockTimeoutOption) configureWorkspaceDelete(conf *workspaceDeleteConfig) {
- conf.lockTimeout = opt.timeout
-}
-
-func (opt *ForceOption) configureWorkspaceDelete(conf *workspaceDeleteConfig) {
- conf.force = opt.force
-}
-
-// WorkspaceDelete represents the workspace delete subcommand to the Terraform CLI.
-func (tf *Terraform) WorkspaceDelete(ctx context.Context, workspace string, opts ...WorkspaceDeleteCmdOption) error {
- cmd, err := tf.workspaceDeleteCmd(ctx, workspace, opts...)
- if err != nil {
- return err
- }
- return tf.runTerraformCmd(ctx, cmd)
-}
-
-func (tf *Terraform) workspaceDeleteCmd(ctx context.Context, workspace string, opts ...WorkspaceDeleteCmdOption) (*exec.Cmd, error) {
- c := defaultWorkspaceDeleteOptions
-
- for _, o := range opts {
- switch o.(type) {
- case *LockOption, *LockTimeoutOption:
- err := tf.compatible(ctx, tf0_12_0, nil)
- if err != nil {
- return nil, fmt.Errorf("-lock and -lock-timeout were added to workspace delete in Terraform 0.12: %w", err)
- }
- }
-
- o.configureWorkspaceDelete(&c)
- }
-
- args := []string{"workspace", "delete", "-no-color"}
-
- if c.force {
- args = append(args, "-force")
- }
- if c.lockTimeout != "" && c.lockTimeout != defaultWorkspaceDeleteOptions.lockTimeout {
- // only pass if not default, so we don't need to worry about the 0.11 version check
- args = append(args, "-lock-timeout="+c.lockTimeout)
- }
- if !c.lock {
- // only pass if false, so we don't need to worry about the 0.11 version check
- args = append(args, "-lock="+strconv.FormatBool(c.lock))
- }
-
- args = append(args, workspace)
-
- cmd := tf.buildTerraformCmd(ctx, nil, args...)
-
- return cmd, nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_list.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_list.go
deleted file mode 100644
index 1b4bec37..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_list.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "strings"
-)
-
-// WorkspaceList represents the workspace list subcommand to the Terraform CLI.
-func (tf *Terraform) WorkspaceList(ctx context.Context) ([]string, string, error) {
- // TODO: [DIR] param option
- wlCmd := tf.buildTerraformCmd(ctx, nil, "workspace", "list", "-no-color")
-
- var outBuf strings.Builder
- wlCmd.Stdout = &outBuf
-
- err := tf.runTerraformCmd(ctx, wlCmd)
- if err != nil {
- return nil, "", err
- }
-
- ws, current := parseWorkspaceList(outBuf.String())
-
- return ws, current, nil
-}
-
-const currentWorkspacePrefix = "* "
-
-func parseWorkspaceList(stdout string) ([]string, string) {
- lines := strings.Split(stdout, "\n")
-
- current := ""
- workspaces := []string{}
- for _, line := range lines {
- line = strings.TrimSpace(line)
- if line == "" {
- continue
- }
- if strings.HasPrefix(line, currentWorkspacePrefix) {
- line = strings.TrimPrefix(line, currentWorkspacePrefix)
- current = line
- }
- workspaces = append(workspaces, line)
- }
-
- return workspaces, current
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_new.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_new.go
deleted file mode 100644
index 921a1187..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_new.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "os/exec"
- "strconv"
-)
-
-type workspaceNewConfig struct {
- lock bool
- lockTimeout string
- copyState string
-}
-
-var defaultWorkspaceNewOptions = workspaceNewConfig{
- lock: true,
- lockTimeout: "0s",
-}
-
-// WorkspaceNewCmdOption represents options that are applicable to the WorkspaceNew method.
-type WorkspaceNewCmdOption interface {
- configureWorkspaceNew(*workspaceNewConfig)
-}
-
-func (opt *LockOption) configureWorkspaceNew(conf *workspaceNewConfig) {
- conf.lock = opt.lock
-}
-
-func (opt *LockTimeoutOption) configureWorkspaceNew(conf *workspaceNewConfig) {
- conf.lockTimeout = opt.timeout
-}
-
-func (opt *CopyStateOption) configureWorkspaceNew(conf *workspaceNewConfig) {
- conf.copyState = opt.path
-}
-
-// WorkspaceNew represents the workspace new subcommand to the Terraform CLI.
-func (tf *Terraform) WorkspaceNew(ctx context.Context, workspace string, opts ...WorkspaceNewCmdOption) error {
- cmd, err := tf.workspaceNewCmd(ctx, workspace, opts...)
- if err != nil {
- return err
- }
- return tf.runTerraformCmd(ctx, cmd)
-}
-
-func (tf *Terraform) workspaceNewCmd(ctx context.Context, workspace string, opts ...WorkspaceNewCmdOption) (*exec.Cmd, error) {
- // TODO: [DIR] param option
-
- c := defaultWorkspaceNewOptions
-
- for _, o := range opts {
- switch o.(type) {
- case *LockOption, *LockTimeoutOption:
- err := tf.compatible(ctx, tf0_12_0, nil)
- if err != nil {
- return nil, fmt.Errorf("-lock and -lock-timeout were added to workspace new in Terraform 0.12: %w", err)
- }
- }
-
- o.configureWorkspaceNew(&c)
- }
-
- args := []string{"workspace", "new", "-no-color"}
-
- if c.lockTimeout != "" && c.lockTimeout != defaultWorkspaceNewOptions.lockTimeout {
- // only pass if not default, so we don't need to worry about the 0.11 version check
- args = append(args, "-lock-timeout="+c.lockTimeout)
- }
- if !c.lock {
- // only pass if false, so we don't need to worry about the 0.11 version check
- args = append(args, "-lock="+strconv.FormatBool(c.lock))
- }
- if c.copyState != "" {
- args = append(args, "-state="+c.copyState)
- }
-
- args = append(args, workspace)
-
- cmd := tf.buildTerraformCmd(ctx, nil, args...)
-
- return cmd, nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_select.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_select.go
deleted file mode 100644
index da88472a..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_select.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import "context"
-
-// WorkspaceSelect represents the workspace select subcommand to the Terraform CLI.
-func (tf *Terraform) WorkspaceSelect(ctx context.Context, workspace string) error {
- // TODO: [DIR] param option
-
- return tf.runTerraformCmd(ctx, tf.buildTerraformCmd(ctx, nil, "workspace", "select", "-no-color", workspace))
-}
diff --git a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_show.go b/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_show.go
deleted file mode 100644
index 840eff9a..00000000
--- a/vendor/github.com/hashicorp/terraform-exec/tfexec/workspace_show.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfexec
-
-import (
- "context"
- "fmt"
- "os/exec"
- "strings"
-)
-
-// WorkspaceShow represents the workspace show subcommand to the Terraform CLI.
-func (tf *Terraform) WorkspaceShow(ctx context.Context) (string, error) {
- workspaceShowCmd, err := tf.workspaceShowCmd(ctx)
- if err != nil {
- return "", err
- }
-
- var outBuffer strings.Builder
- workspaceShowCmd.Stdout = &outBuffer
-
- err = tf.runTerraformCmd(ctx, workspaceShowCmd)
- if err != nil {
- return "", err
- }
-
- return strings.TrimSpace(outBuffer.String()), nil
-}
-
-func (tf *Terraform) workspaceShowCmd(ctx context.Context) (*exec.Cmd, error) {
- err := tf.compatible(ctx, tf0_10_0, nil)
- if err != nil {
- return nil, fmt.Errorf("workspace show was first introduced in Terraform 0.10.0: %w", err)
- }
-
- return tf.buildTerraformCmd(ctx, nil, "workspace", "show", "-no-color"), nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-json/.copywrite.hcl b/vendor/github.com/hashicorp/terraform-json/.copywrite.hcl
deleted file mode 100644
index ada7d74a..00000000
--- a/vendor/github.com/hashicorp/terraform-json/.copywrite.hcl
+++ /dev/null
@@ -1,13 +0,0 @@
-schema_version = 1
-
-project {
- license = "MPL-2.0"
- copyright_year = 2019
-
- # (OPTIONAL) A list of globs that should not have copyright/license headers.
- # Supports doublestar glob patterns for more flexibility in defining which
- # files or folders should be ignored
- header_ignore = [
- "testdata/**",
- ]
-}
diff --git a/vendor/github.com/hashicorp/terraform-json/.gitignore b/vendor/github.com/hashicorp/terraform-json/.gitignore
deleted file mode 100644
index 15b499b9..00000000
--- a/vendor/github.com/hashicorp/terraform-json/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
-.terraform
-plan.tfplan
-terraform.tfstate.backup
diff --git a/vendor/github.com/hashicorp/terraform-json/.go-version b/vendor/github.com/hashicorp/terraform-json/.go-version
deleted file mode 100644
index 5fb5a6b4..00000000
--- a/vendor/github.com/hashicorp/terraform-json/.go-version
+++ /dev/null
@@ -1 +0,0 @@
-1.20
diff --git a/vendor/github.com/hashicorp/terraform-json/CODEOWNERS b/vendor/github.com/hashicorp/terraform-json/CODEOWNERS
deleted file mode 100644
index a99f162a..00000000
--- a/vendor/github.com/hashicorp/terraform-json/CODEOWNERS
+++ /dev/null
@@ -1,2 +0,0 @@
-# This codebase has shared ownership and responsibility.
-* @hashicorp/terraform-core @hashicorp/terraform-devex @hashicorp/tf-editor-experience-engineers
diff --git a/vendor/github.com/hashicorp/terraform-json/LICENSE b/vendor/github.com/hashicorp/terraform-json/LICENSE
deleted file mode 100644
index 3b97eaf3..00000000
--- a/vendor/github.com/hashicorp/terraform-json/LICENSE
+++ /dev/null
@@ -1,375 +0,0 @@
-Copyright (c) 2019 HashiCorp, Inc.
-
-Mozilla Public License Version 2.0
-==================================
-
-1. Definitions
---------------
-
-1.1. "Contributor"
- means each individual or legal entity that creates, contributes to
- the creation of, or owns Covered Software.
-
-1.2. "Contributor Version"
- means the combination of the Contributions of others (if any) used
- by a Contributor and that particular Contributor's Contribution.
-
-1.3. "Contribution"
- means Covered Software of a particular Contributor.
-
-1.4. "Covered Software"
- means Source Code Form to which the initial Contributor has attached
- the notice in Exhibit A, the Executable Form of such Source Code
- Form, and Modifications of such Source Code Form, in each case
- including portions thereof.
-
-1.5. "Incompatible With Secondary Licenses"
- means
-
- (a) that the initial Contributor has attached the notice described
- in Exhibit B to the Covered Software; or
-
- (b) that the Covered Software was made available under the terms of
- version 1.1 or earlier of the License, but not also under the
- terms of a Secondary License.
-
-1.6. "Executable Form"
- means any form of the work other than Source Code Form.
-
-1.7. "Larger Work"
- means a work that combines Covered Software with other material, in
- a separate file or files, that is not Covered Software.
-
-1.8. "License"
- means this document.
-
-1.9. "Licensable"
- means having the right to grant, to the maximum extent possible,
- whether at the time of the initial grant or subsequently, any and
- all of the rights conveyed by this License.
-
-1.10. "Modifications"
- means any of the following:
-
- (a) any file in Source Code Form that results from an addition to,
- deletion from, or modification of the contents of Covered
- Software; or
-
- (b) any new file in Source Code Form that contains any Covered
- Software.
-
-1.11. "Patent Claims" of a Contributor
- means any patent claim(s), including without limitation, method,
- process, and apparatus claims, in any patent Licensable by such
- Contributor that would be infringed, but for the grant of the
- License, by the making, using, selling, offering for sale, having
- made, import, or transfer of either its Contributions or its
- Contributor Version.
-
-1.12. "Secondary License"
- means either the GNU General Public License, Version 2.0, the GNU
- Lesser General Public License, Version 2.1, the GNU Affero General
- Public License, Version 3.0, or any later versions of those
- licenses.
-
-1.13. "Source Code Form"
- means the form of the work preferred for making modifications.
-
-1.14. "You" (or "Your")
- means an individual or a legal entity exercising rights under this
- License. For legal entities, "You" includes any entity that
- controls, is controlled by, or is under common control with You. For
- purposes of this definition, "control" means (a) the power, direct
- or indirect, to cause the direction or management of such entity,
- whether by contract or otherwise, or (b) ownership of more than
- fifty percent (50%) of the outstanding shares or beneficial
- ownership of such entity.
-
-2. License Grants and Conditions
---------------------------------
-
-2.1. Grants
-
-Each Contributor hereby grants You a world-wide, royalty-free,
-non-exclusive license:
-
-(a) under intellectual property rights (other than patent or trademark)
- Licensable by such Contributor to use, reproduce, make available,
- modify, display, perform, distribute, and otherwise exploit its
- Contributions, either on an unmodified basis, with Modifications, or
- as part of a Larger Work; and
-
-(b) under Patent Claims of such Contributor to make, use, sell, offer
- for sale, have made, import, and otherwise transfer either its
- Contributions or its Contributor Version.
-
-2.2. Effective Date
-
-The licenses granted in Section 2.1 with respect to any Contribution
-become effective for each Contribution on the date the Contributor first
-distributes such Contribution.
-
-2.3. Limitations on Grant Scope
-
-The licenses granted in this Section 2 are the only rights granted under
-this License. No additional rights or licenses will be implied from the
-distribution or licensing of Covered Software under this License.
-Notwithstanding Section 2.1(b) above, no patent license is granted by a
-Contributor:
-
-(a) for any code that a Contributor has removed from Covered Software;
- or
-
-(b) for infringements caused by: (i) Your and any other third party's
- modifications of Covered Software, or (ii) the combination of its
- Contributions with other software (except as part of its Contributor
- Version); or
-
-(c) under Patent Claims infringed by Covered Software in the absence of
- its Contributions.
-
-This License does not grant any rights in the trademarks, service marks,
-or logos of any Contributor (except as may be necessary to comply with
-the notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
-No Contributor makes additional grants as a result of Your choice to
-distribute the Covered Software under a subsequent version of this
-License (see Section 10.2) or under the terms of a Secondary License (if
-permitted under the terms of Section 3.3).
-
-2.5. Representation
-
-Each Contributor represents that the Contributor believes its
-Contributions are its original creation(s) or it has sufficient rights
-to grant the rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
-This License is not intended to limit any rights You have under
-applicable copyright doctrines of fair use, fair dealing, or other
-equivalents.
-
-2.7. Conditions
-
-Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
-in Section 2.1.
-
-3. Responsibilities
--------------------
-
-3.1. Distribution of Source Form
-
-All distribution of Covered Software in Source Code Form, including any
-Modifications that You create or to which You contribute, must be under
-the terms of this License. You must inform recipients that the Source
-Code Form of the Covered Software is governed by the terms of this
-License, and how they can obtain a copy of this License. You may not
-attempt to alter or restrict the recipients' rights in the Source Code
-Form.
-
-3.2. Distribution of Executable Form
-
-If You distribute Covered Software in Executable Form then:
-
-(a) such Covered Software must also be made available in Source Code
- Form, as described in Section 3.1, and You must inform recipients of
- the Executable Form how they can obtain a copy of such Source Code
- Form by reasonable means in a timely manner, at a charge no more
- than the cost of distribution to the recipient; and
-
-(b) You may distribute such Executable Form under the terms of this
- License, or sublicense it under different terms, provided that the
- license for the Executable Form does not attempt to limit or alter
- the recipients' rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
-You may create and distribute a Larger Work under terms of Your choice,
-provided that You also comply with the requirements of this License for
-the Covered Software. If the Larger Work is a combination of Covered
-Software with a work governed by one or more Secondary Licenses, and the
-Covered Software is not Incompatible With Secondary Licenses, this
-License permits You to additionally distribute such Covered Software
-under the terms of such Secondary License(s), so that the recipient of
-the Larger Work may, at their option, further distribute the Covered
-Software under the terms of either this License or such Secondary
-License(s).
-
-3.4. Notices
-
-You may not remove or alter the substance of any license notices
-(including copyright notices, patent notices, disclaimers of warranty,
-or limitations of liability) contained within the Source Code Form of
-the Covered Software, except that You may alter any license notices to
-the extent required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
-You may choose to offer, and to charge a fee for, warranty, support,
-indemnity or liability obligations to one or more recipients of Covered
-Software. However, You may do so only on Your own behalf, and not on
-behalf of any Contributor. You must make it absolutely clear that any
-such warranty, support, indemnity, or liability obligation is offered by
-You alone, and You hereby agree to indemnify every Contributor for any
-liability incurred by such Contributor as a result of warranty, support,
-indemnity or liability terms You offer. You may include additional
-disclaimers of warranty and limitations of liability specific to any
-jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
----------------------------------------------------
-
-If it is impossible for You to comply with any of the terms of this
-License with respect to some or all of the Covered Software due to
-statute, judicial order, or regulation then You must: (a) comply with
-the terms of this License to the maximum extent possible; and (b)
-describe the limitations and the code they affect. Such description must
-be placed in a text file included with all distributions of the Covered
-Software under this License. Except to the extent prohibited by statute
-or regulation, such description must be sufficiently detailed for a
-recipient of ordinary skill to be able to understand it.
-
-5. Termination
---------------
-
-5.1. The rights granted under this License will terminate automatically
-if You fail to comply with any of its terms. However, if You become
-compliant, then the rights granted under this License from a particular
-Contributor are reinstated (a) provisionally, unless and until such
-Contributor explicitly and finally terminates Your grants, and (b) on an
-ongoing basis, if such Contributor fails to notify You of the
-non-compliance by some reasonable means prior to 60 days after You have
-come back into compliance. Moreover, Your grants from a particular
-Contributor are reinstated on an ongoing basis if such Contributor
-notifies You of the non-compliance by some reasonable means, this is the
-first time You have received notice of non-compliance with this License
-from such Contributor, and You become compliant prior to 30 days after
-Your receipt of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
-infringement claim (excluding declaratory judgment actions,
-counter-claims, and cross-claims) alleging that a Contributor Version
-directly or indirectly infringes any patent, then the rights granted to
-You by any and all Contributors for the Covered Software under Section
-2.1 of this License shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all
-end user license agreements (excluding distributors and resellers) which
-have been validly granted by You or Your distributors under this License
-prior to termination shall survive termination.
-
-************************************************************************
-* *
-* 6. Disclaimer of Warranty *
-* ------------------------- *
-* *
-* Covered Software is provided under this License on an "as is" *
-* basis, without warranty of any kind, either expressed, implied, or *
-* statutory, including, without limitation, warranties that the *
-* Covered Software is free of defects, merchantable, fit for a *
-* particular purpose or non-infringing. The entire risk as to the *
-* quality and performance of the Covered Software is with You. *
-* Should any Covered Software prove defective in any respect, You *
-* (not any Contributor) assume the cost of any necessary servicing, *
-* repair, or correction. This disclaimer of warranty constitutes an *
-* essential part of this License. No use of any Covered Software is *
-* authorized under this License except under this disclaimer. *
-* *
-************************************************************************
-
-************************************************************************
-* *
-* 7. Limitation of Liability *
-* -------------------------- *
-* *
-* Under no circumstances and under no legal theory, whether tort *
-* (including negligence), contract, or otherwise, shall any *
-* Contributor, or anyone who distributes Covered Software as *
-* permitted above, be liable to You for any direct, indirect, *
-* special, incidental, or consequential damages of any character *
-* including, without limitation, damages for lost profits, loss of *
-* goodwill, work stoppage, computer failure or malfunction, or any *
-* and all other commercial damages or losses, even if such party *
-* shall have been informed of the possibility of such damages. This *
-* limitation of liability shall not apply to liability for death or *
-* personal injury resulting from such party's negligence to the *
-* extent applicable law prohibits such limitation. Some *
-* jurisdictions do not allow the exclusion or limitation of *
-* incidental or consequential damages, so this exclusion and *
-* limitation may not apply to You. *
-* *
-************************************************************************
-
-8. Litigation
--------------
-
-Any litigation relating to this License may be brought only in the
-courts of a jurisdiction where the defendant maintains its principal
-place of business and such litigation shall be governed by laws of that
-jurisdiction, without reference to its conflict-of-law provisions.
-Nothing in this Section shall prevent a party's ability to bring
-cross-claims or counter-claims.
-
-9. Miscellaneous
-----------------
-
-This License represents the complete agreement concerning the subject
-matter hereof. If any provision of this License is held to be
-unenforceable, such provision shall be reformed only to the extent
-necessary to make it enforceable. Any law or regulation which provides
-that the language of a contract shall be construed against the drafter
-shall not be used to construe this License against a Contributor.
-
-10. Versions of the License
----------------------------
-
-10.1. New Versions
-
-Mozilla Foundation is the license steward. Except as provided in Section
-10.3, no one other than the license steward has the right to modify or
-publish new versions of this License. Each version will be given a
-distinguishing version number.
-
-10.2. Effect of New Versions
-
-You may distribute the Covered Software under the terms of the version
-of the License under which You originally received the Covered Software,
-or under the terms of any subsequent version published by the license
-steward.
-
-10.3. Modified Versions
-
-If you create software not governed by this License, and you want to
-create a new license for such software, you may create and use a
-modified version of this License if you rename the license and remove
-any references to the name of the license steward (except to note that
-such modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary
-Licenses
-
-If You choose to distribute Source Code Form that is Incompatible With
-Secondary Licenses under the terms of this version of the License, the
-notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
--------------------------------------------
-
- This Source Code Form is subject to the terms of the Mozilla Public
- License, v. 2.0. If a copy of the MPL was not distributed with this
- file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular
-file, then You may include the notice in a location (such as a LICENSE
-file in a relevant directory) where a recipient would be likely to look
-for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - "Incompatible With Secondary Licenses" Notice
----------------------------------------------------------
-
- This Source Code Form is "Incompatible With Secondary Licenses", as
- defined by the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/terraform-json/Makefile b/vendor/github.com/hashicorp/terraform-json/Makefile
deleted file mode 100644
index bb93c7f9..00000000
--- a/vendor/github.com/hashicorp/terraform-json/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-GOTOOLS = \
- gotest.tools/gotestsum@latest
-
-test: tools
- gotestsum --format=short-verbose $(TEST) $(TESTARGS)
-
-generate:
- cd testdata && make generate
-
-modules:
- go mod download && go mod verify
-
-test-circle:
- mkdir -p test-results/terraform-json
- gotestsum --format=short-verbose --junitfile test-results/terraform-json/results.xml
-
-tools:
- @echo $(GOTOOLS) | xargs -t -n1 go install
- go mod tidy
-
-.PHONY: test generate modules test-circle tools
diff --git a/vendor/github.com/hashicorp/terraform-json/README.md b/vendor/github.com/hashicorp/terraform-json/README.md
deleted file mode 100644
index 462c1a81..00000000
--- a/vendor/github.com/hashicorp/terraform-json/README.md
+++ /dev/null
@@ -1,64 +0,0 @@
-# terraform-json
-
-[](https://godoc.org/github.com/hashicorp/terraform-json)
-
-This repository houses data types designed to help parse the data produced by
-two [Terraform](https://www.terraform.io/) commands:
-
-* [`terraform show -json`](https://www.terraform.io/docs/commands/show.html#json-output)
-* [`terraform providers schema -json`](https://www.terraform.io/docs/commands/providers/schema.html#json)
-
-While containing mostly data types, there are also a few helpers to assist with
-working with the data.
-
-This repository also serves as de facto documentation for the formats produced
-by these commands. For more details, see the
-[GoDoc](https://godoc.org/github.com/hashicorp/terraform-json).
-
-## Should I use this library?
-
-This library was built for a few specific applications, and is not intended for
-general purpose use.
-
-The Terraform core team **recommends against** using `terraform-json` if your
-application has any of the following requirements:
-
-* **Forward-compatibility**: each version of this library represents a specific
- snapshot of the [Terraform JSON output format](https://developer.hashicorp.com/terraform/internals/json-format),
- and it often slightly lags behind Terraform itself. The library supports
- [the 1.x compatibility promises](https://developer.hashicorp.com/terraform/language/v1-compatibility-promises)
- but you will need to upgrade the version promptly to use new additions. If you
- require full compatibility with future Terraform versions, we recommend
- implementing your own custom decoders for the parts of the JSON format you need.
-* **Writing JSON output**: the structures in this library are not guaranteed to emit
- JSON data which is semantically equivalent to Terraform itself. If your application
- must robustly write JSON data to be consumed by systems which expect Terraform's
- format to be supported, you should implement your own custom encoders.
-* **Filtering or round-tripping**: the Terraform JSON formats are designed to be
- forwards compatible, and permit new attributes to be added which may safely be
- ignored by earlier versions of consumers. This library **drops unknown attributes**,
- which means it is unsuitable for any application which intends to filter data
- or read-modify-write data which will be consumed downstream. Any application doing
- this will silently drop new data from new versions. For this application, you should
- implement a custom decoder and encoder which preserves any unknown attributes
- through a round-trip.
-
-When is `terraform-json` suitable? We recommend using it for applications which
-decode the core stable data types and use it directly, and don't attempt to emit
-JSON to be consumed by applications which expect the Terraform format.
-
-## Why a separate repository?
-
-To reduce dependencies on any of Terraform core's internals, we've made a design
-decision to make any helpers or libraries that work with the external JSON data
-external and not a part of the Terraform GitHub repository itself.
-
-While Terraform core will change often and be relatively unstable, this library
-will see a smaller amount of change. Most of the major changes have already
-happened leading up to 0.12, so you can expect this library to only see minor
-incremental changes going forward.
-
-For this reason, `terraform show -json` and `terraform providers schema -json`
-is the recommended format for working with Terraform data externally, and as
-such, if you require any help working with the data in these formats, or even a
-reference of how the JSON is formatted, use this repository.
diff --git a/vendor/github.com/hashicorp/terraform-json/action.go b/vendor/github.com/hashicorp/terraform-json/action.go
deleted file mode 100644
index c74f7e68..00000000
--- a/vendor/github.com/hashicorp/terraform-json/action.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfjson
-
-// Action is a valid action type for a resource change.
-//
-// Note that a singular Action is not telling of a full resource
-// change operation. Certain resource actions, such as replacement,
-// are a composite of more than one type. See the Actions type and
-// its helpers for more information.
-type Action string
-
-const (
- // ActionNoop denotes a no-op operation.
- ActionNoop Action = "no-op"
-
- // ActionCreate denotes a create operation.
- ActionCreate Action = "create"
-
- // ActionRead denotes a read operation.
- ActionRead Action = "read"
-
- // ActionUpdate denotes an update operation.
- ActionUpdate Action = "update"
-
- // ActionDelete denotes a delete operation.
- ActionDelete Action = "delete"
-)
-
-// Actions denotes a valid change type.
-type Actions []Action
-
-// NoOp is true if this set of Actions denotes a no-op.
-func (a Actions) NoOp() bool {
- if len(a) != 1 {
- return false
- }
-
- return a[0] == ActionNoop
-}
-
-// Create is true if this set of Actions denotes creation of a new
-// resource.
-func (a Actions) Create() bool {
- if len(a) != 1 {
- return false
- }
-
- return a[0] == ActionCreate
-}
-
-// Read is true if this set of Actions denotes a read operation only.
-func (a Actions) Read() bool {
- if len(a) != 1 {
- return false
- }
-
- return a[0] == ActionRead
-}
-
-// Update is true if this set of Actions denotes an update operation.
-func (a Actions) Update() bool {
- if len(a) != 1 {
- return false
- }
-
- return a[0] == ActionUpdate
-}
-
-// Delete is true if this set of Actions denotes resource removal.
-func (a Actions) Delete() bool {
- if len(a) != 1 {
- return false
- }
-
- return a[0] == ActionDelete
-}
-
-// DestroyBeforeCreate is true if this set of Actions denotes a
-// destroy-before-create operation. This is the standard resource
-// replacement method.
-func (a Actions) DestroyBeforeCreate() bool {
- if len(a) != 2 {
- return false
- }
-
- return a[0] == ActionDelete && a[1] == ActionCreate
-}
-
-// CreateBeforeDestroy is true if this set of Actions denotes a
-// create-before-destroy operation, usually the result of replacement
-// to a resource that has the create_before_destroy lifecycle option
-// set.
-func (a Actions) CreateBeforeDestroy() bool {
- if len(a) != 2 {
- return false
- }
-
- return a[0] == ActionCreate && a[1] == ActionDelete
-}
-
-// Replace is true if this set of Actions denotes a valid replacement
-// operation.
-func (a Actions) Replace() bool {
- return a.DestroyBeforeCreate() || a.CreateBeforeDestroy()
-}
diff --git a/vendor/github.com/hashicorp/terraform-json/checks.go b/vendor/github.com/hashicorp/terraform-json/checks.go
deleted file mode 100644
index 558cb290..00000000
--- a/vendor/github.com/hashicorp/terraform-json/checks.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfjson
-
-// CheckKind is a string representation of the type of conditional check
-// referenced in a check result.
-type CheckKind string
-
-const (
- // CheckKindResource indicates the check result is from a pre- or
- // post-condition on a resource or data source.
- CheckKindResource CheckKind = "resource"
-
- // CheckKindOutputValue indicates the check result is from an output
- // post-condition.
- CheckKindOutputValue CheckKind = "output_value"
-
- // CheckKindCheckBlock indicates the check result is from a check block.
- CheckKindCheckBlock CheckKind = "check"
-)
-
-// CheckStatus is a string representation of the status of a given conditional
-// check.
-type CheckStatus string
-
-const (
- // CheckStatusPass indicates the check passed.
- CheckStatusPass CheckStatus = "pass"
-
- // CheckStatusFail indicates the check failed.
- CheckStatusFail CheckStatus = "fail"
-
- // CheckStatusError indicates the check errored. This is distinct from
- // CheckStatusFail in that it represents a logical or configuration error
- // within the check block that prevented the check from executing, as
- // opposed to the check was attempted and evaluated to false.
- CheckStatusError CheckStatus = "error"
-
- // CheckStatusUnknown indicates the result of the check was not known. This
- // could be because a value within the check could not be known at plan
- // time, or because the overall plan failed for an unrelated reason before
- // this check could be executed.
- CheckStatusUnknown CheckStatus = "unknown"
-)
-
-// CheckStaticAddress details the address of the object that performed a given
-// check. The static address points to the overall resource, as opposed to the
-// dynamic address which contains the instance key for any resource that has
-// multiple instances.
-type CheckStaticAddress struct {
- // ToDisplay is a formatted and ready to display representation of the
- // address.
- ToDisplay string `json:"to_display"`
-
- // Kind represents the CheckKind of this check.
- Kind CheckKind `json:"kind"`
-
- // Module is the module part of the address. This will be empty for any
- // resources in the root module.
- Module string `json:"module,omitempty"`
-
- // Mode is the ResourceMode of the resource that contains this check. This
- // field is only set is Kind equals CheckKindResource.
- Mode ResourceMode `json:"mode,omitempty"`
-
- // Type is the resource type for the resource that contains this check. This
- // field is only set if Kind equals CheckKindResource.
- Type string `json:"type,omitempty"`
-
- // Name is the name of the resource, check block, or output that contains
- // this check.
- Name string `json:"name,omitempty"`
-}
-
-// CheckDynamicAddress contains the InstanceKey field for any resources that
-// have multiple instances. A complete address can be built by combining the
-// CheckStaticAddress with the CheckDynamicAddress.
-type CheckDynamicAddress struct {
- // ToDisplay is a formatted and ready to display representation of the
- // full address, including the additional information from the relevant
- // CheckStaticAddress.
- ToDisplay string `json:"to_display"`
-
- // Module is the module part of the address. This address will include the
- // instance key for any module expansions resulting from foreach or count
- // arguments. This field will be empty for any resources within the root
- // module.
- Module string `json:"module,omitempty"`
-
- // InstanceKey is the instance key for any instances of a given resource.
- //
- // InstanceKey will be empty if there was no foreach or count argument
- // defined on the containing object.
- InstanceKey interface{} `json:"instance_key,omitempty"`
-}
-
-// CheckResultStatic is the container for a "checkable object".
-//
-// A "checkable object" is a resource or data source, an output, or a check
-// block.
-type CheckResultStatic struct {
- // Address is the absolute address of the "checkable object"
- Address CheckStaticAddress `json:"address"`
-
- // Status is the overall status for all the checks within this object.
- Status CheckStatus `json:"status"`
-
- // Instances contains the results for dynamic object that belongs to this
- // static object. For example, any instances created from an object using
- // the foreach or count meta arguments.
- //
- // Check blocks and outputs will only contain a single instance, while
- // resources can contain 1 to many.
- Instances []CheckResultDynamic `json:"instances,omitempty"`
-}
-
-// CheckResultDynamic describes the check result for a dynamic object that
-// results from the expansion of the containing object.
-type CheckResultDynamic struct {
- // Address is the relative address of this instance given the Address in the
- // parent object.
- Address CheckDynamicAddress `json:"address"`
-
- // Status is the overall status for the checks within this dynamic object.
- Status CheckStatus `json:"status"`
-
- // Problems describes any additional optional details about this check if
- // the check failed.
- //
- // This will not include the errors resulting from this check block, as they
- // will be exposed as diagnostics in the original terraform execution. It
- // may contain any failure messages even if the overall status is
- // CheckStatusError, however, as the instance could contain multiple checks
- // that returned a mix of error and failure statuses.
- Problems []CheckResultProblem `json:"problems,omitempty"`
-}
-
-// CheckResultProblem describes one of potentially several problems that led to
-// a check being classied as CheckStatusFail.
-type CheckResultProblem struct {
- // Message is the condition error message provided by the original check
- // author.
- Message string `json:"message"`
-}
diff --git a/vendor/github.com/hashicorp/terraform-json/config.go b/vendor/github.com/hashicorp/terraform-json/config.go
deleted file mode 100644
index e8ea638a..00000000
--- a/vendor/github.com/hashicorp/terraform-json/config.go
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfjson
-
-import (
- "encoding/json"
- "errors"
-)
-
-// Config represents the complete configuration source.
-type Config struct {
- // A map of all provider instances across all modules in the
- // configuration.
- //
- // The index for this field is opaque and should not be parsed. Use
- // the individual fields in ProviderConfig to discern actual data
- // about the provider such as name, alias, or defined module.
- ProviderConfigs map[string]*ProviderConfig `json:"provider_config,omitempty"`
-
- // The root module in the configuration. Any child modules descend
- // off of here.
- RootModule *ConfigModule `json:"root_module,omitempty"`
-}
-
-// Validate checks to ensure that the config is present.
-func (c *Config) Validate() error {
- if c == nil {
- return errors.New("config is nil")
- }
-
- return nil
-}
-
-func (c *Config) UnmarshalJSON(b []byte) error {
- type rawConfig Config
- var config rawConfig
-
- err := json.Unmarshal(b, &config)
- if err != nil {
- return err
- }
-
- *c = *(*Config)(&config)
-
- return c.Validate()
-}
-
-// ProviderConfig describes a provider configuration instance.
-type ProviderConfig struct {
- // The name of the provider, ie: "aws".
- Name string `json:"name,omitempty"`
-
- // The fully-specified name of the provider, ie: "registry.terraform.io/hashicorp/aws".
- FullName string `json:"full_name,omitempty"`
-
- // The alias of the provider, ie: "us-east-1".
- Alias string `json:"alias,omitempty"`
-
- // The address of the module the provider is declared in.
- ModuleAddress string `json:"module_address,omitempty"`
-
- // Any non-special configuration values in the provider, indexed by
- // key.
- Expressions map[string]*Expression `json:"expressions,omitempty"`
-
- // The defined version constraint for this provider.
- VersionConstraint string `json:"version_constraint,omitempty"`
-}
-
-// ConfigModule describes a module in Terraform configuration.
-type ConfigModule struct {
- // The outputs defined in the module.
- Outputs map[string]*ConfigOutput `json:"outputs,omitempty"`
-
- // The resources defined in the module.
- Resources []*ConfigResource `json:"resources,omitempty"`
-
- // Any "module" stanzas within the specific module.
- ModuleCalls map[string]*ModuleCall `json:"module_calls,omitempty"`
-
- // The variables defined in the module.
- Variables map[string]*ConfigVariable `json:"variables,omitempty"`
-}
-
-// ConfigOutput defines an output as defined in configuration.
-type ConfigOutput struct {
- // Indicates whether or not the output was marked as sensitive.
- Sensitive bool `json:"sensitive,omitempty"`
-
- // The defined value of the output.
- Expression *Expression `json:"expression,omitempty"`
-
- // The defined description of this output.
- Description string `json:"description,omitempty"`
-
- // The defined dependencies tied to this output.
- DependsOn []string `json:"depends_on,omitempty"`
-}
-
-// ConfigResource is the configuration representation of a resource.
-type ConfigResource struct {
- // The address of the resource relative to the module that it is
- // in.
- Address string `json:"address,omitempty"`
-
- // The resource mode.
- Mode ResourceMode `json:"mode,omitempty"`
-
- // The type of resource, ie: "null_resource" in
- // "null_resource.foo".
- Type string `json:"type,omitempty"`
-
- // The name of the resource, ie: "foo" in "null_resource.foo".
- Name string `json:"name,omitempty"`
-
- // An opaque key representing the provider configuration this
- // module uses. Note that there are more than one circumstance that
- // this key will not match what is found in the ProviderConfigs
- // field in the root Config structure, and as such should not be
- // relied on for that purpose.
- ProviderConfigKey string `json:"provider_config_key,omitempty"`
-
- // The list of provisioner defined for this configuration. This
- // will be nil if no providers are defined.
- Provisioners []*ConfigProvisioner `json:"provisioners,omitempty"`
-
- // Any non-special configuration values in the resource, indexed by
- // key.
- Expressions map[string]*Expression `json:"expressions,omitempty"`
-
- // The resource's configuration schema version. With access to the
- // specific Terraform provider for this resource, this can be used
- // to determine the correct schema for the configuration data
- // supplied in Expressions.
- SchemaVersion uint64 `json:"schema_version"`
-
- // The expression data for the "count" value in the resource.
- CountExpression *Expression `json:"count_expression,omitempty"`
-
- // The expression data for the "for_each" value in the resource.
- ForEachExpression *Expression `json:"for_each_expression,omitempty"`
-
- // The contents of the "depends_on" config directive, which
- // declares explicit dependencies for this resource.
- DependsOn []string `json:"depends_on,omitempty"`
-}
-
-// ConfigVariable defines a variable as defined in configuration.
-type ConfigVariable struct {
- // The defined default value of the variable.
- Default interface{} `json:"default,omitempty"`
-
- // The defined text description of the variable.
- Description string `json:"description,omitempty"`
-
- // Whether the variable is marked as sensitive
- Sensitive bool `json:"sensitive,omitempty"`
-}
-
-// ConfigProvisioner describes a provisioner declared in a resource
-// configuration.
-type ConfigProvisioner struct {
- // The type of the provisioner, ie: "local-exec".
- Type string `json:"type,omitempty"`
-
- // Any non-special configuration values in the provisioner, indexed by
- // key.
- Expressions map[string]*Expression `json:"expressions,omitempty"`
-}
-
-// ModuleCall describes a declared "module" within a configuration.
-// It also contains the data for the module itself.
-type ModuleCall struct {
- // The contents of the "source" field.
- Source string `json:"source,omitempty"`
-
- // Any non-special configuration values in the module, indexed by
- // key.
- Expressions map[string]*Expression `json:"expressions,omitempty"`
-
- // The expression data for the "count" value in the module.
- CountExpression *Expression `json:"count_expression,omitempty"`
-
- // The expression data for the "for_each" value in the module.
- ForEachExpression *Expression `json:"for_each_expression,omitempty"`
-
- // The configuration data for the module itself.
- Module *ConfigModule `json:"module,omitempty"`
-
- // The version constraint for modules that come from the registry.
- VersionConstraint string `json:"version_constraint,omitempty"`
-
- // The explicit resource dependencies for the "depends_on" value.
- // As it must be a slice of references, Expression is not used.
- DependsOn []string `json:"depends_on,omitempty"`
-}
diff --git a/vendor/github.com/hashicorp/terraform-json/expression.go b/vendor/github.com/hashicorp/terraform-json/expression.go
deleted file mode 100644
index 5ecb15ce..00000000
--- a/vendor/github.com/hashicorp/terraform-json/expression.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfjson
-
-import "encoding/json"
-
-type unknownConstantValue struct{}
-
-// UnknownConstantValue is a singleton type that denotes that a
-// constant value is explicitly unknown. This is set during an
-// unmarshal when references are found in an expression to help more
-// explicitly differentiate between an explicit null and unknown
-// value.
-var UnknownConstantValue = &unknownConstantValue{}
-
-// Expression describes the format for an individual key in a
-// Terraform configuration.
-//
-// This struct wraps ExpressionData to support custom JSON parsing.
-type Expression struct {
- *ExpressionData
-}
-
-// ExpressionData describes the format for an individual key in a
-// Terraform configuration.
-type ExpressionData struct {
- // If the *entire* expression is a constant-defined value, this
- // will contain the Go representation of the expression's data.
- //
- // Note that a nil here denotes and explicit null. When a value is
- // unknown on part of the value coming from an expression that
- // cannot be resolved at parse time, this field will contain
- // UnknownConstantValue.
- ConstantValue interface{} `json:"constant_value,omitempty"`
-
- // If any part of the expression contained values that were not
- // able to be resolved at parse-time, this will contain a list of
- // the referenced identifiers that caused the value to be unknown.
- References []string `json:"references,omitempty"`
-
- // A list of complex objects that were nested in this expression.
- // If this value is a nested block in configuration, sometimes
- // referred to as a "sub-resource", this field will contain those
- // values, and ConstantValue and References will be blank.
- NestedBlocks []map[string]*Expression `json:"-"`
-}
-
-// UnmarshalJSON implements json.Unmarshaler for Expression.
-func (e *Expression) UnmarshalJSON(b []byte) error {
- result := new(ExpressionData)
-
- // Check to see if this is an array first. If it is, this is more
- // than likely a list of nested blocks.
- var rawNested []map[string]json.RawMessage
- if err := json.Unmarshal(b, &rawNested); err == nil {
- result.NestedBlocks, err = unmarshalExpressionBlocks(rawNested)
- if err != nil {
- return err
- }
- } else {
- // It's a non-nested expression block, parse normally
- if err := json.Unmarshal(b, &result); err != nil {
- return err
- }
-
- // If References is non-zero, then ConstantValue is unknown. Set
- // this explicitly.
- if len(result.References) > 0 {
- result.ConstantValue = UnknownConstantValue
- }
- }
-
- e.ExpressionData = result
- return nil
-}
-
-func unmarshalExpressionBlocks(raw []map[string]json.RawMessage) ([]map[string]*Expression, error) {
- var result []map[string]*Expression
-
- for _, rawBlock := range raw {
- block := make(map[string]*Expression)
- for k, rawExpr := range rawBlock {
- var expr *Expression
- if err := json.Unmarshal(rawExpr, &expr); err != nil {
- return nil, err
- }
-
- block[k] = expr
- }
-
- result = append(result, block)
- }
-
- return result, nil
-}
-
-// MarshalJSON implements json.Marshaler for Expression.
-func (e *Expression) MarshalJSON() ([]byte, error) {
- switch {
- case len(e.ExpressionData.NestedBlocks) > 0:
- return marshalExpressionBlocks(e.ExpressionData.NestedBlocks)
-
- case e.ExpressionData.ConstantValue == UnknownConstantValue:
- return json.Marshal(&ExpressionData{
- References: e.ExpressionData.References,
- })
- }
-
- return json.Marshal(e.ExpressionData)
-}
-
-func marshalExpressionBlocks(nested []map[string]*Expression) ([]byte, error) {
- var rawNested []map[string]json.RawMessage
- for _, block := range nested {
- rawBlock := make(map[string]json.RawMessage)
- for k, expr := range block {
- raw, err := json.Marshal(expr)
- if err != nil {
- return nil, err
- }
-
- rawBlock[k] = raw
- }
-
- rawNested = append(rawNested, rawBlock)
- }
-
- return json.Marshal(rawNested)
-}
diff --git a/vendor/github.com/hashicorp/terraform-json/metadata.go b/vendor/github.com/hashicorp/terraform-json/metadata.go
deleted file mode 100644
index 8ac111ad..00000000
--- a/vendor/github.com/hashicorp/terraform-json/metadata.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfjson
-
-import (
- "encoding/json"
- "errors"
- "fmt"
-
- "github.com/hashicorp/go-version"
- "github.com/zclconf/go-cty/cty"
-)
-
-// MetadataFunctionsFormatVersionConstraints defines the versions of the JSON
-// metadata functions format that are supported by this package.
-var MetadataFunctionsFormatVersionConstraints = "~> 1.0"
-
-// MetadataFunctions is the top-level object returned when exporting function
-// signatures
-type MetadataFunctions struct {
- // The version of the format. This should always match the
- // MetadataFunctionsFormatVersionConstraints in this package, else
- // unmarshaling will fail.
- FormatVersion string `json:"format_version"`
-
- // The signatures of the functions available in a Terraform version.
- Signatures map[string]*FunctionSignature `json:"function_signatures,omitempty"`
-}
-
-// Validate checks to ensure that MetadataFunctions is present, and the
-// version matches the version supported by this library.
-func (f *MetadataFunctions) Validate() error {
- if f == nil {
- return errors.New("metadata functions data is nil")
- }
-
- if f.FormatVersion == "" {
- return errors.New("unexpected metadata functions data, format version is missing")
- }
-
- constraint, err := version.NewConstraint(MetadataFunctionsFormatVersionConstraints)
- if err != nil {
- return fmt.Errorf("invalid version constraint: %w", err)
- }
-
- version, err := version.NewVersion(f.FormatVersion)
- if err != nil {
- return fmt.Errorf("invalid format version %q: %w", f.FormatVersion, err)
- }
-
- if !constraint.Check(version) {
- return fmt.Errorf("unsupported metadata functions format version: %q does not satisfy %q",
- version, constraint)
- }
-
- return nil
-}
-
-func (f *MetadataFunctions) UnmarshalJSON(b []byte) error {
- type rawFunctions MetadataFunctions
- var functions rawFunctions
-
- err := json.Unmarshal(b, &functions)
- if err != nil {
- return err
- }
-
- *f = *(*MetadataFunctions)(&functions)
-
- return f.Validate()
-}
-
-// FunctionSignature represents a function signature.
-type FunctionSignature struct {
- // Description is an optional human-readable description
- // of the function
- Description string `json:"description,omitempty"`
-
- // Summary is an optional shortened description of the function
- Summary string `json:"summary,omitempty"`
-
- // DeprecationMessage is an optional message that indicates that the
- // function should be considered deprecated and what actions should be
- // performed by the practitioner to handle the deprecation.
- DeprecationMessage string `json:"deprecation_message,omitempty"`
-
- // ReturnType is the ctyjson representation of the function's
- // return types based on supplying all parameters using
- // dynamic types. Functions can have dynamic return types.
- ReturnType cty.Type `json:"return_type"`
-
- // Parameters describes the function's fixed positional parameters.
- Parameters []*FunctionParameter `json:"parameters,omitempty"`
-
- // VariadicParameter describes the function's variadic
- // parameter if it is supported.
- VariadicParameter *FunctionParameter `json:"variadic_parameter,omitempty"`
-}
-
-// FunctionParameter represents a parameter to a function.
-type FunctionParameter struct {
- // Name is an optional name for the argument.
- Name string `json:"name,omitempty"`
-
- // Description is an optional human-readable description
- // of the argument
- Description string `json:"description,omitempty"`
-
- // IsNullable is true if null is acceptable value for the argument
- IsNullable bool `json:"is_nullable,omitempty"`
-
- // A type that any argument for this parameter must conform to.
- Type cty.Type `json:"type"`
-}
diff --git a/vendor/github.com/hashicorp/terraform-json/plan.go b/vendor/github.com/hashicorp/terraform-json/plan.go
deleted file mode 100644
index d8618985..00000000
--- a/vendor/github.com/hashicorp/terraform-json/plan.go
+++ /dev/null
@@ -1,292 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfjson
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
-
- "github.com/hashicorp/go-version"
-)
-
-// PlanFormatVersionConstraints defines the versions of the JSON plan format
-// that are supported by this package.
-var PlanFormatVersionConstraints = ">= 0.1, < 2.0"
-
-// ResourceMode is a string representation of the resource type found
-// in certain fields in the plan.
-type ResourceMode string
-
-const (
- // DataResourceMode is the resource mode for data sources.
- DataResourceMode ResourceMode = "data"
-
- // ManagedResourceMode is the resource mode for managed resources.
- ManagedResourceMode ResourceMode = "managed"
-)
-
-// Plan represents the entire contents of an output Terraform plan.
-type Plan struct {
- // useJSONNumber opts into the behavior of calling
- // json.Decoder.UseNumber prior to decoding the plan, which turns
- // numbers into json.Numbers instead of float64s. Set it using
- // Plan.UseJSONNumber.
- useJSONNumber bool
-
- // The version of the plan format. This should always match the
- // PlanFormatVersion constant in this package, or else an unmarshal
- // will be unstable.
- FormatVersion string `json:"format_version,omitempty"`
-
- // The version of Terraform used to make the plan.
- TerraformVersion string `json:"terraform_version,omitempty"`
-
- // The variables set in the root module when creating the plan.
- Variables map[string]*PlanVariable `json:"variables,omitempty"`
-
- // The common state representation of resources within this plan.
- // This is a product of the existing state merged with the diff for
- // this plan.
- PlannedValues *StateValues `json:"planned_values,omitempty"`
-
- // The change operations for resources and data sources within this plan
- // resulting from resource drift.
- ResourceDrift []*ResourceChange `json:"resource_drift,omitempty"`
-
- // The change operations for resources and data sources within this
- // plan.
- ResourceChanges []*ResourceChange `json:"resource_changes,omitempty"`
-
- // DeferredChanges contains the change operations for resources that are deferred
- // for this plan.
- DeferredChanges []*DeferredResourceChange `json:"deferred_changes,omitempty"`
-
- // Complete indicates that all resources have successfully planned changes.
- // This will be false if there are DeferredChanges or if the -target flag is used.
- //
- // Complete was introduced in Terraform 1.8 and will be nil for all previous
- // Terraform versions.
- Complete *bool `json:"complete,omitempty"`
-
- // The change operations for outputs within this plan.
- OutputChanges map[string]*Change `json:"output_changes,omitempty"`
-
- // The Terraform state prior to the plan operation. This is the
- // same format as PlannedValues, without the current diff merged.
- PriorState *State `json:"prior_state,omitempty"`
-
- // The Terraform configuration used to make the plan.
- Config *Config `json:"configuration,omitempty"`
-
- // RelevantAttributes represents any resource instances and their
- // attributes which may have contributed to the planned changes
- RelevantAttributes []ResourceAttribute `json:"relevant_attributes,omitempty"`
-
- // Checks contains the results of any conditional checks executed, or
- // planned to be executed, during this plan.
- Checks []CheckResultStatic `json:"checks,omitempty"`
-
- // Timestamp contains the static timestamp that Terraform considers to be
- // the time this plan executed, in UTC.
- Timestamp string `json:"timestamp,omitempty"`
-}
-
-// ResourceAttribute describes a full path to a resource attribute
-type ResourceAttribute struct {
- // Resource describes resource instance address (e.g. null_resource.foo)
- Resource string `json:"resource"`
- // Attribute describes the attribute path using a lossy representation
- // of cty.Path. (e.g. ["id"] or ["objects", 0, "val"]).
- Attribute []json.RawMessage `json:"attribute"`
-}
-
-// UseJSONNumber controls whether the Plan will be decoded using the
-// json.Number behavior or the float64 behavior. When b is true, the Plan will
-// represent numbers in PlanOutputs as json.Numbers. When b is false, the
-// Plan will represent numbers in PlanOutputs as float64s.
-func (p *Plan) UseJSONNumber(b bool) {
- p.useJSONNumber = b
-}
-
-// Validate checks to ensure that the plan is present, and the
-// version matches the version supported by this library.
-func (p *Plan) Validate() error {
- if p == nil {
- return errors.New("plan is nil")
- }
-
- if p.FormatVersion == "" {
- return errors.New("unexpected plan input, format version is missing")
- }
-
- constraint, err := version.NewConstraint(PlanFormatVersionConstraints)
- if err != nil {
- return fmt.Errorf("invalid version constraint: %w", err)
- }
-
- version, err := version.NewVersion(p.FormatVersion)
- if err != nil {
- return fmt.Errorf("invalid format version %q: %w", p.FormatVersion, err)
- }
-
- if !constraint.Check(version) {
- return fmt.Errorf("unsupported plan format version: %q does not satisfy %q",
- version, constraint)
- }
-
- return nil
-}
-
-func isStringInSlice(slice []string, s string) bool {
- for _, el := range slice {
- if el == s {
- return true
- }
- }
- return false
-}
-
-func (p *Plan) UnmarshalJSON(b []byte) error {
- type rawPlan Plan
- var plan rawPlan
-
- dec := json.NewDecoder(bytes.NewReader(b))
- if p.useJSONNumber {
- dec.UseNumber()
- }
- err := dec.Decode(&plan)
- if err != nil {
- return err
- }
-
- *p = *(*Plan)(&plan)
-
- return p.Validate()
-}
-
-// ResourceChange is a description of an individual change action
-// that Terraform plans to use to move from the prior state to a new
-// state matching the configuration.
-type ResourceChange struct {
- // The absolute resource address.
- Address string `json:"address,omitempty"`
-
- // The absolute address that this resource instance had
- // at the conclusion of a previous plan.
- PreviousAddress string `json:"previous_address,omitempty"`
-
- // The module portion of the above address. Omitted if the instance
- // is in the root module.
- ModuleAddress string `json:"module_address,omitempty"`
-
- // The resource mode.
- Mode ResourceMode `json:"mode,omitempty"`
-
- // The resource type, example: "aws_instance" for aws_instance.foo.
- Type string `json:"type,omitempty"`
-
- // The resource name, example: "foo" for aws_instance.foo.
- Name string `json:"name,omitempty"`
-
- // The instance key for any resources that have been created using
- // "count" or "for_each". If neither of these apply the key will be
- // empty.
- //
- // This value can be either an integer (int) or a string.
- Index interface{} `json:"index,omitempty"`
-
- // The name of the provider this resource belongs to. This allows
- // the provider to be interpreted unambiguously in the unusual
- // situation where a provider offers a resource type whose name
- // does not start with its own name, such as the "googlebeta"
- // provider offering "google_compute_instance".
- ProviderName string `json:"provider_name,omitempty"`
-
- // An identifier used during replacement operations, and can be
- // used to identify the exact resource being replaced in state.
- DeposedKey string `json:"deposed,omitempty"`
-
- // The data describing the change that will be made to this object.
- Change *Change `json:"change,omitempty"`
-}
-
-// Change is the representation of a proposed change for an object.
-type Change struct {
- // The action to be carried out by this change.
- Actions Actions `json:"actions,omitempty"`
-
- // Before and After are representations of the object value both
- // before and after the action. For create and delete actions,
- // either Before or After is unset (respectively). For no-op
- // actions, both values will be identical. After will be incomplete
- // if there are values within it that won't be known until after
- // apply.
- Before interface{} `json:"before,"`
- After interface{} `json:"after,omitempty"`
-
- // A deep object of booleans that denotes any values that are
- // unknown in a resource. These values were previously referred to
- // as "computed" values.
- //
- // If the value cannot be found in this map, then its value should
- // be available within After, so long as the operation supports it.
- AfterUnknown interface{} `json:"after_unknown,omitempty"`
-
- // BeforeSensitive and AfterSensitive are object values with similar
- // structure to Before and After, but with all sensitive leaf values
- // replaced with true, and all non-sensitive leaf values omitted. These
- // objects should be combined with Before and After to prevent accidental
- // display of sensitive values in user interfaces.
- BeforeSensitive interface{} `json:"before_sensitive,omitempty"`
- AfterSensitive interface{} `json:"after_sensitive,omitempty"`
-
- // Importing contains the import metadata about this operation. If importing
- // is present (ie. not null) then the change is an import operation in
- // addition to anything mentioned in the actions field. The actual contents
- // of the Importing struct is subject to change, so downstream consumers
- // should treat any values in here as strictly optional.
- Importing *Importing `json:"importing,omitempty"`
-
- // GeneratedConfig contains any HCL config generated for this resource
- // during planning as a string.
- //
- // If this is populated, then Importing should also be populated but this
- // might change in the future. However, not all Importing changes will
- // contain generated config.
- GeneratedConfig string `json:"generated_config,omitempty"`
-
- // ReplacePaths contains a set of paths that point to attributes/elements
- // that are causing the overall resource to be replaced rather than simply
- // updated.
- //
- // This field is always a slice of indexes, where an index in this context
- // is either an integer pointing to a child of a set/list, or a string
- // pointing to the child of a map, object, or block.
- ReplacePaths []interface{} `json:"replace_paths,omitempty"`
-}
-
-// Importing is a nested object for the resource import metadata.
-type Importing struct {
- // The original ID of this resource used to target it as part of planned
- // import operation.
- ID string `json:"id,omitempty"`
-}
-
-// PlanVariable is a top-level variable in the Terraform plan.
-type PlanVariable struct {
- // The value for this variable at plan time.
- Value interface{} `json:"value,omitempty"`
-}
-
-// DeferredResourceChange is a description of a resource change that has been
-// deferred for some reason.
-type DeferredResourceChange struct {
- // Reason is the reason why this resource change was deferred.
- Reason string `json:"reason,omitempty"`
-
- // Change contains any information we have about the deferred change.
- ResourceChange *ResourceChange `json:"resource_change,omitempty"`
-}
diff --git a/vendor/github.com/hashicorp/terraform-json/schemas.go b/vendor/github.com/hashicorp/terraform-json/schemas.go
deleted file mode 100644
index a2918ef4..00000000
--- a/vendor/github.com/hashicorp/terraform-json/schemas.go
+++ /dev/null
@@ -1,287 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfjson
-
-import (
- "encoding/json"
- "errors"
- "fmt"
-
- "github.com/hashicorp/go-version"
- "github.com/zclconf/go-cty/cty"
-)
-
-// ProviderSchemasFormatVersionConstraints defines the versions of the JSON
-// provider schema format that are supported by this package.
-var ProviderSchemasFormatVersionConstraints = ">= 0.1, < 2.0"
-
-// ProviderSchemas represents the schemas of all providers and
-// resources in use by the configuration.
-type ProviderSchemas struct {
- // The version of the plan format. This should always match one of
- // ProviderSchemasFormatVersions in this package, or else
- // an unmarshal will be unstable.
- FormatVersion string `json:"format_version,omitempty"`
-
- // The schemas for the providers in this configuration, indexed by
- // provider type. Aliases are not included, and multiple instances
- // of a provider in configuration will be represented by a single
- // provider here.
- Schemas map[string]*ProviderSchema `json:"provider_schemas,omitempty"`
-}
-
-// Validate checks to ensure that ProviderSchemas is present, and the
-// version matches the version supported by this library.
-func (p *ProviderSchemas) Validate() error {
- if p == nil {
- return errors.New("provider schema data is nil")
- }
-
- if p.FormatVersion == "" {
- return errors.New("unexpected provider schema data, format version is missing")
- }
-
- constraint, err := version.NewConstraint(ProviderSchemasFormatVersionConstraints)
- if err != nil {
- return fmt.Errorf("invalid version constraint: %w", err)
- }
-
- version, err := version.NewVersion(p.FormatVersion)
- if err != nil {
- return fmt.Errorf("invalid format version %q: %w", p.FormatVersion, err)
- }
-
- if !constraint.Check(version) {
- return fmt.Errorf("unsupported provider schema format version: %q does not satisfy %q",
- version, constraint)
- }
-
- return nil
-}
-
-func (p *ProviderSchemas) UnmarshalJSON(b []byte) error {
- type rawSchemas ProviderSchemas
- var schemas rawSchemas
-
- err := json.Unmarshal(b, &schemas)
- if err != nil {
- return err
- }
-
- *p = *(*ProviderSchemas)(&schemas)
-
- return p.Validate()
-}
-
-// ProviderSchema is the JSON representation of the schema of an
-// entire provider, including the provider configuration and any
-// resources and data sources included with the provider.
-type ProviderSchema struct {
- // The schema for the provider's configuration.
- ConfigSchema *Schema `json:"provider,omitempty"`
-
- // The schemas for any resources in this provider.
- ResourceSchemas map[string]*Schema `json:"resource_schemas,omitempty"`
-
- // The schemas for any data sources in this provider.
- DataSourceSchemas map[string]*Schema `json:"data_source_schemas,omitempty"`
-
- // The definitions for any functions in this provider.
- Functions map[string]*FunctionSignature `json:"functions,omitempty"`
-}
-
-// Schema is the JSON representation of a particular schema
-// (provider configuration, resources, data sources).
-type Schema struct {
- // The version of the particular resource schema.
- Version uint64 `json:"version"`
-
- // The root-level block of configuration values.
- Block *SchemaBlock `json:"block,omitempty"`
-}
-
-// SchemaDescriptionKind describes the format type for a particular description's field.
-type SchemaDescriptionKind string
-
-const (
- // SchemaDescriptionKindPlain indicates a string in plain text format.
- SchemaDescriptionKindPlain SchemaDescriptionKind = "plain"
-
- // SchemaDescriptionKindMarkdown indicates a Markdown string and may need to be
- // processed prior to presentation.
- SchemaDescriptionKindMarkdown SchemaDescriptionKind = "markdown"
-)
-
-// SchemaBlock represents a nested block within a particular schema.
-type SchemaBlock struct {
- // The attributes defined at the particular level of this block.
- Attributes map[string]*SchemaAttribute `json:"attributes,omitempty"`
-
- // Any nested blocks within this particular block.
- NestedBlocks map[string]*SchemaBlockType `json:"block_types,omitempty"`
-
- // The description for this block and format of the description. If
- // no kind is provided, it can be assumed to be plain text.
- Description string `json:"description,omitempty"`
- DescriptionKind SchemaDescriptionKind `json:"description_kind,omitempty"`
-
- // If true, this block is deprecated.
- Deprecated bool `json:"deprecated,omitempty"`
-}
-
-// SchemaNestingMode is the nesting mode for a particular nested
-// schema block.
-type SchemaNestingMode string
-
-const (
- // SchemaNestingModeSingle denotes single block nesting mode, which
- // allows a single block of this specific type only in
- // configuration. This is generally the same as list or set types
- // with a single-element constraint.
- SchemaNestingModeSingle SchemaNestingMode = "single"
-
- // SchemaNestingModeGroup is similar to SchemaNestingModeSingle in that it
- // calls for only a single instance of a given block type with no labels,
- // but it additonally guarantees that its result will never be null,
- // even if the block is absent, and instead the nested attributes
- // and blocks will be treated as absent in that case.
- //
- // This is useful for the situation where a remote API has a feature that
- // is always enabled but has a group of settings related to that feature
- // that themselves have default values. By using SchemaNestingModeGroup
- // instead of SchemaNestingModeSingle in that case, generated plans will
- // show the block as present even when not present in configuration,
- // thus allowing any default values within to be displayed to the user.
- SchemaNestingModeGroup SchemaNestingMode = "group"
-
- // SchemaNestingModeList denotes list block nesting mode, which
- // allows an ordered list of blocks where duplicates are allowed.
- SchemaNestingModeList SchemaNestingMode = "list"
-
- // SchemaNestingModeSet denotes set block nesting mode, which
- // allows an unordered list of blocks where duplicates are
- // generally not allowed. What is considered a duplicate is up to
- // the rules of the set itself, which may or may not cover all
- // fields in the block.
- SchemaNestingModeSet SchemaNestingMode = "set"
-
- // SchemaNestingModeMap denotes map block nesting mode. This
- // creates a map of all declared blocks of the block type within
- // the parent, keying them on the label supplied in the block
- // declaration. This allows for blocks to be declared in the same
- // style as resources.
- SchemaNestingModeMap SchemaNestingMode = "map"
-)
-
-// SchemaBlockType describes a nested block within a schema.
-type SchemaBlockType struct {
- // The nesting mode for this block.
- NestingMode SchemaNestingMode `json:"nesting_mode,omitempty"`
-
- // The block data for this block type, including attributes and
- // subsequent nested blocks.
- Block *SchemaBlock `json:"block,omitempty"`
-
- // The lower limit on items that can be declared of this block
- // type.
- MinItems uint64 `json:"min_items,omitempty"`
-
- // The upper limit on items that can be declared of this block
- // type.
- MaxItems uint64 `json:"max_items,omitempty"`
-}
-
-// SchemaAttribute describes an attribute within a schema block.
-type SchemaAttribute struct {
- // The attribute type
- // Either AttributeType or AttributeNestedType is set, never both.
- AttributeType cty.Type `json:"type,omitempty"`
-
- // Details about a nested attribute type
- // Either AttributeType or AttributeNestedType is set, never both.
- AttributeNestedType *SchemaNestedAttributeType `json:"nested_type,omitempty"`
-
- // The description field for this attribute. If no kind is
- // provided, it can be assumed to be plain text.
- Description string `json:"description,omitempty"`
- DescriptionKind SchemaDescriptionKind `json:"description_kind,omitempty"`
-
- // If true, this attribute is deprecated.
- Deprecated bool `json:"deprecated,omitempty"`
-
- // If true, this attribute is required - it has to be entered in
- // configuration.
- Required bool `json:"required,omitempty"`
-
- // If true, this attribute is optional - it does not need to be
- // entered in configuration.
- Optional bool `json:"optional,omitempty"`
-
- // If true, this attribute is computed - it can be set by the
- // provider. It may also be set by configuration if Optional is
- // true.
- Computed bool `json:"computed,omitempty"`
-
- // If true, this attribute is sensitive and will not be displayed
- // in logs. Future versions of Terraform may encrypt or otherwise
- // treat these values with greater care than non-sensitive fields.
- Sensitive bool `json:"sensitive,omitempty"`
-}
-
-// jsonSchemaAttribute describes an attribute within a schema block
-// in a middle-step internal representation before marshalled into
-// a more useful SchemaAttribute with cty.Type.
-//
-// This avoid panic on marshalling cty.NilType (from cty upstream)
-// which the default Go marshaller cannot ignore because it's a
-// not nil-able struct.
-type jsonSchemaAttribute struct {
- AttributeType json.RawMessage `json:"type,omitempty"`
- AttributeNestedType *SchemaNestedAttributeType `json:"nested_type,omitempty"`
- Description string `json:"description,omitempty"`
- DescriptionKind SchemaDescriptionKind `json:"description_kind,omitempty"`
- Deprecated bool `json:"deprecated,omitempty"`
- Required bool `json:"required,omitempty"`
- Optional bool `json:"optional,omitempty"`
- Computed bool `json:"computed,omitempty"`
- Sensitive bool `json:"sensitive,omitempty"`
-}
-
-func (as *SchemaAttribute) MarshalJSON() ([]byte, error) {
- jsonSa := &jsonSchemaAttribute{
- AttributeNestedType: as.AttributeNestedType,
- Description: as.Description,
- DescriptionKind: as.DescriptionKind,
- Deprecated: as.Deprecated,
- Required: as.Required,
- Optional: as.Optional,
- Computed: as.Computed,
- Sensitive: as.Sensitive,
- }
- if as.AttributeType != cty.NilType {
- attrTy, _ := as.AttributeType.MarshalJSON()
- jsonSa.AttributeType = attrTy
- }
- return json.Marshal(jsonSa)
-}
-
-// SchemaNestedAttributeType describes a nested attribute
-// which could also be just expressed simply as cty.Object(...),
-// cty.List(cty.Object(...)) etc. but this allows tracking additional
-// metadata which can help interpreting or validating the data.
-type SchemaNestedAttributeType struct {
- // A map of nested attributes
- Attributes map[string]*SchemaAttribute `json:"attributes,omitempty"`
-
- // The nesting mode for this attribute.
- NestingMode SchemaNestingMode `json:"nesting_mode,omitempty"`
-
- // The lower limit on number of items that can be declared
- // of this attribute type (not applicable to single nesting mode).
- MinItems uint64 `json:"min_items,omitempty"`
-
- // The upper limit on number of items that can be declared
- // of this attribute type (not applicable to single nesting mode).
- MaxItems uint64 `json:"max_items,omitempty"`
-}
diff --git a/vendor/github.com/hashicorp/terraform-json/state.go b/vendor/github.com/hashicorp/terraform-json/state.go
deleted file mode 100644
index e5336329..00000000
--- a/vendor/github.com/hashicorp/terraform-json/state.go
+++ /dev/null
@@ -1,213 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfjson
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
-
- "github.com/hashicorp/go-version"
- "github.com/zclconf/go-cty/cty"
-)
-
-// StateFormatVersionConstraints defines the versions of the JSON state format
-// that are supported by this package.
-var StateFormatVersionConstraints = ">= 0.1, < 2.0"
-
-// State is the top-level representation of a Terraform state.
-type State struct {
- // useJSONNumber opts into the behavior of calling
- // json.Decoder.UseNumber prior to decoding the state, which turns
- // numbers into json.Numbers instead of float64s. Set it using
- // State.UseJSONNumber.
- useJSONNumber bool
-
- // The version of the state format. This should always match the
- // StateFormatVersion constant in this package, or else am
- // unmarshal will be unstable.
- FormatVersion string `json:"format_version,omitempty"`
-
- // The Terraform version used to make the state.
- TerraformVersion string `json:"terraform_version,omitempty"`
-
- // The values that make up the state.
- Values *StateValues `json:"values,omitempty"`
-
- // Checks contains the results of any conditional checks when Values was
- // last updated.
- Checks []CheckResultStatic `json:"checks,omitempty"`
-}
-
-// UseJSONNumber controls whether the State will be decoded using the
-// json.Number behavior or the float64 behavior. When b is true, the State will
-// represent numbers in StateOutputs as json.Numbers. When b is false, the
-// State will represent numbers in StateOutputs as float64s.
-func (s *State) UseJSONNumber(b bool) {
- s.useJSONNumber = b
-}
-
-// Validate checks to ensure that the state is present, and the
-// version matches the version supported by this library.
-func (s *State) Validate() error {
- if s == nil {
- return errors.New("state is nil")
- }
-
- if s.FormatVersion == "" {
- return errors.New("unexpected state input, format version is missing")
- }
-
- constraint, err := version.NewConstraint(StateFormatVersionConstraints)
- if err != nil {
- return fmt.Errorf("invalid version constraint: %w", err)
- }
-
- version, err := version.NewVersion(s.FormatVersion)
- if err != nil {
- return fmt.Errorf("invalid format version %q: %w", s.FormatVersion, err)
- }
-
- if !constraint.Check(version) {
- return fmt.Errorf("unsupported state format version: %q does not satisfy %q",
- version, constraint)
- }
-
- return nil
-}
-
-func (s *State) UnmarshalJSON(b []byte) error {
- type rawState State
- var state rawState
-
- dec := json.NewDecoder(bytes.NewReader(b))
- if s.useJSONNumber {
- dec.UseNumber()
- }
- err := dec.Decode(&state)
- if err != nil {
- return err
- }
-
- *s = *(*State)(&state)
-
- return s.Validate()
-}
-
-// StateValues is the common representation of resolved values for both the
-// prior state (which is always complete) and the planned new state.
-type StateValues struct {
- // The Outputs for this common state representation.
- Outputs map[string]*StateOutput `json:"outputs,omitempty"`
-
- // The root module in this state representation.
- RootModule *StateModule `json:"root_module,omitempty"`
-}
-
-// StateModule is the representation of a module in the common state
-// representation. This can be the root module or a child module.
-type StateModule struct {
- // All resources or data sources within this module.
- Resources []*StateResource `json:"resources,omitempty"`
-
- // The absolute module address, omitted for the root module.
- Address string `json:"address,omitempty"`
-
- // Any child modules within this module.
- ChildModules []*StateModule `json:"child_modules,omitempty"`
-}
-
-// StateResource is the representation of a resource in the common
-// state representation.
-type StateResource struct {
- // The absolute resource address.
- Address string `json:"address,omitempty"`
-
- // The resource mode.
- Mode ResourceMode `json:"mode,omitempty"`
-
- // The resource type, example: "aws_instance" for aws_instance.foo.
- Type string `json:"type,omitempty"`
-
- // The resource name, example: "foo" for aws_instance.foo.
- Name string `json:"name,omitempty"`
-
- // The instance key for any resources that have been created using
- // "count" or "for_each". If neither of these apply the key will be
- // empty.
- //
- // This value can be either an integer (int) or a string.
- Index interface{} `json:"index,omitempty"`
-
- // The name of the provider this resource belongs to. This allows
- // the provider to be interpreted unambiguously in the unusual
- // situation where a provider offers a resource type whose name
- // does not start with its own name, such as the "googlebeta"
- // provider offering "google_compute_instance".
- ProviderName string `json:"provider_name,omitempty"`
-
- // The version of the resource type schema the "values" property
- // conforms to.
- SchemaVersion uint64 `json:"schema_version,"`
-
- // The JSON representation of the attribute values of the resource,
- // whose structure depends on the resource type schema. Any unknown
- // values are omitted or set to null, making them indistinguishable
- // from absent values.
- AttributeValues map[string]interface{} `json:"values,omitempty"`
-
- // The JSON representation of the sensitivity of the resource's
- // attribute values. Only attributes which are sensitive
- // are included in this structure.
- SensitiveValues json.RawMessage `json:"sensitive_values,omitempty"`
-
- // The addresses of the resources that this resource depends on.
- DependsOn []string `json:"depends_on,omitempty"`
-
- // If true, the resource has been marked as tainted and will be
- // re-created on the next update.
- Tainted bool `json:"tainted,omitempty"`
-
- // DeposedKey is set if the resource instance has been marked Deposed and
- // will be destroyed on the next apply.
- DeposedKey string `json:"deposed_key,omitempty"`
-}
-
-// StateOutput represents an output value in a common state
-// representation.
-type StateOutput struct {
- // Whether or not the output was marked as sensitive.
- Sensitive bool `json:"sensitive"`
-
- // The value of the output.
- Value interface{} `json:"value,omitempty"`
-
- // The type of the output.
- Type cty.Type `json:"type,omitempty"`
-}
-
-// jsonStateOutput describes an output value in a middle-step internal
-// representation before marshalled into a more useful StateOutput with cty.Type.
-//
-// This avoid panic on marshalling cty.NilType (from cty upstream)
-// which the default Go marshaller cannot ignore because it's a
-// not nil-able struct.
-type jsonStateOutput struct {
- Sensitive bool `json:"sensitive"`
- Value interface{} `json:"value,omitempty"`
- Type json.RawMessage `json:"type,omitempty"`
-}
-
-func (so *StateOutput) MarshalJSON() ([]byte, error) {
- jsonSa := &jsonStateOutput{
- Sensitive: so.Sensitive,
- Value: so.Value,
- }
- if so.Type != cty.NilType {
- outputType, _ := so.Type.MarshalJSON()
- jsonSa.Type = outputType
- }
- return json.Marshal(jsonSa)
-}
diff --git a/vendor/github.com/hashicorp/terraform-json/tfjson.go b/vendor/github.com/hashicorp/terraform-json/tfjson.go
deleted file mode 100644
index 3a78a4f0..00000000
--- a/vendor/github.com/hashicorp/terraform-json/tfjson.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-// Package tfjson is a de-coupled helper library containing types for
-// the plan format output by "terraform show -json" command. This
-// command is designed for the export of Terraform plan data in
-// a format that can be easily processed by tools unrelated to
-// Terraform.
-//
-// This format is stable and should be used over the binary plan data
-// whenever possible.
-package tfjson
diff --git a/vendor/github.com/hashicorp/terraform-json/validate.go b/vendor/github.com/hashicorp/terraform-json/validate.go
deleted file mode 100644
index 53652eff..00000000
--- a/vendor/github.com/hashicorp/terraform-json/validate.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfjson
-
-import (
- "encoding/json"
- "errors"
- "fmt"
-
- "github.com/hashicorp/go-version"
-)
-
-// ValidateFormatVersionConstraints defines the versions of the JSON
-// validate format that are supported by this package.
-var ValidateFormatVersionConstraints = ">= 0.1, < 2.0"
-
-// Pos represents a position in a config file
-type Pos struct {
- Line int `json:"line"`
- Column int `json:"column"`
- Byte int `json:"byte"`
-}
-
-// Range represents a range of bytes between two positions
-type Range struct {
- Filename string `json:"filename"`
- Start Pos `json:"start"`
- End Pos `json:"end"`
-}
-
-type DiagnosticSeverity string
-
-// These severities map to the tfdiags.Severity values, plus an explicit
-// unknown in case that enum grows without us noticing here.
-const (
- DiagnosticSeverityUnknown DiagnosticSeverity = "unknown"
- DiagnosticSeverityError DiagnosticSeverity = "error"
- DiagnosticSeverityWarning DiagnosticSeverity = "warning"
-)
-
-// Diagnostic represents information to be presented to a user about an
-// error or anomaly in parsing or evaluating configuration
-type Diagnostic struct {
- Severity DiagnosticSeverity `json:"severity,omitempty"`
-
- Summary string `json:"summary,omitempty"`
- Detail string `json:"detail,omitempty"`
- Range *Range `json:"range,omitempty"`
-
- Snippet *DiagnosticSnippet `json:"snippet,omitempty"`
-}
-
-// DiagnosticSnippet represents source code information about the diagnostic.
-// It is possible for a diagnostic to have a source (and therefore a range) but
-// no source code can be found. In this case, the range field will be present and
-// the snippet field will not.
-type DiagnosticSnippet struct {
- // Context is derived from HCL's hcled.ContextString output. This gives a
- // high-level summary of the root context of the diagnostic: for example,
- // the resource block in which an expression causes an error.
- Context *string `json:"context"`
-
- // Code is a possibly-multi-line string of Terraform configuration, which
- // includes both the diagnostic source and any relevant context as defined
- // by the diagnostic.
- Code string `json:"code"`
-
- // StartLine is the line number in the source file for the first line of
- // the snippet code block. This is not necessarily the same as the value of
- // Range.Start.Line, as it is possible to have zero or more lines of
- // context source code before the diagnostic range starts.
- StartLine int `json:"start_line"`
-
- // HighlightStartOffset is the character offset into Code at which the
- // diagnostic source range starts, which ought to be highlighted as such by
- // the consumer of this data.
- HighlightStartOffset int `json:"highlight_start_offset"`
-
- // HighlightEndOffset is the character offset into Code at which the
- // diagnostic source range ends.
- HighlightEndOffset int `json:"highlight_end_offset"`
-
- // Values is a sorted slice of expression values which may be useful in
- // understanding the source of an error in a complex expression.
- Values []DiagnosticExpressionValue `json:"values"`
-}
-
-// DiagnosticExpressionValue represents an HCL traversal string (e.g.
-// "var.foo") and a statement about its value while the expression was
-// evaluated (e.g. "is a string", "will be known only after apply"). These are
-// intended to help the consumer diagnose why an expression caused a diagnostic
-// to be emitted.
-type DiagnosticExpressionValue struct {
- Traversal string `json:"traversal"`
- Statement string `json:"statement"`
-}
-
-// ValidateOutput represents JSON output from terraform validate
-// (available from 0.12 onwards)
-type ValidateOutput struct {
- FormatVersion string `json:"format_version"`
-
- Valid bool `json:"valid"`
- ErrorCount int `json:"error_count"`
- WarningCount int `json:"warning_count"`
- Diagnostics []Diagnostic `json:"diagnostics"`
-}
-
-// Validate checks to ensure that data is present, and the
-// version matches the version supported by this library.
-func (vo *ValidateOutput) Validate() error {
- if vo == nil {
- return errors.New("validation output is nil")
- }
-
- if vo.FormatVersion == "" {
- // The format was not versioned in the past
- return nil
- }
-
- constraint, err := version.NewConstraint(ValidateFormatVersionConstraints)
- if err != nil {
- return fmt.Errorf("invalid version constraint: %w", err)
- }
-
- version, err := version.NewVersion(vo.FormatVersion)
- if err != nil {
- return fmt.Errorf("invalid format version %q: %w", vo.FormatVersion, err)
- }
-
- if !constraint.Check(version) {
- return fmt.Errorf("unsupported validation output format version: %q does not satisfy %q",
- version, constraint)
- }
-
- return nil
-}
-
-func (vo *ValidateOutput) UnmarshalJSON(b []byte) error {
- type rawOutput ValidateOutput
- var schemas rawOutput
-
- err := json.Unmarshal(b, &schemas)
- if err != nil {
- return err
- }
-
- *vo = *(*ValidateOutput)(&schemas)
-
- return vo.Validate()
-}
diff --git a/vendor/github.com/hashicorp/terraform-json/version.go b/vendor/github.com/hashicorp/terraform-json/version.go
deleted file mode 100644
index 7516ad6d..00000000
--- a/vendor/github.com/hashicorp/terraform-json/version.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfjson
-
-// VersionOutput represents output from the version -json command
-// added in v0.13
-type VersionOutput struct {
- Version string `json:"terraform_version"`
- Revision string `json:"terraform_revision"`
- Platform string `json:"platform,omitempty"`
- ProviderSelections map[string]string `json:"provider_selections"`
- Outdated bool `json:"terraform_outdated"`
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/LICENSE b/vendor/github.com/hashicorp/terraform-plugin-go/LICENSE
deleted file mode 100644
index e5ead304..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/LICENSE
+++ /dev/null
@@ -1,356 +0,0 @@
-Copyright (c) 2020 HashiCorp, Inc.
-
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. “Contributor”
-
- means each individual or legal entity that creates, contributes to the
- creation of, or owns Covered Software.
-
-1.2. “Contributor Version”
-
- means the combination of the Contributions of others (if any) used by a
- Contributor and that particular Contributor’s Contribution.
-
-1.3. “Contribution”
-
- means Covered Software of a particular Contributor.
-
-1.4. “Covered Software”
-
- means Source Code Form to which the initial Contributor has attached the
- notice in Exhibit A, the Executable Form of such Source Code Form, and
- Modifications of such Source Code Form, in each case including portions
- thereof.
-
-1.5. “Incompatible With Secondary Licenses”
- means
-
- a. that the initial Contributor has attached the notice described in
- Exhibit B to the Covered Software; or
-
- b. that the Covered Software was made available under the terms of version
- 1.1 or earlier of the License, but not also under the terms of a
- Secondary License.
-
-1.6. “Executable Form”
-
- means any form of the work other than Source Code Form.
-
-1.7. “Larger Work”
-
- means a work that combines Covered Software with other material, in a separate
- file or files, that is not Covered Software.
-
-1.8. “License”
-
- means this document.
-
-1.9. “Licensable”
-
- means having the right to grant, to the maximum extent possible, whether at the
- time of the initial grant or subsequently, any and all of the rights conveyed by
- this License.
-
-1.10. “Modifications”
-
- means any of the following:
-
- a. any file in Source Code Form that results from an addition to, deletion
- from, or modification of the contents of Covered Software; or
-
- b. any new file in Source Code Form that contains any Covered Software.
-
-1.11. “Patent Claims” of a Contributor
-
- means any patent claim(s), including without limitation, method, process,
- and apparatus claims, in any patent Licensable by such Contributor that
- would be infringed, but for the grant of the License, by the making,
- using, selling, offering for sale, having made, import, or transfer of
- either its Contributions or its Contributor Version.
-
-1.12. “Secondary License”
-
- means either the GNU General Public License, Version 2.0, the GNU Lesser
- General Public License, Version 2.1, the GNU Affero General Public
- License, Version 3.0, or any later versions of those licenses.
-
-1.13. “Source Code Form”
-
- means the form of the work preferred for making modifications.
-
-1.14. “You” (or “Your”)
-
- means an individual or a legal entity exercising rights under this
- License. For legal entities, “You” includes any entity that controls, is
- controlled by, or is under common control with You. For purposes of this
- definition, “control” means (a) the power, direct or indirect, to cause
- the direction or management of such entity, whether by contract or
- otherwise, or (b) ownership of more than fifty percent (50%) of the
- outstanding shares or beneficial ownership of such entity.
-
-
-2. License Grants and Conditions
-
-2.1. Grants
-
- Each Contributor hereby grants You a world-wide, royalty-free,
- non-exclusive license:
-
- a. under intellectual property rights (other than patent or trademark)
- Licensable by such Contributor to use, reproduce, make available,
- modify, display, perform, distribute, and otherwise exploit its
- Contributions, either on an unmodified basis, with Modifications, or as
- part of a Larger Work; and
-
- b. under Patent Claims of such Contributor to make, use, sell, offer for
- sale, have made, import, and otherwise transfer either its Contributions
- or its Contributor Version.
-
-2.2. Effective Date
-
- The licenses granted in Section 2.1 with respect to any Contribution become
- effective for each Contribution on the date the Contributor first distributes
- such Contribution.
-
-2.3. Limitations on Grant Scope
-
- The licenses granted in this Section 2 are the only rights granted under this
- License. No additional rights or licenses will be implied from the distribution
- or licensing of Covered Software under this License. Notwithstanding Section
- 2.1(b) above, no patent license is granted by a Contributor:
-
- a. for any code that a Contributor has removed from Covered Software; or
-
- b. for infringements caused by: (i) Your and any other third party’s
- modifications of Covered Software, or (ii) the combination of its
- Contributions with other software (except as part of its Contributor
- Version); or
-
- c. under Patent Claims infringed by Covered Software in the absence of its
- Contributions.
-
- This License does not grant any rights in the trademarks, service marks, or
- logos of any Contributor (except as may be necessary to comply with the
- notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
- No Contributor makes additional grants as a result of Your choice to
- distribute the Covered Software under a subsequent version of this License
- (see Section 10.2) or under the terms of a Secondary License (if permitted
- under the terms of Section 3.3).
-
-2.5. Representation
-
- Each Contributor represents that the Contributor believes its Contributions
- are its original creation(s) or it has sufficient rights to grant the
- rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
- This License is not intended to limit any rights You have under applicable
- copyright doctrines of fair use, fair dealing, or other equivalents.
-
-2.7. Conditions
-
- Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
- Section 2.1.
-
-
-3. Responsibilities
-
-3.1. Distribution of Source Form
-
- All distribution of Covered Software in Source Code Form, including any
- Modifications that You create or to which You contribute, must be under the
- terms of this License. You must inform recipients that the Source Code Form
- of the Covered Software is governed by the terms of this License, and how
- they can obtain a copy of this License. You may not attempt to alter or
- restrict the recipients’ rights in the Source Code Form.
-
-3.2. Distribution of Executable Form
-
- If You distribute Covered Software in Executable Form then:
-
- a. such Covered Software must also be made available in Source Code Form,
- as described in Section 3.1, and You must inform recipients of the
- Executable Form how they can obtain a copy of such Source Code Form by
- reasonable means in a timely manner, at a charge no more than the cost
- of distribution to the recipient; and
-
- b. You may distribute such Executable Form under the terms of this License,
- or sublicense it under different terms, provided that the license for
- the Executable Form does not attempt to limit or alter the recipients’
- rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
- You may create and distribute a Larger Work under terms of Your choice,
- provided that You also comply with the requirements of this License for the
- Covered Software. If the Larger Work is a combination of Covered Software
- with a work governed by one or more Secondary Licenses, and the Covered
- Software is not Incompatible With Secondary Licenses, this License permits
- You to additionally distribute such Covered Software under the terms of
- such Secondary License(s), so that the recipient of the Larger Work may, at
- their option, further distribute the Covered Software under the terms of
- either this License or such Secondary License(s).
-
-3.4. Notices
-
- You may not remove or alter the substance of any license notices (including
- copyright notices, patent notices, disclaimers of warranty, or limitations
- of liability) contained within the Source Code Form of the Covered
- Software, except that You may alter any license notices to the extent
- required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
- You may choose to offer, and to charge a fee for, warranty, support,
- indemnity or liability obligations to one or more recipients of Covered
- Software. However, You may do so only on Your own behalf, and not on behalf
- of any Contributor. You must make it absolutely clear that any such
- warranty, support, indemnity, or liability obligation is offered by You
- alone, and You hereby agree to indemnify every Contributor for any
- liability incurred by such Contributor as a result of warranty, support,
- indemnity or liability terms You offer. You may include additional
- disclaimers of warranty and limitations of liability specific to any
- jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
-
- If it is impossible for You to comply with any of the terms of this License
- with respect to some or all of the Covered Software due to statute, judicial
- order, or regulation then You must: (a) comply with the terms of this License
- to the maximum extent possible; and (b) describe the limitations and the code
- they affect. Such description must be placed in a text file included with all
- distributions of the Covered Software under this License. Except to the
- extent prohibited by statute or regulation, such description must be
- sufficiently detailed for a recipient of ordinary skill to be able to
- understand it.
-
-5. Termination
-
-5.1. The rights granted under this License will terminate automatically if You
- fail to comply with any of its terms. However, if You become compliant,
- then the rights granted under this License from a particular Contributor
- are reinstated (a) provisionally, unless and until such Contributor
- explicitly and finally terminates Your grants, and (b) on an ongoing basis,
- if such Contributor fails to notify You of the non-compliance by some
- reasonable means prior to 60 days after You have come back into compliance.
- Moreover, Your grants from a particular Contributor are reinstated on an
- ongoing basis if such Contributor notifies You of the non-compliance by
- some reasonable means, this is the first time You have received notice of
- non-compliance with this License from such Contributor, and You become
- compliant prior to 30 days after Your receipt of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
- infringement claim (excluding declaratory judgment actions, counter-claims,
- and cross-claims) alleging that a Contributor Version directly or
- indirectly infringes any patent, then the rights granted to You by any and
- all Contributors for the Covered Software under Section 2.1 of this License
- shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
- license agreements (excluding distributors and resellers) which have been
- validly granted by You or Your distributors under this License prior to
- termination shall survive termination.
-
-6. Disclaimer of Warranty
-
- Covered Software is provided under this License on an “as is” basis, without
- warranty of any kind, either expressed, implied, or statutory, including,
- without limitation, warranties that the Covered Software is free of defects,
- merchantable, fit for a particular purpose or non-infringing. The entire
- risk as to the quality and performance of the Covered Software is with You.
- Should any Covered Software prove defective in any respect, You (not any
- Contributor) assume the cost of any necessary servicing, repair, or
- correction. This disclaimer of warranty constitutes an essential part of this
- License. No use of any Covered Software is authorized under this License
- except under this disclaimer.
-
-7. Limitation of Liability
-
- Under no circumstances and under no legal theory, whether tort (including
- negligence), contract, or otherwise, shall any Contributor, or anyone who
- distributes Covered Software as permitted above, be liable to You for any
- direct, indirect, special, incidental, or consequential damages of any
- character including, without limitation, damages for lost profits, loss of
- goodwill, work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses, even if such party shall have been
- informed of the possibility of such damages. This limitation of liability
- shall not apply to liability for death or personal injury resulting from such
- party’s negligence to the extent applicable law prohibits such limitation.
- Some jurisdictions do not allow the exclusion or limitation of incidental or
- consequential damages, so this exclusion and limitation may not apply to You.
-
-8. Litigation
-
- Any litigation relating to this License may be brought only in the courts of
- a jurisdiction where the defendant maintains its principal place of business
- and such litigation shall be governed by laws of that jurisdiction, without
- reference to its conflict-of-law provisions. Nothing in this Section shall
- prevent a party’s ability to bring cross-claims or counter-claims.
-
-9. Miscellaneous
-
- This License represents the complete agreement concerning the subject matter
- hereof. If any provision of this License is held to be unenforceable, such
- provision shall be reformed only to the extent necessary to make it
- enforceable. Any law or regulation which provides that the language of a
- contract shall be construed against the drafter shall not be used to construe
- this License against a Contributor.
-
-
-10. Versions of the License
-
-10.1. New Versions
-
- Mozilla Foundation is the license steward. Except as provided in Section
- 10.3, no one other than the license steward has the right to modify or
- publish new versions of this License. Each version will be given a
- distinguishing version number.
-
-10.2. Effect of New Versions
-
- You may distribute the Covered Software under the terms of the version of
- the License under which You originally received the Covered Software, or
- under the terms of any subsequent version published by the license
- steward.
-
-10.3. Modified Versions
-
- If you create software not governed by this License, and you want to
- create a new license for such software, you may create and use a modified
- version of this License if you rename the license and remove any
- references to the name of the license steward (except to note that such
- modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
- If You choose to distribute Source Code Form that is Incompatible With
- Secondary Licenses under the terms of this version of the License, the
- notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
-
- This Source Code Form is subject to the
- terms of the Mozilla Public License, v.
- 2.0. If a copy of the MPL was not
- distributed with this file, You can
- obtain one at
- http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file, then
-You may include the notice in a location (such as a LICENSE file in a relevant
-directory) where a recipient would be likely to look for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - “Incompatible With Secondary Licenses” Notice
-
- This Source Code Form is “Incompatible
- With Secondary Licenses”, as defined by
- the Mozilla Public License, v. 2.0.
-
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/context.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/context.go
deleted file mode 100644
index d99e1979..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/context.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package logging
-
-import (
- "context"
-
- "github.com/hashicorp/go-uuid"
- "github.com/hashicorp/terraform-plugin-log/tflog"
- "github.com/hashicorp/terraform-plugin-log/tfsdklog"
-)
-
-// DataSourceContext injects the data source type into logger contexts.
-func DataSourceContext(ctx context.Context, dataSource string) context.Context {
- ctx = tfsdklog.SetField(ctx, KeyDataSourceType, dataSource)
- ctx = tfsdklog.SubsystemSetField(ctx, SubsystemProto, KeyDataSourceType, dataSource)
- ctx = tflog.SetField(ctx, KeyDataSourceType, dataSource)
-
- return ctx
-}
-
-// InitContext creates SDK and provider logger contexts.
-func InitContext(ctx context.Context, sdkOpts tfsdklog.Options, providerOpts tflog.Options) context.Context {
- ctx = tfsdklog.NewRootSDKLogger(ctx, append(tfsdklog.Options{
- tfsdklog.WithLevelFromEnv(EnvTfLogSdk),
- }, sdkOpts...)...)
- ctx = ProtoSubsystemContext(ctx, sdkOpts)
- ctx = tfsdklog.NewRootProviderLogger(ctx, providerOpts...)
-
- return ctx
-}
-
-// ProtoSubsystemContext adds the proto subsystem to the SDK logger context.
-func ProtoSubsystemContext(ctx context.Context, sdkOpts tfsdklog.Options) context.Context {
- ctx = tfsdklog.NewSubsystem(ctx, SubsystemProto, append(tfsdklog.Options{
- // All calls are through the Protocol* helper functions
- tfsdklog.WithAdditionalLocationOffset(1),
- tfsdklog.WithLevelFromEnv(EnvTfLogSdkProto),
- }, sdkOpts...)...)
-
- return ctx
-}
-
-// ProtocolVersionContext injects the protocol version into logger contexts.
-func ProtocolVersionContext(ctx context.Context, protocolVersion string) context.Context {
- ctx = tfsdklog.SubsystemSetField(ctx, SubsystemProto, KeyProtocolVersion, protocolVersion)
-
- return ctx
-}
-
-// ProviderAddressContext injects the provider address into logger contexts.
-func ProviderAddressContext(ctx context.Context, providerAddress string) context.Context {
- ctx = tfsdklog.SetField(ctx, KeyProviderAddress, providerAddress)
- ctx = tfsdklog.SubsystemSetField(ctx, SubsystemProto, KeyProviderAddress, providerAddress)
- ctx = tflog.SetField(ctx, KeyProviderAddress, providerAddress)
-
- return ctx
-}
-
-// RequestIdContext injects a unique request ID into logger contexts.
-func RequestIdContext(ctx context.Context) context.Context {
- reqID, err := uuid.GenerateUUID()
-
- if err != nil {
- reqID = "unable to assign request ID: " + err.Error()
- }
-
- ctx = tfsdklog.SetField(ctx, KeyRequestID, reqID)
- ctx = tfsdklog.SubsystemSetField(ctx, SubsystemProto, KeyRequestID, reqID)
- ctx = tflog.SetField(ctx, KeyRequestID, reqID)
-
- return ctx
-}
-
-// ResourceContext injects the resource type into logger contexts.
-func ResourceContext(ctx context.Context, resource string) context.Context {
- ctx = tfsdklog.SetField(ctx, KeyResourceType, resource)
- ctx = tfsdklog.SubsystemSetField(ctx, SubsystemProto, KeyResourceType, resource)
- ctx = tflog.SetField(ctx, KeyResourceType, resource)
-
- return ctx
-}
-
-// RpcContext injects the RPC name into logger contexts.
-func RpcContext(ctx context.Context, rpc string) context.Context {
- ctx = tfsdklog.SetField(ctx, KeyRPC, rpc)
- ctx = tfsdklog.SubsystemSetField(ctx, SubsystemProto, KeyRPC, rpc)
- ctx = tflog.SetField(ctx, KeyRPC, rpc)
-
- return ctx
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/doc.go
deleted file mode 100644
index 1d29f515..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/doc.go
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-// Package logging contains shared environment variable and log functionality.
-package logging
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/environment_variables.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/environment_variables.go
deleted file mode 100644
index c2033457..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/environment_variables.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package logging
-
-// Environment variables.
-const (
- // EnvTfLogProvider is the prefix of the environment variable that sets the
- // logging level of the root provider logger for the provider being served.
- // The suffix is an underscore and the parsed provider name. For example,
- // registry.terraform.io/hashicorp/example becomes TF_LOG_PROVIDER_EXAMPLE.
- EnvTfLogProvider = "TF_LOG_PROVIDER"
-
- // EnvTfLogSdk is an environment variable that sets the root logging level
- // of SDK loggers.
- EnvTfLogSdk = "TF_LOG_SDK"
-
- // EnvTfLogSdkProto is an environment variable that sets the logging level
- // of SDK protocol loggers. Infers root SDK logging level, if unset.
- EnvTfLogSdkProto = "TF_LOG_SDK_PROTO"
-
- // EnvTfLogSdkProtoDataDir is an environment variable that sets the
- // directory to write raw protocol data files for debugging purposes.
- EnvTfLogSdkProtoDataDir = "TF_LOG_SDK_PROTO_DATA_DIR"
-)
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/keys.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/keys.go
deleted file mode 100644
index fb821442..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/keys.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package logging
-
-// Global logging keys attached to all requests.
-//
-// Practitioners or tooling reading logs may be depending on these keys, so be
-// conscious of that when changing them.
-const (
- // Attribute of the diagnostic being logged.
- KeyDiagnosticAttribute = "diagnostic_attribute"
-
- // Number of the error diagnostics.
- KeyDiagnosticErrorCount = "diagnostic_error_count"
-
- // Severity of the diagnostic being logged.
- KeyDiagnosticSeverity = "diagnostic_severity"
-
- // Detail of the diagnostic being logged.
- KeyDiagnosticDetail = "diagnostic_detail"
-
- // Summary of the diagnostic being logged.
- KeyDiagnosticSummary = "diagnostic_summary"
-
- // Number of the warning diagnostics.
- KeyDiagnosticWarningCount = "diagnostic_warning_count"
-
- // Underlying error string
- KeyError = "error"
-
- // Argument position of the function error.
- KeyFunctionErrorArgument = "function_error_argument"
-
- // Boolean indicating presence of function error
- KeyFunctionErrorExists = "function_error_exists"
-
- // Message of the function error.
- KeyFunctionErrorText = "function_error_text"
-
- // Duration in milliseconds for the RPC request
- KeyRequestDurationMs = "tf_req_duration_ms"
-
- // A unique ID for the RPC request
- KeyRequestID = "tf_req_id"
-
- // The full address of the provider, such as
- // registry.terraform.io/hashicorp/random
- KeyProviderAddress = "tf_provider_addr"
-
- // The RPC being run, such as "ApplyResourceChange"
- KeyRPC = "tf_rpc"
-
- // The type of resource being operated on, such as "random_pet"
- KeyResourceType = "tf_resource_type"
-
- // The type of data source being operated on, such as "archive_file"
- KeyDataSourceType = "tf_data_source_type"
-
- // Path to protocol data file, such as "/tmp/example.json"
- KeyProtocolDataFile = "tf_proto_data_file"
-
- // The protocol version being used, as a string, such as "6"
- KeyProtocolVersion = "tf_proto_version"
-
- // The Deferred reason for an RPC response
- KeyDeferredReason = "tf_deferred_reason"
-
- // Whether the GetProviderSchemaOptional server capability is enabled
- KeyServerCapabilityGetProviderSchemaOptional = "tf_server_capability_get_provider_schema_optional"
-
- // Whether the PlanDestroy server capability is enabled
- KeyServerCapabilityPlanDestroy = "tf_server_capability_plan_destroy"
-
- // Whether the DeferralAllowed client capability is enabled
- KeyClientCapabilityDeferralAllowed = "tf_client_capability_deferral_allowed"
-)
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol.go
deleted file mode 100644
index a1d49eae..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package logging
-
-import (
- "context"
-
- "github.com/hashicorp/terraform-plugin-log/tfsdklog"
-)
-
-const (
- // SubsystemProto is the tfsdklog subsystem name for protocol logging.
- SubsystemProto = "proto"
-)
-
-// ProtocolError emits a protocol subsystem log at ERROR level.
-func ProtocolError(ctx context.Context, msg string, additionalFields ...map[string]interface{}) {
- tfsdklog.SubsystemError(ctx, SubsystemProto, msg, additionalFields...)
-}
-
-// ProtocolWarn emits a protocol subsystem log at WARN level.
-func ProtocolWarn(ctx context.Context, msg string, additionalFields ...map[string]interface{}) {
- tfsdklog.SubsystemWarn(ctx, SubsystemProto, msg, additionalFields...)
-}
-
-// ProtocolTrace emits a protocol subsystem log at TRACE level.
-func ProtocolTrace(ctx context.Context, msg string, additionalFields ...map[string]interface{}) {
- tfsdklog.SubsystemTrace(ctx, SubsystemProto, msg, additionalFields...)
-}
-
-// ProtocolSetField returns a context with the additional protocol subsystem
-// field set.
-func ProtocolSetField(ctx context.Context, key string, value any) context.Context {
- return tfsdklog.SubsystemSetField(ctx, SubsystemProto, key, value)
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol_data.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol_data.go
deleted file mode 100644
index e96188f5..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/protocol_data.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package logging
-
-import (
- "context"
- "fmt"
- "os"
- "path"
- "sync"
- "time"
-
- "github.com/hashicorp/terraform-plugin-go/tfprotov5"
- "github.com/hashicorp/terraform-plugin-go/tfprotov6"
-)
-
-const (
- // fileExtEmpty is the file extension for empty data.
- // Empty data may be expected, depending on the RPC.
- fileExtEmpty = "empty"
-
- // fileExtJson is the file extension for JSON data.
- fileExtJson = "json"
-
- // fileExtMsgpack is the file extension for MessagePack data.
- fileExtMsgpack = "msgpack"
-)
-
-var protocolDataSkippedLog sync.Once
-
-// ProtocolData emits raw protocol data to a file, if given a directory.
-//
-// The directory must exist and be writable, prior to invoking this function.
-//
-// File names are in the format: {TIME}_{RPC}_{MESSAGE}_{FIELD}.{EXT}
-func ProtocolData(ctx context.Context, dataDir string, rpc string, message string, field string, data interface{}) {
- if dataDir == "" {
- // Write a log, only once, that explains how to enable this functionality.
- protocolDataSkippedLog.Do(func() {
- ProtocolTrace(ctx, "Skipping protocol data file writing because no data directory is set. "+
- fmt.Sprintf("Use the %s environment variable to enable this functionality.", EnvTfLogSdkProtoDataDir))
- })
-
- return
- }
-
- var fileContents []byte
- var fileExtension string
-
- switch data := data.(type) {
- case *tfprotov5.DynamicValue:
- fileExtension, fileContents = protocolDataDynamicValue5(ctx, data)
- case *tfprotov6.DynamicValue:
- fileExtension, fileContents = protocolDataDynamicValue6(ctx, data)
- default:
- ProtocolError(ctx, fmt.Sprintf("Skipping unknown protocol data type: %T", data))
- return
- }
-
- writeProtocolFile(ctx, dataDir, rpc, message, field, fileExtension, fileContents)
-}
-
-// ProtocolPrivateData emits raw protocol private data to a file, if given a
-// directory. This data is "private" in the sense that it is provider-owned,
-// rather than something managed by Terraform.
-//
-// The directory must exist and be writable, prior to invoking this function.
-//
-// File names are in the format: {TIME}_{RPC}_{MESSAGE}_{FIELD}(.empty)
-func ProtocolPrivateData(ctx context.Context, dataDir string, rpc string, message string, field string, data []byte) {
- if dataDir == "" {
- // Write a log, only once, that explains how to enable this functionality.
- protocolDataSkippedLog.Do(func() {
- ProtocolTrace(ctx, "Skipping protocol data file writing because no data directory is set. "+
- fmt.Sprintf("Use the %s environment variable to enable this functionality.", EnvTfLogSdkProtoDataDir))
- })
-
- return
- }
-
- var fileExtension string
-
- if len(data) == 0 {
- fileExtension = fileExtEmpty
- }
-
- writeProtocolFile(ctx, dataDir, rpc, message, field, fileExtension, data)
-}
-
-func protocolDataDynamicValue5(_ context.Context, value *tfprotov5.DynamicValue) (string, []byte) {
- if value == nil {
- return fileExtEmpty, nil
- }
-
- // (tfprotov5.DynamicValue).Unmarshal() prefers JSON first, so prefer to
- // output JSON if found.
- if len(value.JSON) > 0 {
- return fileExtJson, value.JSON
- }
-
- if len(value.MsgPack) > 0 {
- return fileExtMsgpack, value.MsgPack
- }
-
- return fileExtEmpty, nil
-}
-
-func protocolDataDynamicValue6(_ context.Context, value *tfprotov6.DynamicValue) (string, []byte) {
- if value == nil {
- return fileExtEmpty, nil
- }
-
- // (tfprotov6.DynamicValue).Unmarshal() prefers JSON first, so prefer to
- // output JSON if found.
- if len(value.JSON) > 0 {
- return fileExtJson, value.JSON
- }
-
- if len(value.MsgPack) > 0 {
- return fileExtMsgpack, value.MsgPack
- }
-
- return fileExtEmpty, nil
-}
-
-func writeProtocolFile(ctx context.Context, dataDir string, rpc string, message string, field string, fileExtension string, fileContents []byte) {
- fileName := fmt.Sprintf("%d_%s_%s_%s", time.Now().UnixMilli(), rpc, message, field)
-
- if fileExtension != "" {
- fileName += "." + fileExtension
- }
-
- filePath := path.Join(dataDir, fileName)
- ctx = ProtocolSetField(ctx, KeyProtocolDataFile, filePath)
-
- ProtocolTrace(ctx, "Writing protocol data file")
-
- err := os.WriteFile(filePath, fileContents, 0644)
-
- if err != nil {
- ProtocolError(ctx, "Unable to write protocol data file", map[string]any{KeyError: err.Error()})
- return
- }
-
- ProtocolTrace(ctx, "Wrote protocol data file")
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/provider.go b/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/provider.go
deleted file mode 100644
index 1ece6992..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/provider.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package logging
-
-import (
- "log"
- "strings"
-
- tfaddr "github.com/hashicorp/terraform-registry-address"
-)
-
-func ProviderLoggerName(providerAddress string) string {
- provider, err := tfaddr.ParseProviderSource(providerAddress)
- if err != nil {
- log.Printf("[ERROR] Error parsing provider name %q: %s", providerAddress, err)
- return ""
- }
-
- return strings.ReplaceAll(provider.Type, "-", "_")
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/client_capabilities.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/client_capabilities.go
deleted file mode 100644
index ba01cd8b..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/client_capabilities.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfprotov5
-
-// ConfigureProviderClientCapabilities allows Terraform to publish information
-// regarding optionally supported protocol features for the ConfigureProvider RPC,
-// such as forward-compatible Terraform behavior changes.
-type ConfigureProviderClientCapabilities struct {
- // DeferralAllowed signals that the request from Terraform is able to
- // handle deferred responses from the provider.
- DeferralAllowed bool
-}
-
-// ReadDataSourceClientCapabilities allows Terraform to publish information
-// regarding optionally supported protocol features for the ReadDataSource RPC,
-// such as forward-compatible Terraform behavior changes.
-type ReadDataSourceClientCapabilities struct {
- // DeferralAllowed signals that the request from Terraform is able to
- // handle deferred responses from the provider.
- DeferralAllowed bool
-}
-
-// ReadResourceClientCapabilities allows Terraform to publish information
-// regarding optionally supported protocol features for the ReadResource RPC,
-// such as forward-compatible Terraform behavior changes.
-type ReadResourceClientCapabilities struct {
- // DeferralAllowed signals that the request from Terraform is able to
- // handle deferred responses from the provider.
- DeferralAllowed bool
-}
-
-// PlanResourceChangeClientCapabilities allows Terraform to publish information
-// regarding optionally supported protocol features for the PlanResourceChange RPC,
-// such as forward-compatible Terraform behavior changes.
-type PlanResourceChangeClientCapabilities struct {
- // DeferralAllowed signals that the request from Terraform is able to
- // handle deferred responses from the provider.
- DeferralAllowed bool
-}
-
-// ImportResourceStateClientCapabilities allows Terraform to publish information
-// regarding optionally supported protocol features for the ImportResourceState RPC,
-// such as forward-compatible Terraform behavior changes.
-type ImportResourceStateClientCapabilities struct {
- // DeferralAllowed signals that the request from Terraform is able to
- // handle deferred responses from the provider.
- DeferralAllowed bool
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/data_source.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/data_source.go
deleted file mode 100644
index df1a2814..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/data_source.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfprotov5
-
-import (
- "context"
-)
-
-// DataSourceMetadata describes metadata for a data resource in the GetMetadata
-// RPC.
-type DataSourceMetadata struct {
- // TypeName is the name of the data resource.
- TypeName string
-}
-
-// DataSourceServer is an interface containing the methods a data source
-// implementation needs to fill.
-type DataSourceServer interface {
- // ValidateDataSourceConfig is called when Terraform is checking that a
- // data source's configuration is valid. It is guaranteed to have types
- // conforming to your schema, but it is not guaranteed that all values
- // will be known. This is your opportunity to do custom or advanced
- // validation prior to a plan being generated.
- ValidateDataSourceConfig(context.Context, *ValidateDataSourceConfigRequest) (*ValidateDataSourceConfigResponse, error)
-
- // ReadDataSource is called when Terraform is refreshing a data
- // source's state.
- ReadDataSource(context.Context, *ReadDataSourceRequest) (*ReadDataSourceResponse, error)
-}
-
-// ValidateDataSourceConfigRequest is the request Terraform sends when it wants
-// to validate a data source's configuration.
-type ValidateDataSourceConfigRequest struct {
- // TypeName is the type of data source Terraform is validating.
- TypeName string
-
- // Config is the configuration the user supplied for that data source.
- // See the documentation on `DynamicValue` for more information about
- // safely accessing the configuration.
- //
- // The configuration is represented as a tftypes.Object, with each
- // attribute and nested block getting its own key and value.
- //
- // This configuration may contain unknown values if a user uses
- // interpolation or other functionality that would prevent Terraform
- // from knowing the value at request time.
- Config *DynamicValue
-}
-
-// ValidateDataSourceConfigResponse is the response from the provider about the
-// validity of a data source's configuration.
-type ValidateDataSourceConfigResponse struct {
- // Diagnostics report errors or warnings related to the given
- // configuration. Returning an empty slice indicates a successful
- // validation with no warnings or errors generated.
- Diagnostics []*Diagnostic
-}
-
-// ReadDataSourceRequest is the request Terraform sends when it wants to get
-// the latest state for a data source.
-type ReadDataSourceRequest struct {
- // TypeName is the type of data source Terraform is requesting an
- // updated state for.
- TypeName string
-
- // Config is the configuration the user supplied for that data source.
- // See the documentation on `DynamicValue` for information about safely
- // accessing the configuration.
- //
- // The configuration is represented as a tftypes.Object, with each
- // attribute and nested block getting its own key and value.
- //
- // This configuration may have unknown values.
- Config *DynamicValue
-
- // ProviderMeta supplies the provider metadata configuration for the
- // module this data source is in. Module-specific provider metadata is
- // an advanced feature and usage of it should be coordinated with the
- // Terraform Core team by raising an issue at
- // https://github.com/hashicorp/terraform/issues/new/choose. See the
- // documentation on `DynamicValue` for information about safely
- // accessing the configuration.
- //
- // The configuration is represented as a tftypes.Object, with each
- // attribute and nested block getting its own key and value.
- //
- // This configuration will have known values for all fields.
- ProviderMeta *DynamicValue
-
- // ClientCapabilities defines optionally supported protocol features for the
- // ReadDataSource RPC, such as forward-compatible Terraform behavior changes.
- ClientCapabilities *ReadDataSourceClientCapabilities
-}
-
-// ReadDataSourceResponse is the response from the provider about the current
-// state of the requested data source.
-type ReadDataSourceResponse struct {
- // State is the current state of the data source, represented as a
- // `DynamicValue`. See the documentation on `DynamicValue` for
- // information about safely creating the `DynamicValue`.
- //
- // The state should be represented as a tftypes.Object, with each
- // attribute and nested block getting its own key and value.
- State *DynamicValue
-
- // Diagnostics report errors or warnings related to retrieving the
- // current state of the requested data source. Returning an empty slice
- // indicates a successful validation with no warnings or errors
- // generated.
- Diagnostics []*Diagnostic
-
- // Deferred is used to indicate to Terraform that the ReadDataSource operation
- // needs to be deferred for a reason.
- Deferred *Deferred
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/deferred.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/deferred.go
deleted file mode 100644
index 967cb861..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/deferred.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfprotov5
-
-const (
- // DeferredReasonUnknown is used to indicate an invalid `DeferredReason`.
- // Provider developers should not use it.
- DeferredReasonUnknown DeferredReason = 0
-
- // DeferredReasonResourceConfigUnknown is used to indicate that the resource configuration
- // is partially unknown and the real values need to be known before the change can be planned.
- DeferredReasonResourceConfigUnknown DeferredReason = 1
-
- // DeferredReasonProviderConfigUnknown is used to indicate that the provider configuration
- // is partially unknown and the real values need to be known before the change can be planned.
- DeferredReasonProviderConfigUnknown DeferredReason = 2
-
- // DeferredReasonAbsentPrereq is used to indicate that a hard dependency has not been satisfied.
- DeferredReasonAbsentPrereq DeferredReason = 3
-)
-
-// Deferred is used to indicate to Terraform that a change needs to be deferred for a reason.
-type Deferred struct {
- // Reason is the reason for deferring the change.
- Reason DeferredReason
-}
-
-// DeferredReason represents different reasons for deferring a change.
-type DeferredReason int32
-
-func (d DeferredReason) String() string {
- switch d {
- case 0:
- return "UNKNOWN"
- case 1:
- return "RESOURCE_CONFIG_UNKNOWN"
- case 2:
- return "PROVIDER_CONFIG_UNKNOWN"
- case 3:
- return "ABSENT_PREREQ"
- }
- return "UNKNOWN"
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/diagnostic.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/diagnostic.go
deleted file mode 100644
index 15ab6a4a..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/diagnostic.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfprotov5
-
-import "github.com/hashicorp/terraform-plugin-go/tftypes"
-
-const (
- // DiagnosticSeverityInvalid is used to indicate an invalid
- // `DiagnosticSeverity`. Provider developers should not use it.
- DiagnosticSeverityInvalid DiagnosticSeverity = 0
-
- // DiagnosticSeverityError is used to indicate that a `Diagnostic`
- // represents an error and should halt Terraform execution.
- DiagnosticSeverityError DiagnosticSeverity = 1
-
- // DiagnosticSeverityWarning is used to indicate that a `Diagnostic`
- // represents a warning and should not halt Terraform's execution, but
- // it should be surfaced to the user.
- DiagnosticSeverityWarning DiagnosticSeverity = 2
-)
-
-// Diagnostic is used to convey information back the user running Terraform.
-type Diagnostic struct {
- // Severity indicates how Terraform should handle the Diagnostic.
- Severity DiagnosticSeverity
-
- // Summary is a brief description of the problem, roughly
- // sentence-sized, and should provide a concise description of what
- // went wrong. For example, a Summary could be as simple as "Invalid
- // value.".
- Summary string
-
- // Detail is a lengthier, more complete description of the problem.
- // Detail should provide enough information that a user can resolve the
- // problem entirely. For example, a Detail could be "Values must be
- // alphanumeric and lowercase only."
- Detail string
-
- // Attribute indicates which field, specifically, has the problem. Not
- // setting this will indicate the entire resource; setting it will
- // indicate that the problem is with a certain field in the resource,
- // which helps users find the source of the problem.
- Attribute *tftypes.AttributePath
-}
-
-// DiagnosticSeverity represents different classes of Diagnostic which affect
-// how Terraform handles the Diagnostics.
-type DiagnosticSeverity int32
-
-func (d DiagnosticSeverity) String() string {
- switch d {
- case 0:
- return "INVALID"
- case 1:
- return "ERROR"
- case 2:
- return "WARNING"
- }
- return "UNKNOWN"
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/doc.go
deleted file mode 100644
index 2d35c925..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/doc.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-// Package tfprotov5 provides the interfaces and types needed to build a
-// Terraform provider server.
-//
-// All Terraform provider servers should be built on
-// these types, to take advantage of the ecosystem and tooling built around
-// them.
-//
-// These types are small wrappers around the Terraform protocol. It is assumed
-// that developers using tfprotov5 are familiar with the protocol, its
-// requirements, and its semantics. Developers not comfortable working with the
-// raw protocol should use the github.com/hashicorp/terraform-plugin-sdk/v2 Go
-// module instead, which offers a less verbose, safer way to develop a
-// Terraform provider, albeit with less flexibility and power.
-//
-// Provider developers should start by defining a type that implements the
-// `ProviderServer` interface. A struct is recommended, as it will allow you to
-// store the configuration information attached to your provider for use in
-// requests, but any type is technically possible.
-//
-// `ProviderServer` implementations will need to implement the composed
-// interfaces, `ResourceServer` and `DataSourceServer`. It is recommended, but
-// not required, to use an embedded `ResourceRouter` and `DataSourceRouter` in
-// your `ProviderServer` to achieve this, which will let you handle requests
-// for each resource and data source in a resource-specific or data
-// source-specific function.
-//
-// To serve the `ProviderServer` implementation as a gRPC server that Terraform
-// can connect to, use the `tfprotov5/server.Serve` function.
-package tfprotov5
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/dynamic_value.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/dynamic_value.go
deleted file mode 100644
index d21e4966..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/dynamic_value.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfprotov5
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
-
- "github.com/hashicorp/terraform-plugin-go/tftypes"
- msgpack "github.com/vmihailenco/msgpack/v5"
- "github.com/vmihailenco/msgpack/v5/msgpcode"
-)
-
-// ErrUnknownDynamicValueType is returned when a DynamicValue has no MsgPack or
-// JSON bytes set. This should never be returned during the normal operation of
-// a provider, and indicates one of the following:
-//
-// 1. terraform-plugin-go is out of sync with the protocol and should be
-// updated.
-//
-// 2. terrafrom-plugin-go has a bug.
-//
-// 3. The `DynamicValue` was generated or modified by something other than
-// terraform-plugin-go and is no longer a valid value.
-var ErrUnknownDynamicValueType = errors.New("DynamicValue had no JSON or msgpack data set")
-
-// NewDynamicValue creates a DynamicValue from a tftypes.Value. You must
-// specify the tftype.Type you want to send the value as, and it must be a type
-// that is compatible with the Type of the Value. Usually it should just be the
-// Type of the Value, but it can also be the DynamicPseudoType.
-func NewDynamicValue(t tftypes.Type, v tftypes.Value) (DynamicValue, error) {
- b, err := v.MarshalMsgPack(t) //nolint:staticcheck
- if err != nil {
- return DynamicValue{}, err
- }
- return DynamicValue{
- MsgPack: b,
- }, nil
-}
-
-// DynamicValue represents a nested encoding value that came from the protocol.
-// The only way providers should ever interact with it is by calling its
-// `Unmarshal` method to retrieve a `tftypes.Value`. Although the type system
-// allows for other interactions, they are explicitly not supported, and will
-// not be considered when evaluating for breaking changes. Treat this type as
-// an opaque value, and *only* call its `Unmarshal` method.
-type DynamicValue struct {
- MsgPack []byte
- JSON []byte
-}
-
-// IsNull returns true if the DynamicValue represents a null value based on the
-// underlying JSON or MessagePack data.
-func (d DynamicValue) IsNull() (bool, error) {
- if d.JSON != nil {
- decoder := json.NewDecoder(bytes.NewReader(d.JSON))
- token, err := decoder.Token()
-
- if err != nil {
- return false, fmt.Errorf("unable to read DynamicValue JSON token: %w", err)
- }
-
- if token != nil {
- return false, nil
- }
-
- return true, nil
- }
-
- if d.MsgPack != nil {
- decoder := msgpack.NewDecoder(bytes.NewReader(d.MsgPack))
- code, err := decoder.PeekCode()
-
- if err != nil {
- return false, fmt.Errorf("unable to read DynamicValue MsgPack code: %w", err)
- }
-
- // Extensions are considered unknown
- if msgpcode.IsExt(code) || code != msgpcode.Nil {
- return false, nil
- }
-
- return true, nil
- }
-
- return false, fmt.Errorf("unable to read DynamicValue: %w", ErrUnknownDynamicValueType)
-}
-
-// Unmarshal returns a `tftypes.Value` that represents the information
-// contained in the DynamicValue in an easy-to-interact-with way. It is the
-// main purpose of the DynamicValue type, and is how provider developers should
-// obtain config, state, and other values from the protocol.
-//
-// Pass in the type you want the `Value` to be interpreted as. Terraform's type
-// system encodes in a lossy manner, meaning the type information is not
-// preserved losslessly when going over the wire. Sets, lists, and tuples all
-// look the same, as do user-specified values when the provider has a
-// DynamicPseudoType in its schema. Objects and maps all look the same, as
-// well, as do DynamicPseudoType values sometimes. Fortunately, the provider
-// should already know the type; it should be the type of the schema, or
-// PseudoDynamicType if that's what's in the schema. `Unmarshal` will then
-// parse the value as though it belongs to that type, if possible, and return a
-// `tftypes.Value` with the appropriate information. If the data can't be
-// interpreted as that type, an error will be returned saying so. In these
-// cases, double check to make sure the schema is declaring the same type being
-// passed into `Unmarshal`.
-//
-// In the event an ErrUnknownDynamicValueType is returned, one of three things
-// has happened:
-//
-// 1. terraform-plugin-go is out of date and out of sync with the protocol, and
-// an issue should be opened on its repo to get it updated.
-//
-// 2. terraform-plugin-go has a bug somewhere, and an issue should be opened on
-// its repo to get it fixed.
-//
-// 3. The provider or a dependency has modified the `DynamicValue` in an
-// unsupported way, or has created one from scratch, and should treat it as
-// opaque and not modify it, only calling `Unmarshal` on `DynamicValue`s
-// received from RPC requests.
-func (d DynamicValue) Unmarshal(typ tftypes.Type) (tftypes.Value, error) {
- if d.JSON != nil {
- return tftypes.ValueFromJSON(d.JSON, typ) //nolint:staticcheck
- }
- if d.MsgPack != nil {
- return tftypes.ValueFromMsgPack(d.MsgPack, typ) //nolint:staticcheck
- }
- return tftypes.Value{}, ErrUnknownDynamicValueType
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/function.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/function.go
deleted file mode 100644
index ef1e363a..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/function.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfprotov5
-
-import (
- "context"
-
- "github.com/hashicorp/terraform-plugin-go/tftypes"
-)
-
-// Function describes the definition of a function. Result must be defined.
-type Function struct {
- // Parameters is the ordered list of positional function parameters.
- Parameters []*FunctionParameter
-
- // VariadicParameter is an optional final parameter which accepts zero or
- // more argument values, in which Terraform will send an ordered list of the
- // parameter type.
- VariadicParameter *FunctionParameter
-
- // Return is the function result.
- Return *FunctionReturn
-
- // Summary is the shortened human-readable documentation for the function.
- Summary string
-
- // Description is the longer human-readable documentation for the function.
- Description string
-
- // DescriptionKind indicates the formatting and encoding that the
- // Description field is using.
- DescriptionKind StringKind
-
- // DeprecationMessage is the human-readable documentation if the function
- // is deprecated. This message should be practitioner oriented to explain
- // how their configuration should be updated.
- DeprecationMessage string
-}
-
-// FunctionMetadata describes metadata for a function in the GetMetadata RPC.
-type FunctionMetadata struct {
- // Name is the name of the function.
- Name string
-}
-
-// FunctionParameter describes the definition of a function parameter. Type must
-// be defined.
-type FunctionParameter struct {
- // AllowNullValue when enabled denotes that a null argument value can be
- // passed to the provider. When disabled, Terraform returns an error if the
- // argument value is null.
- AllowNullValue bool
-
- // AllowUnknownValues when enabled denotes that any unknown argument value
- // (recursively checked for collections) can be passed to the provider. When
- // disabled and an unknown value is present, Terraform skips the function
- // call entirely and returns an unknown value result from the function.
- AllowUnknownValues bool
-
- // Description is the human-readable documentation for the parameter.
- Description string
-
- // DescriptionKind indicates the formatting and encoding that the
- // Description field is using.
- DescriptionKind StringKind
-
- // Name is the human-readable display name for the parameter. Parameters
- // are by definition positional and this name is only used in documentation.
- Name string
-
- // Type indicates the type of data the parameter expects.
- Type tftypes.Type
-}
-
-// FunctionReturn describes the definition of a function result. Type must be
-// defined.
-type FunctionReturn struct {
- // Type indicates the type of return data.
- Type tftypes.Type
-}
-
-// FunctionServer is an interface containing the methods a function
-// implementation needs to fill.
-type FunctionServer interface {
- // CallFunction is called when Terraform wants to execute the logic of a
- // function referenced in the configuration.
- CallFunction(context.Context, *CallFunctionRequest) (*CallFunctionResponse, error)
-
- // GetFunctions is called when Terraform wants to lookup which functions a
- // provider supports when not calling GetProviderSchema.
- GetFunctions(context.Context, *GetFunctionsRequest) (*GetFunctionsResponse, error)
-}
-
-// CallFunctionRequest is the request Terraform sends when it wants to execute
-// the logic of function referenced in the configuration.
-type CallFunctionRequest struct {
- // Name is the function name being called.
- Name string
-
- // Arguments is the configuration value of each argument the practitioner
- // supplied for the function call. The ordering and value of each element
- // matches the function parameters and their associated type. If the
- // function definition includes a final variadic parameter, its value is an
- // ordered list of the variadic parameter type.
- Arguments []*DynamicValue
-}
-
-// CallFunctionResponse is the response from the provider with the result of
-// executing the logic of the function.
-type CallFunctionResponse struct {
- // Error reports errors related to the execution of the
- // function logic. Returning a nil error indicates a successful response
- // with no errors presented to practitioners.
- Error *FunctionError
-
- // Result is the return value from the called function, matching the result
- // type in the function definition.
- Result *DynamicValue
-}
-
-// GetFunctionsRequest is the request Terraform sends when it wants to lookup
-// which functions a provider supports when not calling GetProviderSchema.
-type GetFunctionsRequest struct{}
-
-// GetFunctionsResponse is the response from the provider about the implemented
-// functions.
-type GetFunctionsResponse struct {
- // Diagnostics report errors or warnings related to the provider
- // implementation. Returning an empty slice indicates a successful response
- // with no warnings or errors presented to practitioners.
- Diagnostics []*Diagnostic
-
- // Functions is a map of function names to their definition.
- //
- // Unlike data resources and managed resources, the name should NOT be
- // prefixed with the provider name and an underscore. Configuration
- // references to functions use a separate namespacing syntax that already
- // includes the provider name.
- Functions map[string]*Function
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/function_error.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/function_error.go
deleted file mode 100644
index 558335f9..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/function_error.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tfprotov5
-
-// FunctionError is used to convey information back to the user running Terraform.
-type FunctionError struct {
- // Text is the description of the error.
- Text string
-
- // FunctionArgument is the positional function argument for aligning
- // configuration source.
- FunctionArgument *int64
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/diagnostics.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/diagnostics.go
deleted file mode 100644
index cc40d861..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/diagnostics.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package diag
-
-import (
- "context"
-
- "github.com/hashicorp/terraform-plugin-go/internal/logging"
- "github.com/hashicorp/terraform-plugin-go/tfprotov5"
-)
-
-// Diagnostics is a collection of Diagnostic.
-type Diagnostics []*tfprotov5.Diagnostic
-
-// ErrorCount returns the number of error severity diagnostics.
-func (d Diagnostics) ErrorCount() int {
- var result int
-
- for _, diagnostic := range d {
- if diagnostic == nil {
- continue
- }
-
- if diagnostic.Severity != tfprotov5.DiagnosticSeverityError {
- continue
- }
-
- result++
- }
-
- return result
-}
-
-// Log will log every diagnostic:
-//
-// - Error severity at ERROR level
-// - Warning severity at WARN level
-// - Invalid/Unknown severity at WARN level
-func (d Diagnostics) Log(ctx context.Context) {
- for _, diagnostic := range d {
- if diagnostic == nil {
- continue
- }
-
- diagnosticFields := map[string]interface{}{
- logging.KeyDiagnosticDetail: diagnostic.Detail,
- logging.KeyDiagnosticSeverity: diagnostic.Severity.String(),
- logging.KeyDiagnosticSummary: diagnostic.Summary,
- }
-
- if diagnostic.Attribute != nil {
- diagnosticFields[logging.KeyDiagnosticAttribute] = diagnostic.Attribute.String()
- }
-
- switch diagnostic.Severity {
- case tfprotov5.DiagnosticSeverityError:
- logging.ProtocolError(ctx, "Response contains error diagnostic", diagnosticFields)
- case tfprotov5.DiagnosticSeverityWarning:
- logging.ProtocolWarn(ctx, "Response contains warning diagnostic", diagnosticFields)
- default:
- logging.ProtocolWarn(ctx, "Response contains unknown diagnostic", diagnosticFields)
- }
- }
-}
-
-// WarningCount returns the number of warning severity diagnostics.
-func (d Diagnostics) WarningCount() int {
- var result int
-
- for _, diagnostic := range d {
- if diagnostic == nil {
- continue
- }
-
- if diagnostic.Severity != tfprotov5.DiagnosticSeverityWarning {
- continue
- }
-
- result++
- }
-
- return result
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/doc.go
deleted file mode 100644
index faaba228..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/doc.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-// Package diag contains diagnostics helpers. These implementations are
-// intentionally outside the public API.
-package diag
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/client_capabilities.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/client_capabilities.go
deleted file mode 100644
index 94ddc3d4..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/client_capabilities.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package fromproto
-
-import (
- "github.com/hashicorp/terraform-plugin-go/tfprotov5"
- "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5"
-)
-
-func ConfigureProviderClientCapabilities(in *tfplugin5.ClientCapabilities) *tfprotov5.ConfigureProviderClientCapabilities {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.ConfigureProviderClientCapabilities{
- DeferralAllowed: in.DeferralAllowed,
- }
-
- return resp
-}
-
-func ReadDataSourceClientCapabilities(in *tfplugin5.ClientCapabilities) *tfprotov5.ReadDataSourceClientCapabilities {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.ReadDataSourceClientCapabilities{
- DeferralAllowed: in.DeferralAllowed,
- }
-
- return resp
-}
-
-func ReadResourceClientCapabilities(in *tfplugin5.ClientCapabilities) *tfprotov5.ReadResourceClientCapabilities {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.ReadResourceClientCapabilities{
- DeferralAllowed: in.DeferralAllowed,
- }
-
- return resp
-}
-
-func PlanResourceChangeClientCapabilities(in *tfplugin5.ClientCapabilities) *tfprotov5.PlanResourceChangeClientCapabilities {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.PlanResourceChangeClientCapabilities{
- DeferralAllowed: in.DeferralAllowed,
- }
-
- return resp
-}
-
-func ImportResourceStateClientCapabilities(in *tfplugin5.ClientCapabilities) *tfprotov5.ImportResourceStateClientCapabilities {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.ImportResourceStateClientCapabilities{
- DeferralAllowed: in.DeferralAllowed,
- }
-
- return resp
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/data_source.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/data_source.go
deleted file mode 100644
index 385f4845..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/data_source.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package fromproto
-
-import (
- "github.com/hashicorp/terraform-plugin-go/tfprotov5"
- "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5"
-)
-
-func ValidateDataSourceConfigRequest(in *tfplugin5.ValidateDataSourceConfig_Request) *tfprotov5.ValidateDataSourceConfigRequest {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.ValidateDataSourceConfigRequest{
- Config: DynamicValue(in.Config),
- TypeName: in.TypeName,
- }
-
- return resp
-}
-
-func ReadDataSourceRequest(in *tfplugin5.ReadDataSource_Request) *tfprotov5.ReadDataSourceRequest {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.ReadDataSourceRequest{
- Config: DynamicValue(in.Config),
- ProviderMeta: DynamicValue(in.ProviderMeta),
- TypeName: in.TypeName,
- ClientCapabilities: ReadDataSourceClientCapabilities(in.ClientCapabilities),
- }
-
- return resp
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/doc.go
deleted file mode 100644
index 01a7012d..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/doc.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-// Package fromproto converts Protocol Buffers generated tfplugin5 types into
-// terraform-plugin-go tfprotov5 types.
-package fromproto
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/dynamic_value.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/dynamic_value.go
deleted file mode 100644
index af332bfd..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/dynamic_value.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package fromproto
-
-import (
- "github.com/hashicorp/terraform-plugin-go/tfprotov5"
- "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5"
-)
-
-func DynamicValue(in *tfplugin5.DynamicValue) *tfprotov5.DynamicValue {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.DynamicValue{
- MsgPack: in.Msgpack,
- JSON: in.Json,
- }
-
- return resp
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/function.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/function.go
deleted file mode 100644
index 0abd61de..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/function.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package fromproto
-
-import (
- "github.com/hashicorp/terraform-plugin-go/tfprotov5"
- "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5"
-)
-
-func CallFunctionRequest(in *tfplugin5.CallFunction_Request) *tfprotov5.CallFunctionRequest {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.CallFunctionRequest{
- Arguments: make([]*tfprotov5.DynamicValue, 0, len(in.Arguments)),
- Name: in.Name,
- }
-
- for _, argument := range in.Arguments {
- resp.Arguments = append(resp.Arguments, DynamicValue(argument))
- }
-
- return resp
-}
-
-func GetFunctionsRequest(in *tfplugin5.GetFunctions_Request) *tfprotov5.GetFunctionsRequest {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.GetFunctionsRequest{}
-
- return resp
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/provider.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/provider.go
deleted file mode 100644
index ac487800..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/provider.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package fromproto
-
-import (
- "github.com/hashicorp/terraform-plugin-go/tfprotov5"
- "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5"
-)
-
-func GetMetadataRequest(in *tfplugin5.GetMetadata_Request) *tfprotov5.GetMetadataRequest {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.GetMetadataRequest{}
-
- return resp
-}
-
-func GetProviderSchemaRequest(in *tfplugin5.GetProviderSchema_Request) *tfprotov5.GetProviderSchemaRequest {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.GetProviderSchemaRequest{}
-
- return resp
-}
-
-func PrepareProviderConfigRequest(in *tfplugin5.PrepareProviderConfig_Request) *tfprotov5.PrepareProviderConfigRequest {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.PrepareProviderConfigRequest{
- Config: DynamicValue(in.Config),
- }
-
- return resp
-}
-
-func ConfigureProviderRequest(in *tfplugin5.Configure_Request) *tfprotov5.ConfigureProviderRequest {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.ConfigureProviderRequest{
- Config: DynamicValue(in.Config),
- TerraformVersion: in.TerraformVersion,
- ClientCapabilities: ConfigureProviderClientCapabilities(in.ClientCapabilities),
- }
-
- return resp
-}
-
-func StopProviderRequest(in *tfplugin5.Stop_Request) *tfprotov5.StopProviderRequest {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.StopProviderRequest{}
-
- return resp
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/raw_state.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/raw_state.go
deleted file mode 100644
index c31b7e64..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/raw_state.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package fromproto
-
-import (
- "github.com/hashicorp/terraform-plugin-go/tfprotov5"
- "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5"
-)
-
-func RawState(in *tfplugin5.RawState) *tfprotov5.RawState {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.RawState{
- JSON: in.Json,
- Flatmap: in.Flatmap,
- }
-
- return resp
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/resource.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/resource.go
deleted file mode 100644
index f531b487..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/resource.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package fromproto
-
-import (
- "github.com/hashicorp/terraform-plugin-go/tfprotov5"
- "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5"
-)
-
-func ValidateResourceTypeConfigRequest(in *tfplugin5.ValidateResourceTypeConfig_Request) *tfprotov5.ValidateResourceTypeConfigRequest {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.ValidateResourceTypeConfigRequest{
- Config: DynamicValue(in.Config),
- TypeName: in.TypeName,
- }
-
- return resp
-}
-
-func UpgradeResourceStateRequest(in *tfplugin5.UpgradeResourceState_Request) *tfprotov5.UpgradeResourceStateRequest {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.UpgradeResourceStateRequest{
- RawState: RawState(in.RawState),
- TypeName: in.TypeName,
- Version: in.Version,
- }
-
- return resp
-}
-
-func ReadResourceRequest(in *tfplugin5.ReadResource_Request) *tfprotov5.ReadResourceRequest {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.ReadResourceRequest{
- CurrentState: DynamicValue(in.CurrentState),
- Private: in.Private,
- ProviderMeta: DynamicValue(in.ProviderMeta),
- TypeName: in.TypeName,
- ClientCapabilities: ReadResourceClientCapabilities(in.ClientCapabilities),
- }
-
- return resp
-}
-
-func PlanResourceChangeRequest(in *tfplugin5.PlanResourceChange_Request) *tfprotov5.PlanResourceChangeRequest {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.PlanResourceChangeRequest{
- Config: DynamicValue(in.Config),
- PriorPrivate: in.PriorPrivate,
- PriorState: DynamicValue(in.PriorState),
- ProposedNewState: DynamicValue(in.ProposedNewState),
- ProviderMeta: DynamicValue(in.ProviderMeta),
- TypeName: in.TypeName,
- ClientCapabilities: PlanResourceChangeClientCapabilities(in.ClientCapabilities),
- }
-
- return resp
-}
-
-func ApplyResourceChangeRequest(in *tfplugin5.ApplyResourceChange_Request) *tfprotov5.ApplyResourceChangeRequest {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.ApplyResourceChangeRequest{
- Config: DynamicValue(in.Config),
- PlannedPrivate: in.PlannedPrivate,
- PlannedState: DynamicValue(in.PlannedState),
- PriorState: DynamicValue(in.PriorState),
- ProviderMeta: DynamicValue(in.ProviderMeta),
- TypeName: in.TypeName,
- }
-
- return resp
-}
-
-func ImportResourceStateRequest(in *tfplugin5.ImportResourceState_Request) *tfprotov5.ImportResourceStateRequest {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.ImportResourceStateRequest{
- TypeName: in.TypeName,
- ID: in.Id,
- ClientCapabilities: ImportResourceStateClientCapabilities(in.ClientCapabilities),
- }
-
- return resp
-}
-
-func MoveResourceStateRequest(in *tfplugin5.MoveResourceState_Request) *tfprotov5.MoveResourceStateRequest {
- if in == nil {
- return nil
- }
-
- resp := &tfprotov5.MoveResourceStateRequest{
- SourcePrivate: in.SourcePrivate,
- SourceProviderAddress: in.SourceProviderAddress,
- SourceSchemaVersion: in.SourceSchemaVersion,
- SourceState: RawState(in.SourceState),
- SourceTypeName: in.SourceTypeName,
- TargetTypeName: in.TargetTypeName,
- }
-
- return resp
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/funcerr/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/funcerr/doc.go
deleted file mode 100644
index 9b9f61f0..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/funcerr/doc.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-// Package funcerr contains function error helpers. These implementations are
-// intentionally outside the public API.
-package funcerr
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/funcerr/function_error.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/funcerr/function_error.go
deleted file mode 100644
index 60428b44..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/funcerr/function_error.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package funcerr
-
-import (
- "context"
-
- "github.com/hashicorp/terraform-plugin-go/internal/logging"
- "github.com/hashicorp/terraform-plugin-go/tfprotov5"
-)
-
-// FunctionError is a single FunctionError.
-type FunctionError tfprotov5.FunctionError
-
-// HasError returns true if the FunctionError is not empty.
-func (e *FunctionError) HasError() bool {
- if e == nil {
- return false
- }
-
- return e.Text != "" || e.FunctionArgument != nil
-}
-
-// Log will log the function error:
-func (e *FunctionError) Log(ctx context.Context) {
- if e == nil {
- return
- }
-
- if !e.HasError() {
- return
- }
-
- switch {
- case e.FunctionArgument != nil && e.Text != "":
- logging.ProtocolError(ctx, "Response contains function error", map[string]interface{}{
- logging.KeyFunctionErrorText: e.Text,
- logging.KeyFunctionErrorArgument: *e.FunctionArgument,
- })
- case e.FunctionArgument != nil:
- logging.ProtocolError(ctx, "Response contains function error", map[string]interface{}{
- logging.KeyFunctionErrorArgument: *e.FunctionArgument,
- })
- case e.Text != "":
- logging.ProtocolError(ctx, "Response contains function error", map[string]interface{}{
- logging.KeyFunctionErrorText: e.Text,
- })
- }
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/client_capabilities.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/client_capabilities.go
deleted file mode 100644
index d64557b8..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/client_capabilities.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tf5serverlogging
-
-import (
- "context"
-
- "github.com/hashicorp/terraform-plugin-go/internal/logging"
- "github.com/hashicorp/terraform-plugin-go/tfprotov5"
-)
-
-// ConfigureProviderClientCapabilities generates a TRACE "Announced client capabilities" log.
-func ConfigureProviderClientCapabilities(ctx context.Context, capabilities *tfprotov5.ConfigureProviderClientCapabilities) {
- if capabilities == nil {
- logging.ProtocolTrace(ctx, "No announced client capabilities", map[string]interface{}{})
- return
- }
-
- responseFields := map[string]interface{}{
- logging.KeyClientCapabilityDeferralAllowed: capabilities.DeferralAllowed,
- }
-
- logging.ProtocolTrace(ctx, "Announced client capabilities", responseFields)
-}
-
-// ReadDataSourceClientCapabilities generates a TRACE "Announced client capabilities" log.
-func ReadDataSourceClientCapabilities(ctx context.Context, capabilities *tfprotov5.ReadDataSourceClientCapabilities) {
- if capabilities == nil {
- logging.ProtocolTrace(ctx, "No announced client capabilities", map[string]interface{}{})
- return
- }
-
- responseFields := map[string]interface{}{
- logging.KeyClientCapabilityDeferralAllowed: capabilities.DeferralAllowed,
- }
-
- logging.ProtocolTrace(ctx, "Announced client capabilities", responseFields)
-}
-
-// ReadResourceClientCapabilities generates a TRACE "Announced client capabilities" log.
-func ReadResourceClientCapabilities(ctx context.Context, capabilities *tfprotov5.ReadResourceClientCapabilities) {
- if capabilities == nil {
- logging.ProtocolTrace(ctx, "No announced client capabilities", map[string]interface{}{})
- return
- }
-
- responseFields := map[string]interface{}{
- logging.KeyClientCapabilityDeferralAllowed: capabilities.DeferralAllowed,
- }
-
- logging.ProtocolTrace(ctx, "Announced client capabilities", responseFields)
-}
-
-// PlanResourceChangeClientCapabilities generates a TRACE "Announced client capabilities" log.
-func PlanResourceChangeClientCapabilities(ctx context.Context, capabilities *tfprotov5.PlanResourceChangeClientCapabilities) {
- if capabilities == nil {
- logging.ProtocolTrace(ctx, "No announced client capabilities", map[string]interface{}{})
- return
- }
-
- responseFields := map[string]interface{}{
- logging.KeyClientCapabilityDeferralAllowed: capabilities.DeferralAllowed,
- }
-
- logging.ProtocolTrace(ctx, "Announced client capabilities", responseFields)
-}
-
-// ImportResourceStateClientCapabilities generates a TRACE "Announced client capabilities" log.
-func ImportResourceStateClientCapabilities(ctx context.Context, capabilities *tfprotov5.ImportResourceStateClientCapabilities) {
- if capabilities == nil {
- logging.ProtocolTrace(ctx, "No announced client capabilities", map[string]interface{}{})
- return
- }
-
- responseFields := map[string]interface{}{
- logging.KeyClientCapabilityDeferralAllowed: capabilities.DeferralAllowed,
- }
-
- logging.ProtocolTrace(ctx, "Announced client capabilities", responseFields)
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/context_keys.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/context_keys.go
deleted file mode 100644
index 5b2556a2..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/context_keys.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tf5serverlogging
-
-// Context key types.
-// Reference: https://staticcheck.io/docs/checks/#SA1029
-
-// ContextKeyDownstreamRequestStartTime is a context.Context key to store the
-// time.Time when the server began a downstream request.
-type ContextKeyDownstreamRequestStartTime struct{}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/deferred.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/deferred.go
deleted file mode 100644
index fa9449cc..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/deferred.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tf5serverlogging
-
-import (
- "context"
-
- "github.com/hashicorp/terraform-plugin-go/internal/logging"
- "github.com/hashicorp/terraform-plugin-go/tfprotov5"
-)
-
-// Deferred generates a TRACE "Received downstream deferred response" log if populated.
-func Deferred(ctx context.Context, deferred *tfprotov5.Deferred) {
- if deferred == nil {
- return
- }
-
- responseFields := map[string]interface{}{
- logging.KeyDeferredReason: deferred.Reason.String(),
- }
-
- logging.ProtocolTrace(ctx, "Received downstream deferred response", responseFields)
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/doc.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/doc.go
deleted file mode 100644
index 82b9d39e..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/doc.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-// Package tf5serverlogging contains logging functionality specific to
-// tf5server and tfprotov5 types.
-package tf5serverlogging
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/downstream_request.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/downstream_request.go
deleted file mode 100644
index 8c442fef..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/downstream_request.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tf5serverlogging
-
-import (
- "context"
- "time"
-
- "github.com/hashicorp/terraform-plugin-go/internal/logging"
- "github.com/hashicorp/terraform-plugin-go/tfprotov5"
- "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag"
- "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/funcerr"
-)
-
-// DownstreamRequest sets a request duration start time context key and
-// generates a TRACE "Sending request downstream" log.
-func DownstreamRequest(ctx context.Context) context.Context {
- requestStart := time.Now()
- ctx = context.WithValue(ctx, ContextKeyDownstreamRequestStartTime{}, requestStart)
-
- logging.ProtocolTrace(ctx, "Sending request downstream")
-
- return ctx
-}
-
-// DownstreamResponse generates the following logging:
-//
-// - TRACE "Received downstream response" log with request duration and
-// diagnostic severity counts
-// - Per-diagnostic logs
-func DownstreamResponse(ctx context.Context, diagnostics diag.Diagnostics) {
- responseFields := map[string]interface{}{
- logging.KeyDiagnosticErrorCount: diagnostics.ErrorCount(),
- logging.KeyDiagnosticWarningCount: diagnostics.WarningCount(),
- }
-
- if requestStart, ok := ctx.Value(ContextKeyDownstreamRequestStartTime{}).(time.Time); ok {
- responseFields[logging.KeyRequestDurationMs] = time.Since(requestStart).Milliseconds()
- }
-
- logging.ProtocolTrace(ctx, "Received downstream response", responseFields)
- diagnostics.Log(ctx)
-}
-
-// DownstreamResponseWithError generates the following logging:
-//
-// - TRACE "Received downstream response" log with request duration and
-// whether a function error is present
-// - Log with function error details
-func DownstreamResponseWithError(ctx context.Context, funcErr *tfprotov5.FunctionError) {
- fe := (*funcerr.FunctionError)(funcErr)
-
- responseFields := map[string]interface{}{
- logging.KeyFunctionErrorExists: fe.HasError(),
- }
-
- if requestStart, ok := ctx.Value(ContextKeyDownstreamRequestStartTime{}).(time.Time); ok {
- responseFields[logging.KeyRequestDurationMs] = time.Since(requestStart).Milliseconds()
- }
-
- logging.ProtocolTrace(ctx, "Received downstream response", responseFields)
- fe.Log(ctx)
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/server_capabilities.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/server_capabilities.go
deleted file mode 100644
index d0f86c84..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tf5serverlogging/server_capabilities.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-package tf5serverlogging
-
-import (
- "context"
-
- "github.com/hashicorp/terraform-plugin-go/internal/logging"
- "github.com/hashicorp/terraform-plugin-go/tfprotov5"
-)
-
-// ServerCapabilities generates a TRACE "Announced server capabilities" log.
-func ServerCapabilities(ctx context.Context, capabilities *tfprotov5.ServerCapabilities) {
- responseFields := map[string]interface{}{
- logging.KeyServerCapabilityGetProviderSchemaOptional: false,
- logging.KeyServerCapabilityPlanDestroy: false,
- }
-
- if capabilities != nil {
- responseFields[logging.KeyServerCapabilityGetProviderSchemaOptional] = capabilities.GetProviderSchemaOptional
- responseFields[logging.KeyServerCapabilityPlanDestroy] = capabilities.PlanDestroy
- }
-
- logging.ProtocolTrace(ctx, "Announced server capabilities", responseFields)
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go
deleted file mode 100644
index 46ce948a..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go
+++ /dev/null
@@ -1,6619 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-// Terraform Plugin RPC protocol version 5.6
-//
-// This file defines version 5.6 of the RPC protocol. To implement a plugin
-// against this protocol, copy this definition into your own codebase and
-// use protoc to generate stubs for your target language.
-//
-// This file will not be updated. Any minor versions of protocol 5 to follow
-// should copy this file and modify the copy while maintaing backwards
-// compatibility. Breaking changes, if any are required, will come
-// in a subsequent major version with its own separate proto definition.
-//
-// Note that only the proto files included in a release tag of Terraform are
-// official protocol releases. Proto files taken from other commits may include
-// incomplete changes or features that did not make it into a final release.
-// In all reasonable cases, plugin developers should take the proto file from
-// the tag of the most recent release of Terraform, and not from the main
-// branch or any other development branch.
-//
-
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.34.0
-// protoc v5.26.1
-// source: tfplugin5.proto
-
-package tfplugin5
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-type StringKind int32
-
-const (
- StringKind_PLAIN StringKind = 0
- StringKind_MARKDOWN StringKind = 1
-)
-
-// Enum value maps for StringKind.
-var (
- StringKind_name = map[int32]string{
- 0: "PLAIN",
- 1: "MARKDOWN",
- }
- StringKind_value = map[string]int32{
- "PLAIN": 0,
- "MARKDOWN": 1,
- }
-)
-
-func (x StringKind) Enum() *StringKind {
- p := new(StringKind)
- *p = x
- return p
-}
-
-func (x StringKind) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (StringKind) Descriptor() protoreflect.EnumDescriptor {
- return file_tfplugin5_proto_enumTypes[0].Descriptor()
-}
-
-func (StringKind) Type() protoreflect.EnumType {
- return &file_tfplugin5_proto_enumTypes[0]
-}
-
-func (x StringKind) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use StringKind.Descriptor instead.
-func (StringKind) EnumDescriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{0}
-}
-
-type Diagnostic_Severity int32
-
-const (
- Diagnostic_INVALID Diagnostic_Severity = 0
- Diagnostic_ERROR Diagnostic_Severity = 1
- Diagnostic_WARNING Diagnostic_Severity = 2
-)
-
-// Enum value maps for Diagnostic_Severity.
-var (
- Diagnostic_Severity_name = map[int32]string{
- 0: "INVALID",
- 1: "ERROR",
- 2: "WARNING",
- }
- Diagnostic_Severity_value = map[string]int32{
- "INVALID": 0,
- "ERROR": 1,
- "WARNING": 2,
- }
-)
-
-func (x Diagnostic_Severity) Enum() *Diagnostic_Severity {
- p := new(Diagnostic_Severity)
- *p = x
- return p
-}
-
-func (x Diagnostic_Severity) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (Diagnostic_Severity) Descriptor() protoreflect.EnumDescriptor {
- return file_tfplugin5_proto_enumTypes[1].Descriptor()
-}
-
-func (Diagnostic_Severity) Type() protoreflect.EnumType {
- return &file_tfplugin5_proto_enumTypes[1]
-}
-
-func (x Diagnostic_Severity) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use Diagnostic_Severity.Descriptor instead.
-func (Diagnostic_Severity) EnumDescriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{1, 0}
-}
-
-type Schema_NestedBlock_NestingMode int32
-
-const (
- Schema_NestedBlock_INVALID Schema_NestedBlock_NestingMode = 0
- Schema_NestedBlock_SINGLE Schema_NestedBlock_NestingMode = 1
- Schema_NestedBlock_LIST Schema_NestedBlock_NestingMode = 2
- Schema_NestedBlock_SET Schema_NestedBlock_NestingMode = 3
- Schema_NestedBlock_MAP Schema_NestedBlock_NestingMode = 4
- Schema_NestedBlock_GROUP Schema_NestedBlock_NestingMode = 5
-)
-
-// Enum value maps for Schema_NestedBlock_NestingMode.
-var (
- Schema_NestedBlock_NestingMode_name = map[int32]string{
- 0: "INVALID",
- 1: "SINGLE",
- 2: "LIST",
- 3: "SET",
- 4: "MAP",
- 5: "GROUP",
- }
- Schema_NestedBlock_NestingMode_value = map[string]int32{
- "INVALID": 0,
- "SINGLE": 1,
- "LIST": 2,
- "SET": 3,
- "MAP": 4,
- "GROUP": 5,
- }
-)
-
-func (x Schema_NestedBlock_NestingMode) Enum() *Schema_NestedBlock_NestingMode {
- p := new(Schema_NestedBlock_NestingMode)
- *p = x
- return p
-}
-
-func (x Schema_NestedBlock_NestingMode) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (Schema_NestedBlock_NestingMode) Descriptor() protoreflect.EnumDescriptor {
- return file_tfplugin5_proto_enumTypes[2].Descriptor()
-}
-
-func (Schema_NestedBlock_NestingMode) Type() protoreflect.EnumType {
- return &file_tfplugin5_proto_enumTypes[2]
-}
-
-func (x Schema_NestedBlock_NestingMode) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use Schema_NestedBlock_NestingMode.Descriptor instead.
-func (Schema_NestedBlock_NestingMode) EnumDescriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{6, 2, 0}
-}
-
-// Reason is the reason for deferring the change.
-type Deferred_Reason int32
-
-const (
- // UNKNOWN is the default value, and should not be used.
- Deferred_UNKNOWN Deferred_Reason = 0
- // RESOURCE_CONFIG_UNKNOWN is used when the config is partially unknown and the real
- // values need to be known before the change can be planned.
- Deferred_RESOURCE_CONFIG_UNKNOWN Deferred_Reason = 1
- // PROVIDER_CONFIG_UNKNOWN is used when parts of the provider configuration
- // are unknown, e.g. the provider configuration is only known after the apply is done.
- Deferred_PROVIDER_CONFIG_UNKNOWN Deferred_Reason = 2
- // ABSENT_PREREQ is used when a hard dependency has not been satisfied.
- Deferred_ABSENT_PREREQ Deferred_Reason = 3
-)
-
-// Enum value maps for Deferred_Reason.
-var (
- Deferred_Reason_name = map[int32]string{
- 0: "UNKNOWN",
- 1: "RESOURCE_CONFIG_UNKNOWN",
- 2: "PROVIDER_CONFIG_UNKNOWN",
- 3: "ABSENT_PREREQ",
- }
- Deferred_Reason_value = map[string]int32{
- "UNKNOWN": 0,
- "RESOURCE_CONFIG_UNKNOWN": 1,
- "PROVIDER_CONFIG_UNKNOWN": 2,
- "ABSENT_PREREQ": 3,
- }
-)
-
-func (x Deferred_Reason) Enum() *Deferred_Reason {
- p := new(Deferred_Reason)
- *p = x
- return p
-}
-
-func (x Deferred_Reason) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (Deferred_Reason) Descriptor() protoreflect.EnumDescriptor {
- return file_tfplugin5_proto_enumTypes[3].Descriptor()
-}
-
-func (Deferred_Reason) Type() protoreflect.EnumType {
- return &file_tfplugin5_proto_enumTypes[3]
-}
-
-func (x Deferred_Reason) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use Deferred_Reason.Descriptor instead.
-func (Deferred_Reason) EnumDescriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{10, 0}
-}
-
-// DynamicValue is an opaque encoding of terraform data, with the field name
-// indicating the encoding scheme used.
-type DynamicValue struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Msgpack []byte `protobuf:"bytes,1,opt,name=msgpack,proto3" json:"msgpack,omitempty"`
- Json []byte `protobuf:"bytes,2,opt,name=json,proto3" json:"json,omitempty"`
-}
-
-func (x *DynamicValue) Reset() {
- *x = DynamicValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DynamicValue) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DynamicValue) ProtoMessage() {}
-
-func (x *DynamicValue) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DynamicValue.ProtoReflect.Descriptor instead.
-func (*DynamicValue) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *DynamicValue) GetMsgpack() []byte {
- if x != nil {
- return x.Msgpack
- }
- return nil
-}
-
-func (x *DynamicValue) GetJson() []byte {
- if x != nil {
- return x.Json
- }
- return nil
-}
-
-type Diagnostic struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Severity Diagnostic_Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=tfplugin5.Diagnostic_Severity" json:"severity,omitempty"`
- Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"`
- Detail string `protobuf:"bytes,3,opt,name=detail,proto3" json:"detail,omitempty"`
- Attribute *AttributePath `protobuf:"bytes,4,opt,name=attribute,proto3" json:"attribute,omitempty"`
-}
-
-func (x *Diagnostic) Reset() {
- *x = Diagnostic{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Diagnostic) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Diagnostic) ProtoMessage() {}
-
-func (x *Diagnostic) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Diagnostic.ProtoReflect.Descriptor instead.
-func (*Diagnostic) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *Diagnostic) GetSeverity() Diagnostic_Severity {
- if x != nil {
- return x.Severity
- }
- return Diagnostic_INVALID
-}
-
-func (x *Diagnostic) GetSummary() string {
- if x != nil {
- return x.Summary
- }
- return ""
-}
-
-func (x *Diagnostic) GetDetail() string {
- if x != nil {
- return x.Detail
- }
- return ""
-}
-
-func (x *Diagnostic) GetAttribute() *AttributePath {
- if x != nil {
- return x.Attribute
- }
- return nil
-}
-
-type FunctionError struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Text string `protobuf:"bytes,1,opt,name=text,proto3" json:"text,omitempty"`
- // The optional function_argument records the index position of the
- // argument which caused the error.
- FunctionArgument *int64 `protobuf:"varint,2,opt,name=function_argument,json=functionArgument,proto3,oneof" json:"function_argument,omitempty"`
-}
-
-func (x *FunctionError) Reset() {
- *x = FunctionError{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *FunctionError) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*FunctionError) ProtoMessage() {}
-
-func (x *FunctionError) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use FunctionError.ProtoReflect.Descriptor instead.
-func (*FunctionError) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *FunctionError) GetText() string {
- if x != nil {
- return x.Text
- }
- return ""
-}
-
-func (x *FunctionError) GetFunctionArgument() int64 {
- if x != nil && x.FunctionArgument != nil {
- return *x.FunctionArgument
- }
- return 0
-}
-
-type AttributePath struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Steps []*AttributePath_Step `protobuf:"bytes,1,rep,name=steps,proto3" json:"steps,omitempty"`
-}
-
-func (x *AttributePath) Reset() {
- *x = AttributePath{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AttributePath) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AttributePath) ProtoMessage() {}
-
-func (x *AttributePath) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AttributePath.ProtoReflect.Descriptor instead.
-func (*AttributePath) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *AttributePath) GetSteps() []*AttributePath_Step {
- if x != nil {
- return x.Steps
- }
- return nil
-}
-
-type Stop struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *Stop) Reset() {
- *x = Stop{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Stop) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Stop) ProtoMessage() {}
-
-func (x *Stop) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Stop.ProtoReflect.Descriptor instead.
-func (*Stop) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{4}
-}
-
-// RawState holds the stored state for a resource to be upgraded by the
-// provider. It can be in one of two formats, the current json encoded format
-// in bytes, or the legacy flatmap format as a map of strings.
-type RawState struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Json []byte `protobuf:"bytes,1,opt,name=json,proto3" json:"json,omitempty"`
- Flatmap map[string]string `protobuf:"bytes,2,rep,name=flatmap,proto3" json:"flatmap,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
-}
-
-func (x *RawState) Reset() {
- *x = RawState{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RawState) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RawState) ProtoMessage() {}
-
-func (x *RawState) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RawState.ProtoReflect.Descriptor instead.
-func (*RawState) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *RawState) GetJson() []byte {
- if x != nil {
- return x.Json
- }
- return nil
-}
-
-func (x *RawState) GetFlatmap() map[string]string {
- if x != nil {
- return x.Flatmap
- }
- return nil
-}
-
-// Schema is the configuration schema for a Resource, Provider, or Provisioner.
-type Schema struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The version of the schema.
- // Schemas are versioned, so that providers can upgrade a saved resource
- // state when the schema is changed.
- Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
- // Block is the top level configuration block for this schema.
- Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"`
-}
-
-func (x *Schema) Reset() {
- *x = Schema{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Schema) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Schema) ProtoMessage() {}
-
-func (x *Schema) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Schema.ProtoReflect.Descriptor instead.
-func (*Schema) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *Schema) GetVersion() int64 {
- if x != nil {
- return x.Version
- }
- return 0
-}
-
-func (x *Schema) GetBlock() *Schema_Block {
- if x != nil {
- return x.Block
- }
- return nil
-}
-
-// ServerCapabilities allows providers to communicate extra information
-// regarding supported protocol features. This is used to indicate
-// availability of certain forward-compatible changes which may be optional
-// in a major protocol version, but cannot be tested for directly.
-type ServerCapabilities struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The plan_destroy capability signals that a provider expects a call
- // to PlanResourceChange when a resource is going to be destroyed.
- PlanDestroy bool `protobuf:"varint,1,opt,name=plan_destroy,json=planDestroy,proto3" json:"plan_destroy,omitempty"`
- // The get_provider_schema_optional capability indicates that this
- // provider does not require calling GetProviderSchema to operate
- // normally, and the caller can used a cached copy of the provider's
- // schema.
- GetProviderSchemaOptional bool `protobuf:"varint,2,opt,name=get_provider_schema_optional,json=getProviderSchemaOptional,proto3" json:"get_provider_schema_optional,omitempty"`
- // The move_resource_state capability signals that a provider supports the
- // MoveResourceState RPC.
- MoveResourceState bool `protobuf:"varint,3,opt,name=move_resource_state,json=moveResourceState,proto3" json:"move_resource_state,omitempty"`
-}
-
-func (x *ServerCapabilities) Reset() {
- *x = ServerCapabilities{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ServerCapabilities) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ServerCapabilities) ProtoMessage() {}
-
-func (x *ServerCapabilities) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ServerCapabilities.ProtoReflect.Descriptor instead.
-func (*ServerCapabilities) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *ServerCapabilities) GetPlanDestroy() bool {
- if x != nil {
- return x.PlanDestroy
- }
- return false
-}
-
-func (x *ServerCapabilities) GetGetProviderSchemaOptional() bool {
- if x != nil {
- return x.GetProviderSchemaOptional
- }
- return false
-}
-
-func (x *ServerCapabilities) GetMoveResourceState() bool {
- if x != nil {
- return x.MoveResourceState
- }
- return false
-}
-
-// ClientCapabilities allows Terraform to publish information regarding
-// supported protocol features. This is used to indicate availability of
-// certain forward-compatible changes which may be optional in a major
-// protocol version, but cannot be tested for directly.
-type ClientCapabilities struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The deferral_allowed capability signals that the client is able to
- // handle deferred responses from the provider.
- DeferralAllowed bool `protobuf:"varint,1,opt,name=deferral_allowed,json=deferralAllowed,proto3" json:"deferral_allowed,omitempty"`
-}
-
-func (x *ClientCapabilities) Reset() {
- *x = ClientCapabilities{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ClientCapabilities) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ClientCapabilities) ProtoMessage() {}
-
-func (x *ClientCapabilities) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ClientCapabilities.ProtoReflect.Descriptor instead.
-func (*ClientCapabilities) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *ClientCapabilities) GetDeferralAllowed() bool {
- if x != nil {
- return x.DeferralAllowed
- }
- return false
-}
-
-type Function struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // parameters is the ordered list of positional function parameters.
- Parameters []*Function_Parameter `protobuf:"bytes,1,rep,name=parameters,proto3" json:"parameters,omitempty"`
- // variadic_parameter is an optional final parameter which accepts
- // zero or more argument values, in which Terraform will send an
- // ordered list of the parameter type.
- VariadicParameter *Function_Parameter `protobuf:"bytes,2,opt,name=variadic_parameter,json=variadicParameter,proto3" json:"variadic_parameter,omitempty"`
- // return is the function result.
- Return *Function_Return `protobuf:"bytes,3,opt,name=return,proto3" json:"return,omitempty"`
- // summary is the human-readable shortened documentation for the function.
- Summary string `protobuf:"bytes,4,opt,name=summary,proto3" json:"summary,omitempty"`
- // description is human-readable documentation for the function.
- Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"`
- // description_kind is the formatting of the description.
- DescriptionKind StringKind `protobuf:"varint,6,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin5.StringKind" json:"description_kind,omitempty"`
- // deprecation_message is human-readable documentation if the
- // function is deprecated.
- DeprecationMessage string `protobuf:"bytes,7,opt,name=deprecation_message,json=deprecationMessage,proto3" json:"deprecation_message,omitempty"`
-}
-
-func (x *Function) Reset() {
- *x = Function{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Function) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Function) ProtoMessage() {}
-
-func (x *Function) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Function.ProtoReflect.Descriptor instead.
-func (*Function) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{9}
-}
-
-func (x *Function) GetParameters() []*Function_Parameter {
- if x != nil {
- return x.Parameters
- }
- return nil
-}
-
-func (x *Function) GetVariadicParameter() *Function_Parameter {
- if x != nil {
- return x.VariadicParameter
- }
- return nil
-}
-
-func (x *Function) GetReturn() *Function_Return {
- if x != nil {
- return x.Return
- }
- return nil
-}
-
-func (x *Function) GetSummary() string {
- if x != nil {
- return x.Summary
- }
- return ""
-}
-
-func (x *Function) GetDescription() string {
- if x != nil {
- return x.Description
- }
- return ""
-}
-
-func (x *Function) GetDescriptionKind() StringKind {
- if x != nil {
- return x.DescriptionKind
- }
- return StringKind_PLAIN
-}
-
-func (x *Function) GetDeprecationMessage() string {
- if x != nil {
- return x.DeprecationMessage
- }
- return ""
-}
-
-// Deferred is a message that indicates that change is deferred for a reason.
-type Deferred struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // reason is the reason for deferring the change.
- Reason Deferred_Reason `protobuf:"varint,1,opt,name=reason,proto3,enum=tfplugin5.Deferred_Reason" json:"reason,omitempty"`
-}
-
-func (x *Deferred) Reset() {
- *x = Deferred{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Deferred) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Deferred) ProtoMessage() {}
-
-func (x *Deferred) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Deferred.ProtoReflect.Descriptor instead.
-func (*Deferred) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{10}
-}
-
-func (x *Deferred) GetReason() Deferred_Reason {
- if x != nil {
- return x.Reason
- }
- return Deferred_UNKNOWN
-}
-
-type GetMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *GetMetadata) Reset() {
- *x = GetMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetMetadata) ProtoMessage() {}
-
-func (x *GetMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetMetadata.ProtoReflect.Descriptor instead.
-func (*GetMetadata) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{11}
-}
-
-type GetProviderSchema struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *GetProviderSchema) Reset() {
- *x = GetProviderSchema{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetProviderSchema) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetProviderSchema) ProtoMessage() {}
-
-func (x *GetProviderSchema) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetProviderSchema.ProtoReflect.Descriptor instead.
-func (*GetProviderSchema) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{12}
-}
-
-type PrepareProviderConfig struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *PrepareProviderConfig) Reset() {
- *x = PrepareProviderConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *PrepareProviderConfig) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*PrepareProviderConfig) ProtoMessage() {}
-
-func (x *PrepareProviderConfig) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use PrepareProviderConfig.ProtoReflect.Descriptor instead.
-func (*PrepareProviderConfig) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{13}
-}
-
-type UpgradeResourceState struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *UpgradeResourceState) Reset() {
- *x = UpgradeResourceState{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *UpgradeResourceState) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*UpgradeResourceState) ProtoMessage() {}
-
-func (x *UpgradeResourceState) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use UpgradeResourceState.ProtoReflect.Descriptor instead.
-func (*UpgradeResourceState) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{14}
-}
-
-type ValidateResourceTypeConfig struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *ValidateResourceTypeConfig) Reset() {
- *x = ValidateResourceTypeConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ValidateResourceTypeConfig) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ValidateResourceTypeConfig) ProtoMessage() {}
-
-func (x *ValidateResourceTypeConfig) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ValidateResourceTypeConfig.ProtoReflect.Descriptor instead.
-func (*ValidateResourceTypeConfig) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{15}
-}
-
-type ValidateDataSourceConfig struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *ValidateDataSourceConfig) Reset() {
- *x = ValidateDataSourceConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ValidateDataSourceConfig) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ValidateDataSourceConfig) ProtoMessage() {}
-
-func (x *ValidateDataSourceConfig) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ValidateDataSourceConfig.ProtoReflect.Descriptor instead.
-func (*ValidateDataSourceConfig) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{16}
-}
-
-type Configure struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *Configure) Reset() {
- *x = Configure{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Configure) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Configure) ProtoMessage() {}
-
-func (x *Configure) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Configure.ProtoReflect.Descriptor instead.
-func (*Configure) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{17}
-}
-
-type ReadResource struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *ReadResource) Reset() {
- *x = ReadResource{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ReadResource) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ReadResource) ProtoMessage() {}
-
-func (x *ReadResource) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ReadResource.ProtoReflect.Descriptor instead.
-func (*ReadResource) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{18}
-}
-
-type PlanResourceChange struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *PlanResourceChange) Reset() {
- *x = PlanResourceChange{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *PlanResourceChange) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*PlanResourceChange) ProtoMessage() {}
-
-func (x *PlanResourceChange) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use PlanResourceChange.ProtoReflect.Descriptor instead.
-func (*PlanResourceChange) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{19}
-}
-
-type ApplyResourceChange struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *ApplyResourceChange) Reset() {
- *x = ApplyResourceChange{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ApplyResourceChange) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ApplyResourceChange) ProtoMessage() {}
-
-func (x *ApplyResourceChange) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ApplyResourceChange.ProtoReflect.Descriptor instead.
-func (*ApplyResourceChange) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{20}
-}
-
-type ImportResourceState struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *ImportResourceState) Reset() {
- *x = ImportResourceState{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ImportResourceState) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ImportResourceState) ProtoMessage() {}
-
-func (x *ImportResourceState) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ImportResourceState.ProtoReflect.Descriptor instead.
-func (*ImportResourceState) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{21}
-}
-
-type MoveResourceState struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *MoveResourceState) Reset() {
- *x = MoveResourceState{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *MoveResourceState) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*MoveResourceState) ProtoMessage() {}
-
-func (x *MoveResourceState) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use MoveResourceState.ProtoReflect.Descriptor instead.
-func (*MoveResourceState) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{22}
-}
-
-type ReadDataSource struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *ReadDataSource) Reset() {
- *x = ReadDataSource{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ReadDataSource) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ReadDataSource) ProtoMessage() {}
-
-func (x *ReadDataSource) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ReadDataSource.ProtoReflect.Descriptor instead.
-func (*ReadDataSource) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{23}
-}
-
-type GetProvisionerSchema struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *GetProvisionerSchema) Reset() {
- *x = GetProvisionerSchema{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[24]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetProvisionerSchema) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetProvisionerSchema) ProtoMessage() {}
-
-func (x *GetProvisionerSchema) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[24]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetProvisionerSchema.ProtoReflect.Descriptor instead.
-func (*GetProvisionerSchema) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{24}
-}
-
-type ValidateProvisionerConfig struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *ValidateProvisionerConfig) Reset() {
- *x = ValidateProvisionerConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[25]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ValidateProvisionerConfig) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ValidateProvisionerConfig) ProtoMessage() {}
-
-func (x *ValidateProvisionerConfig) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[25]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ValidateProvisionerConfig.ProtoReflect.Descriptor instead.
-func (*ValidateProvisionerConfig) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{25}
-}
-
-type ProvisionResource struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *ProvisionResource) Reset() {
- *x = ProvisionResource{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ProvisionResource) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ProvisionResource) ProtoMessage() {}
-
-func (x *ProvisionResource) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[26]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ProvisionResource.ProtoReflect.Descriptor instead.
-func (*ProvisionResource) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{26}
-}
-
-type GetFunctions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *GetFunctions) Reset() {
- *x = GetFunctions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetFunctions) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetFunctions) ProtoMessage() {}
-
-func (x *GetFunctions) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetFunctions.ProtoReflect.Descriptor instead.
-func (*GetFunctions) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{27}
-}
-
-type CallFunction struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *CallFunction) Reset() {
- *x = CallFunction{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[28]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CallFunction) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CallFunction) ProtoMessage() {}
-
-func (x *CallFunction) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[28]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CallFunction.ProtoReflect.Descriptor instead.
-func (*CallFunction) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{28}
-}
-
-type AttributePath_Step struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Types that are assignable to Selector:
- //
- // *AttributePath_Step_AttributeName
- // *AttributePath_Step_ElementKeyString
- // *AttributePath_Step_ElementKeyInt
- Selector isAttributePath_Step_Selector `protobuf_oneof:"selector"`
-}
-
-func (x *AttributePath_Step) Reset() {
- *x = AttributePath_Step{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[29]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AttributePath_Step) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AttributePath_Step) ProtoMessage() {}
-
-func (x *AttributePath_Step) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[29]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AttributePath_Step.ProtoReflect.Descriptor instead.
-func (*AttributePath_Step) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{3, 0}
-}
-
-func (m *AttributePath_Step) GetSelector() isAttributePath_Step_Selector {
- if m != nil {
- return m.Selector
- }
- return nil
-}
-
-func (x *AttributePath_Step) GetAttributeName() string {
- if x, ok := x.GetSelector().(*AttributePath_Step_AttributeName); ok {
- return x.AttributeName
- }
- return ""
-}
-
-func (x *AttributePath_Step) GetElementKeyString() string {
- if x, ok := x.GetSelector().(*AttributePath_Step_ElementKeyString); ok {
- return x.ElementKeyString
- }
- return ""
-}
-
-func (x *AttributePath_Step) GetElementKeyInt() int64 {
- if x, ok := x.GetSelector().(*AttributePath_Step_ElementKeyInt); ok {
- return x.ElementKeyInt
- }
- return 0
-}
-
-type isAttributePath_Step_Selector interface {
- isAttributePath_Step_Selector()
-}
-
-type AttributePath_Step_AttributeName struct {
- // Set "attribute_name" to represent looking up an attribute
- // in the current object value.
- AttributeName string `protobuf:"bytes,1,opt,name=attribute_name,json=attributeName,proto3,oneof"`
-}
-
-type AttributePath_Step_ElementKeyString struct {
- // Set "element_key_*" to represent looking up an element in
- // an indexable collection type.
- ElementKeyString string `protobuf:"bytes,2,opt,name=element_key_string,json=elementKeyString,proto3,oneof"`
-}
-
-type AttributePath_Step_ElementKeyInt struct {
- ElementKeyInt int64 `protobuf:"varint,3,opt,name=element_key_int,json=elementKeyInt,proto3,oneof"`
-}
-
-func (*AttributePath_Step_AttributeName) isAttributePath_Step_Selector() {}
-
-func (*AttributePath_Step_ElementKeyString) isAttributePath_Step_Selector() {}
-
-func (*AttributePath_Step_ElementKeyInt) isAttributePath_Step_Selector() {}
-
-type Stop_Request struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *Stop_Request) Reset() {
- *x = Stop_Request{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[30]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Stop_Request) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Stop_Request) ProtoMessage() {}
-
-func (x *Stop_Request) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[30]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Stop_Request.ProtoReflect.Descriptor instead.
-func (*Stop_Request) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{4, 0}
-}
-
-type Stop_Response struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"`
-}
-
-func (x *Stop_Response) Reset() {
- *x = Stop_Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[31]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Stop_Response) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Stop_Response) ProtoMessage() {}
-
-func (x *Stop_Response) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[31]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Stop_Response.ProtoReflect.Descriptor instead.
-func (*Stop_Response) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{4, 1}
-}
-
-func (x *Stop_Response) GetError() string {
- if x != nil {
- return x.Error
- }
- return ""
-}
-
-type Schema_Block struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
- Attributes []*Schema_Attribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"`
- BlockTypes []*Schema_NestedBlock `protobuf:"bytes,3,rep,name=block_types,json=blockTypes,proto3" json:"block_types,omitempty"`
- Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
- DescriptionKind StringKind `protobuf:"varint,5,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin5.StringKind" json:"description_kind,omitempty"`
- Deprecated bool `protobuf:"varint,6,opt,name=deprecated,proto3" json:"deprecated,omitempty"`
-}
-
-func (x *Schema_Block) Reset() {
- *x = Schema_Block{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[33]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Schema_Block) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Schema_Block) ProtoMessage() {}
-
-func (x *Schema_Block) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[33]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Schema_Block.ProtoReflect.Descriptor instead.
-func (*Schema_Block) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{6, 0}
-}
-
-func (x *Schema_Block) GetVersion() int64 {
- if x != nil {
- return x.Version
- }
- return 0
-}
-
-func (x *Schema_Block) GetAttributes() []*Schema_Attribute {
- if x != nil {
- return x.Attributes
- }
- return nil
-}
-
-func (x *Schema_Block) GetBlockTypes() []*Schema_NestedBlock {
- if x != nil {
- return x.BlockTypes
- }
- return nil
-}
-
-func (x *Schema_Block) GetDescription() string {
- if x != nil {
- return x.Description
- }
- return ""
-}
-
-func (x *Schema_Block) GetDescriptionKind() StringKind {
- if x != nil {
- return x.DescriptionKind
- }
- return StringKind_PLAIN
-}
-
-func (x *Schema_Block) GetDeprecated() bool {
- if x != nil {
- return x.Deprecated
- }
- return false
-}
-
-type Schema_Attribute struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- Type []byte `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
- Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
- Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"`
- Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"`
- Computed bool `protobuf:"varint,6,opt,name=computed,proto3" json:"computed,omitempty"`
- Sensitive bool `protobuf:"varint,7,opt,name=sensitive,proto3" json:"sensitive,omitempty"`
- DescriptionKind StringKind `protobuf:"varint,8,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin5.StringKind" json:"description_kind,omitempty"`
- Deprecated bool `protobuf:"varint,9,opt,name=deprecated,proto3" json:"deprecated,omitempty"`
-}
-
-func (x *Schema_Attribute) Reset() {
- *x = Schema_Attribute{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[34]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Schema_Attribute) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Schema_Attribute) ProtoMessage() {}
-
-func (x *Schema_Attribute) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[34]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Schema_Attribute.ProtoReflect.Descriptor instead.
-func (*Schema_Attribute) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{6, 1}
-}
-
-func (x *Schema_Attribute) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *Schema_Attribute) GetType() []byte {
- if x != nil {
- return x.Type
- }
- return nil
-}
-
-func (x *Schema_Attribute) GetDescription() string {
- if x != nil {
- return x.Description
- }
- return ""
-}
-
-func (x *Schema_Attribute) GetRequired() bool {
- if x != nil {
- return x.Required
- }
- return false
-}
-
-func (x *Schema_Attribute) GetOptional() bool {
- if x != nil {
- return x.Optional
- }
- return false
-}
-
-func (x *Schema_Attribute) GetComputed() bool {
- if x != nil {
- return x.Computed
- }
- return false
-}
-
-func (x *Schema_Attribute) GetSensitive() bool {
- if x != nil {
- return x.Sensitive
- }
- return false
-}
-
-func (x *Schema_Attribute) GetDescriptionKind() StringKind {
- if x != nil {
- return x.DescriptionKind
- }
- return StringKind_PLAIN
-}
-
-func (x *Schema_Attribute) GetDeprecated() bool {
- if x != nil {
- return x.Deprecated
- }
- return false
-}
-
-type Schema_NestedBlock struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
- Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"`
- Nesting Schema_NestedBlock_NestingMode `protobuf:"varint,3,opt,name=nesting,proto3,enum=tfplugin5.Schema_NestedBlock_NestingMode" json:"nesting,omitempty"`
- MinItems int64 `protobuf:"varint,4,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
- MaxItems int64 `protobuf:"varint,5,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
-}
-
-func (x *Schema_NestedBlock) Reset() {
- *x = Schema_NestedBlock{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[35]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Schema_NestedBlock) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Schema_NestedBlock) ProtoMessage() {}
-
-func (x *Schema_NestedBlock) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[35]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Schema_NestedBlock.ProtoReflect.Descriptor instead.
-func (*Schema_NestedBlock) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{6, 2}
-}
-
-func (x *Schema_NestedBlock) GetTypeName() string {
- if x != nil {
- return x.TypeName
- }
- return ""
-}
-
-func (x *Schema_NestedBlock) GetBlock() *Schema_Block {
- if x != nil {
- return x.Block
- }
- return nil
-}
-
-func (x *Schema_NestedBlock) GetNesting() Schema_NestedBlock_NestingMode {
- if x != nil {
- return x.Nesting
- }
- return Schema_NestedBlock_INVALID
-}
-
-func (x *Schema_NestedBlock) GetMinItems() int64 {
- if x != nil {
- return x.MinItems
- }
- return 0
-}
-
-func (x *Schema_NestedBlock) GetMaxItems() int64 {
- if x != nil {
- return x.MaxItems
- }
- return 0
-}
-
-type Function_Parameter struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // name is the human-readable display name for the parameter.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // type is the type constraint for the parameter.
- Type []byte `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
- // allow_null_value when enabled denotes that a null argument value can
- // be passed to the provider. When disabled, Terraform returns an error
- // if the argument value is null.
- AllowNullValue bool `protobuf:"varint,3,opt,name=allow_null_value,json=allowNullValue,proto3" json:"allow_null_value,omitempty"`
- // allow_unknown_values when enabled denotes that only wholly known
- // argument values will be passed to the provider. When disabled,
- // Terraform skips the function call entirely and assumes an unknown
- // value result from the function.
- AllowUnknownValues bool `protobuf:"varint,4,opt,name=allow_unknown_values,json=allowUnknownValues,proto3" json:"allow_unknown_values,omitempty"`
- // description is human-readable documentation for the parameter.
- Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"`
- // description_kind is the formatting of the description.
- DescriptionKind StringKind `protobuf:"varint,6,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin5.StringKind" json:"description_kind,omitempty"`
-}
-
-func (x *Function_Parameter) Reset() {
- *x = Function_Parameter{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[36]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Function_Parameter) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Function_Parameter) ProtoMessage() {}
-
-func (x *Function_Parameter) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[36]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Function_Parameter.ProtoReflect.Descriptor instead.
-func (*Function_Parameter) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{9, 0}
-}
-
-func (x *Function_Parameter) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *Function_Parameter) GetType() []byte {
- if x != nil {
- return x.Type
- }
- return nil
-}
-
-func (x *Function_Parameter) GetAllowNullValue() bool {
- if x != nil {
- return x.AllowNullValue
- }
- return false
-}
-
-func (x *Function_Parameter) GetAllowUnknownValues() bool {
- if x != nil {
- return x.AllowUnknownValues
- }
- return false
-}
-
-func (x *Function_Parameter) GetDescription() string {
- if x != nil {
- return x.Description
- }
- return ""
-}
-
-func (x *Function_Parameter) GetDescriptionKind() StringKind {
- if x != nil {
- return x.DescriptionKind
- }
- return StringKind_PLAIN
-}
-
-type Function_Return struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // type is the type constraint for the function result.
- Type []byte `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
-}
-
-func (x *Function_Return) Reset() {
- *x = Function_Return{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[37]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Function_Return) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Function_Return) ProtoMessage() {}
-
-func (x *Function_Return) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[37]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Function_Return.ProtoReflect.Descriptor instead.
-func (*Function_Return) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{9, 1}
-}
-
-func (x *Function_Return) GetType() []byte {
- if x != nil {
- return x.Type
- }
- return nil
-}
-
-type GetMetadata_Request struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *GetMetadata_Request) Reset() {
- *x = GetMetadata_Request{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[38]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetMetadata_Request) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetMetadata_Request) ProtoMessage() {}
-
-func (x *GetMetadata_Request) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[38]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetMetadata_Request.ProtoReflect.Descriptor instead.
-func (*GetMetadata_Request) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{11, 0}
-}
-
-type GetMetadata_Response struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- ServerCapabilities *ServerCapabilities `protobuf:"bytes,1,opt,name=server_capabilities,json=serverCapabilities,proto3" json:"server_capabilities,omitempty"`
- Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
- DataSources []*GetMetadata_DataSourceMetadata `protobuf:"bytes,3,rep,name=data_sources,json=dataSources,proto3" json:"data_sources,omitempty"`
- Resources []*GetMetadata_ResourceMetadata `protobuf:"bytes,4,rep,name=resources,proto3" json:"resources,omitempty"`
- // functions returns metadata for any functions.
- Functions []*GetMetadata_FunctionMetadata `protobuf:"bytes,5,rep,name=functions,proto3" json:"functions,omitempty"`
-}
-
-func (x *GetMetadata_Response) Reset() {
- *x = GetMetadata_Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[39]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetMetadata_Response) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetMetadata_Response) ProtoMessage() {}
-
-func (x *GetMetadata_Response) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[39]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetMetadata_Response.ProtoReflect.Descriptor instead.
-func (*GetMetadata_Response) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{11, 1}
-}
-
-func (x *GetMetadata_Response) GetServerCapabilities() *ServerCapabilities {
- if x != nil {
- return x.ServerCapabilities
- }
- return nil
-}
-
-func (x *GetMetadata_Response) GetDiagnostics() []*Diagnostic {
- if x != nil {
- return x.Diagnostics
- }
- return nil
-}
-
-func (x *GetMetadata_Response) GetDataSources() []*GetMetadata_DataSourceMetadata {
- if x != nil {
- return x.DataSources
- }
- return nil
-}
-
-func (x *GetMetadata_Response) GetResources() []*GetMetadata_ResourceMetadata {
- if x != nil {
- return x.Resources
- }
- return nil
-}
-
-func (x *GetMetadata_Response) GetFunctions() []*GetMetadata_FunctionMetadata {
- if x != nil {
- return x.Functions
- }
- return nil
-}
-
-type GetMetadata_FunctionMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // name is the function name.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-}
-
-func (x *GetMetadata_FunctionMetadata) Reset() {
- *x = GetMetadata_FunctionMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[40]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetMetadata_FunctionMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetMetadata_FunctionMetadata) ProtoMessage() {}
-
-func (x *GetMetadata_FunctionMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[40]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetMetadata_FunctionMetadata.ProtoReflect.Descriptor instead.
-func (*GetMetadata_FunctionMetadata) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{11, 2}
-}
-
-func (x *GetMetadata_FunctionMetadata) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-type GetMetadata_DataSourceMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
-}
-
-func (x *GetMetadata_DataSourceMetadata) Reset() {
- *x = GetMetadata_DataSourceMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[41]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetMetadata_DataSourceMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetMetadata_DataSourceMetadata) ProtoMessage() {}
-
-func (x *GetMetadata_DataSourceMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[41]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetMetadata_DataSourceMetadata.ProtoReflect.Descriptor instead.
-func (*GetMetadata_DataSourceMetadata) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{11, 3}
-}
-
-func (x *GetMetadata_DataSourceMetadata) GetTypeName() string {
- if x != nil {
- return x.TypeName
- }
- return ""
-}
-
-type GetMetadata_ResourceMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
-}
-
-func (x *GetMetadata_ResourceMetadata) Reset() {
- *x = GetMetadata_ResourceMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[42]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetMetadata_ResourceMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetMetadata_ResourceMetadata) ProtoMessage() {}
-
-func (x *GetMetadata_ResourceMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[42]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetMetadata_ResourceMetadata.ProtoReflect.Descriptor instead.
-func (*GetMetadata_ResourceMetadata) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{11, 4}
-}
-
-func (x *GetMetadata_ResourceMetadata) GetTypeName() string {
- if x != nil {
- return x.TypeName
- }
- return ""
-}
-
-type GetProviderSchema_Request struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *GetProviderSchema_Request) Reset() {
- *x = GetProviderSchema_Request{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[43]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetProviderSchema_Request) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetProviderSchema_Request) ProtoMessage() {}
-
-func (x *GetProviderSchema_Request) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[43]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetProviderSchema_Request.ProtoReflect.Descriptor instead.
-func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{12, 0}
-}
-
-type GetProviderSchema_Response struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Provider *Schema `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider,omitempty"`
- ResourceSchemas map[string]*Schema `protobuf:"bytes,2,rep,name=resource_schemas,json=resourceSchemas,proto3" json:"resource_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- DataSourceSchemas map[string]*Schema `protobuf:"bytes,3,rep,name=data_source_schemas,json=dataSourceSchemas,proto3" json:"data_source_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
- ProviderMeta *Schema `protobuf:"bytes,5,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"`
- ServerCapabilities *ServerCapabilities `protobuf:"bytes,6,opt,name=server_capabilities,json=serverCapabilities,proto3" json:"server_capabilities,omitempty"`
- // functions is a mapping of function names to definitions.
- Functions map[string]*Function `protobuf:"bytes,7,rep,name=functions,proto3" json:"functions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
-}
-
-func (x *GetProviderSchema_Response) Reset() {
- *x = GetProviderSchema_Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[44]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetProviderSchema_Response) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetProviderSchema_Response) ProtoMessage() {}
-
-func (x *GetProviderSchema_Response) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[44]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetProviderSchema_Response.ProtoReflect.Descriptor instead.
-func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{12, 1}
-}
-
-func (x *GetProviderSchema_Response) GetProvider() *Schema {
- if x != nil {
- return x.Provider
- }
- return nil
-}
-
-func (x *GetProviderSchema_Response) GetResourceSchemas() map[string]*Schema {
- if x != nil {
- return x.ResourceSchemas
- }
- return nil
-}
-
-func (x *GetProviderSchema_Response) GetDataSourceSchemas() map[string]*Schema {
- if x != nil {
- return x.DataSourceSchemas
- }
- return nil
-}
-
-func (x *GetProviderSchema_Response) GetDiagnostics() []*Diagnostic {
- if x != nil {
- return x.Diagnostics
- }
- return nil
-}
-
-func (x *GetProviderSchema_Response) GetProviderMeta() *Schema {
- if x != nil {
- return x.ProviderMeta
- }
- return nil
-}
-
-func (x *GetProviderSchema_Response) GetServerCapabilities() *ServerCapabilities {
- if x != nil {
- return x.ServerCapabilities
- }
- return nil
-}
-
-func (x *GetProviderSchema_Response) GetFunctions() map[string]*Function {
- if x != nil {
- return x.Functions
- }
- return nil
-}
-
-type PrepareProviderConfig_Request struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
-}
-
-func (x *PrepareProviderConfig_Request) Reset() {
- *x = PrepareProviderConfig_Request{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[48]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *PrepareProviderConfig_Request) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*PrepareProviderConfig_Request) ProtoMessage() {}
-
-func (x *PrepareProviderConfig_Request) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[48]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use PrepareProviderConfig_Request.ProtoReflect.Descriptor instead.
-func (*PrepareProviderConfig_Request) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{13, 0}
-}
-
-func (x *PrepareProviderConfig_Request) GetConfig() *DynamicValue {
- if x != nil {
- return x.Config
- }
- return nil
-}
-
-type PrepareProviderConfig_Response struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- PreparedConfig *DynamicValue `protobuf:"bytes,1,opt,name=prepared_config,json=preparedConfig,proto3" json:"prepared_config,omitempty"`
- Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
-}
-
-func (x *PrepareProviderConfig_Response) Reset() {
- *x = PrepareProviderConfig_Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[49]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *PrepareProviderConfig_Response) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*PrepareProviderConfig_Response) ProtoMessage() {}
-
-func (x *PrepareProviderConfig_Response) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[49]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use PrepareProviderConfig_Response.ProtoReflect.Descriptor instead.
-func (*PrepareProviderConfig_Response) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{13, 1}
-}
-
-func (x *PrepareProviderConfig_Response) GetPreparedConfig() *DynamicValue {
- if x != nil {
- return x.PreparedConfig
- }
- return nil
-}
-
-func (x *PrepareProviderConfig_Response) GetDiagnostics() []*Diagnostic {
- if x != nil {
- return x.Diagnostics
- }
- return nil
-}
-
-// Request is the message that is sent to the provider during the
-// UpgradeResourceState RPC.
-//
-// This message intentionally does not include configuration data as any
-// configuration-based or configuration-conditional changes should occur
-// during the PlanResourceChange RPC. Additionally, the configuration is
-// not guaranteed to exist (in the case of resource destruction), be wholly
-// known, nor match the given prior state, which could lead to unexpected
-// provider behaviors for practitioners.
-type UpgradeResourceState_Request struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
- // version is the schema_version number recorded in the state file
- Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
- // raw_state is the raw states as stored for the resource. Core does
- // not have access to the schema of prior_version, so it's the
- // provider's responsibility to interpret this value using the
- // appropriate older schema. The raw_state will be the json encoded
- // state, or a legacy flat-mapped format.
- RawState *RawState `protobuf:"bytes,3,opt,name=raw_state,json=rawState,proto3" json:"raw_state,omitempty"`
-}
-
-func (x *UpgradeResourceState_Request) Reset() {
- *x = UpgradeResourceState_Request{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[50]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *UpgradeResourceState_Request) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*UpgradeResourceState_Request) ProtoMessage() {}
-
-func (x *UpgradeResourceState_Request) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[50]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use UpgradeResourceState_Request.ProtoReflect.Descriptor instead.
-func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{14, 0}
-}
-
-func (x *UpgradeResourceState_Request) GetTypeName() string {
- if x != nil {
- return x.TypeName
- }
- return ""
-}
-
-func (x *UpgradeResourceState_Request) GetVersion() int64 {
- if x != nil {
- return x.Version
- }
- return 0
-}
-
-func (x *UpgradeResourceState_Request) GetRawState() *RawState {
- if x != nil {
- return x.RawState
- }
- return nil
-}
-
-type UpgradeResourceState_Response struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // new_state is a msgpack-encoded data structure that, when interpreted with
- // the _current_ schema for this resource type, is functionally equivalent to
- // that which was given in prior_state_raw.
- UpgradedState *DynamicValue `protobuf:"bytes,1,opt,name=upgraded_state,json=upgradedState,proto3" json:"upgraded_state,omitempty"`
- // diagnostics describes any errors encountered during migration that could not
- // be safely resolved, and warnings about any possibly-risky assumptions made
- // in the upgrade process.
- Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
-}
-
-func (x *UpgradeResourceState_Response) Reset() {
- *x = UpgradeResourceState_Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[51]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *UpgradeResourceState_Response) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*UpgradeResourceState_Response) ProtoMessage() {}
-
-func (x *UpgradeResourceState_Response) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[51]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use UpgradeResourceState_Response.ProtoReflect.Descriptor instead.
-func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{14, 1}
-}
-
-func (x *UpgradeResourceState_Response) GetUpgradedState() *DynamicValue {
- if x != nil {
- return x.UpgradedState
- }
- return nil
-}
-
-func (x *UpgradeResourceState_Response) GetDiagnostics() []*Diagnostic {
- if x != nil {
- return x.Diagnostics
- }
- return nil
-}
-
-type ValidateResourceTypeConfig_Request struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
- Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
-}
-
-func (x *ValidateResourceTypeConfig_Request) Reset() {
- *x = ValidateResourceTypeConfig_Request{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[52]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ValidateResourceTypeConfig_Request) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ValidateResourceTypeConfig_Request) ProtoMessage() {}
-
-func (x *ValidateResourceTypeConfig_Request) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[52]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ValidateResourceTypeConfig_Request.ProtoReflect.Descriptor instead.
-func (*ValidateResourceTypeConfig_Request) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{15, 0}
-}
-
-func (x *ValidateResourceTypeConfig_Request) GetTypeName() string {
- if x != nil {
- return x.TypeName
- }
- return ""
-}
-
-func (x *ValidateResourceTypeConfig_Request) GetConfig() *DynamicValue {
- if x != nil {
- return x.Config
- }
- return nil
-}
-
-type ValidateResourceTypeConfig_Response struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
-}
-
-func (x *ValidateResourceTypeConfig_Response) Reset() {
- *x = ValidateResourceTypeConfig_Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[53]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ValidateResourceTypeConfig_Response) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ValidateResourceTypeConfig_Response) ProtoMessage() {}
-
-func (x *ValidateResourceTypeConfig_Response) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[53]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ValidateResourceTypeConfig_Response.ProtoReflect.Descriptor instead.
-func (*ValidateResourceTypeConfig_Response) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{15, 1}
-}
-
-func (x *ValidateResourceTypeConfig_Response) GetDiagnostics() []*Diagnostic {
- if x != nil {
- return x.Diagnostics
- }
- return nil
-}
-
-type ValidateDataSourceConfig_Request struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
- Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
-}
-
-func (x *ValidateDataSourceConfig_Request) Reset() {
- *x = ValidateDataSourceConfig_Request{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[54]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ValidateDataSourceConfig_Request) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ValidateDataSourceConfig_Request) ProtoMessage() {}
-
-func (x *ValidateDataSourceConfig_Request) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[54]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ValidateDataSourceConfig_Request.ProtoReflect.Descriptor instead.
-func (*ValidateDataSourceConfig_Request) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{16, 0}
-}
-
-func (x *ValidateDataSourceConfig_Request) GetTypeName() string {
- if x != nil {
- return x.TypeName
- }
- return ""
-}
-
-func (x *ValidateDataSourceConfig_Request) GetConfig() *DynamicValue {
- if x != nil {
- return x.Config
- }
- return nil
-}
-
-type ValidateDataSourceConfig_Response struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
-}
-
-func (x *ValidateDataSourceConfig_Response) Reset() {
- *x = ValidateDataSourceConfig_Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[55]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ValidateDataSourceConfig_Response) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ValidateDataSourceConfig_Response) ProtoMessage() {}
-
-func (x *ValidateDataSourceConfig_Response) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[55]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ValidateDataSourceConfig_Response.ProtoReflect.Descriptor instead.
-func (*ValidateDataSourceConfig_Response) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{16, 1}
-}
-
-func (x *ValidateDataSourceConfig_Response) GetDiagnostics() []*Diagnostic {
- if x != nil {
- return x.Diagnostics
- }
- return nil
-}
-
-type Configure_Request struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- TerraformVersion string `protobuf:"bytes,1,opt,name=terraform_version,json=terraformVersion,proto3" json:"terraform_version,omitempty"`
- Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
- ClientCapabilities *ClientCapabilities `protobuf:"bytes,3,opt,name=client_capabilities,json=clientCapabilities,proto3" json:"client_capabilities,omitempty"`
-}
-
-func (x *Configure_Request) Reset() {
- *x = Configure_Request{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[56]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Configure_Request) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Configure_Request) ProtoMessage() {}
-
-func (x *Configure_Request) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[56]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Configure_Request.ProtoReflect.Descriptor instead.
-func (*Configure_Request) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{17, 0}
-}
-
-func (x *Configure_Request) GetTerraformVersion() string {
- if x != nil {
- return x.TerraformVersion
- }
- return ""
-}
-
-func (x *Configure_Request) GetConfig() *DynamicValue {
- if x != nil {
- return x.Config
- }
- return nil
-}
-
-func (x *Configure_Request) GetClientCapabilities() *ClientCapabilities {
- if x != nil {
- return x.ClientCapabilities
- }
- return nil
-}
-
-type Configure_Response struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
-}
-
-func (x *Configure_Response) Reset() {
- *x = Configure_Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[57]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Configure_Response) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Configure_Response) ProtoMessage() {}
-
-func (x *Configure_Response) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[57]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Configure_Response.ProtoReflect.Descriptor instead.
-func (*Configure_Response) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{17, 1}
-}
-
-func (x *Configure_Response) GetDiagnostics() []*Diagnostic {
- if x != nil {
- return x.Diagnostics
- }
- return nil
-}
-
-// Request is the message that is sent to the provider during the
-// ReadResource RPC.
-//
-// This message intentionally does not include configuration data as any
-// configuration-based or configuration-conditional changes should occur
-// during the PlanResourceChange RPC. Additionally, the configuration is
-// not guaranteed to be wholly known nor match the given prior state, which
-// could lead to unexpected provider behaviors for practitioners.
-type ReadResource_Request struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
- CurrentState *DynamicValue `protobuf:"bytes,2,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"`
- Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"`
- ProviderMeta *DynamicValue `protobuf:"bytes,4,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"`
- ClientCapabilities *ClientCapabilities `protobuf:"bytes,5,opt,name=client_capabilities,json=clientCapabilities,proto3" json:"client_capabilities,omitempty"`
-}
-
-func (x *ReadResource_Request) Reset() {
- *x = ReadResource_Request{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[58]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ReadResource_Request) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ReadResource_Request) ProtoMessage() {}
-
-func (x *ReadResource_Request) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[58]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ReadResource_Request.ProtoReflect.Descriptor instead.
-func (*ReadResource_Request) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{18, 0}
-}
-
-func (x *ReadResource_Request) GetTypeName() string {
- if x != nil {
- return x.TypeName
- }
- return ""
-}
-
-func (x *ReadResource_Request) GetCurrentState() *DynamicValue {
- if x != nil {
- return x.CurrentState
- }
- return nil
-}
-
-func (x *ReadResource_Request) GetPrivate() []byte {
- if x != nil {
- return x.Private
- }
- return nil
-}
-
-func (x *ReadResource_Request) GetProviderMeta() *DynamicValue {
- if x != nil {
- return x.ProviderMeta
- }
- return nil
-}
-
-func (x *ReadResource_Request) GetClientCapabilities() *ClientCapabilities {
- if x != nil {
- return x.ClientCapabilities
- }
- return nil
-}
-
-type ReadResource_Response struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"`
- Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
- Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"`
- // deferred is set if the provider is deferring the change. If set the caller
- // needs to handle the deferral.
- Deferred *Deferred `protobuf:"bytes,4,opt,name=deferred,proto3" json:"deferred,omitempty"`
-}
-
-func (x *ReadResource_Response) Reset() {
- *x = ReadResource_Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[59]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ReadResource_Response) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ReadResource_Response) ProtoMessage() {}
-
-func (x *ReadResource_Response) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[59]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ReadResource_Response.ProtoReflect.Descriptor instead.
-func (*ReadResource_Response) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{18, 1}
-}
-
-func (x *ReadResource_Response) GetNewState() *DynamicValue {
- if x != nil {
- return x.NewState
- }
- return nil
-}
-
-func (x *ReadResource_Response) GetDiagnostics() []*Diagnostic {
- if x != nil {
- return x.Diagnostics
- }
- return nil
-}
-
-func (x *ReadResource_Response) GetPrivate() []byte {
- if x != nil {
- return x.Private
- }
- return nil
-}
-
-func (x *ReadResource_Response) GetDeferred() *Deferred {
- if x != nil {
- return x.Deferred
- }
- return nil
-}
-
-type PlanResourceChange_Request struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
- PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"`
- ProposedNewState *DynamicValue `protobuf:"bytes,3,opt,name=proposed_new_state,json=proposedNewState,proto3" json:"proposed_new_state,omitempty"`
- Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"`
- PriorPrivate []byte `protobuf:"bytes,5,opt,name=prior_private,json=priorPrivate,proto3" json:"prior_private,omitempty"`
- ProviderMeta *DynamicValue `protobuf:"bytes,6,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"`
- ClientCapabilities *ClientCapabilities `protobuf:"bytes,7,opt,name=client_capabilities,json=clientCapabilities,proto3" json:"client_capabilities,omitempty"`
-}
-
-func (x *PlanResourceChange_Request) Reset() {
- *x = PlanResourceChange_Request{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[60]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *PlanResourceChange_Request) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*PlanResourceChange_Request) ProtoMessage() {}
-
-func (x *PlanResourceChange_Request) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[60]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use PlanResourceChange_Request.ProtoReflect.Descriptor instead.
-func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{19, 0}
-}
-
-func (x *PlanResourceChange_Request) GetTypeName() string {
- if x != nil {
- return x.TypeName
- }
- return ""
-}
-
-func (x *PlanResourceChange_Request) GetPriorState() *DynamicValue {
- if x != nil {
- return x.PriorState
- }
- return nil
-}
-
-func (x *PlanResourceChange_Request) GetProposedNewState() *DynamicValue {
- if x != nil {
- return x.ProposedNewState
- }
- return nil
-}
-
-func (x *PlanResourceChange_Request) GetConfig() *DynamicValue {
- if x != nil {
- return x.Config
- }
- return nil
-}
-
-func (x *PlanResourceChange_Request) GetPriorPrivate() []byte {
- if x != nil {
- return x.PriorPrivate
- }
- return nil
-}
-
-func (x *PlanResourceChange_Request) GetProviderMeta() *DynamicValue {
- if x != nil {
- return x.ProviderMeta
- }
- return nil
-}
-
-func (x *PlanResourceChange_Request) GetClientCapabilities() *ClientCapabilities {
- if x != nil {
- return x.ClientCapabilities
- }
- return nil
-}
-
-type PlanResourceChange_Response struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- PlannedState *DynamicValue `protobuf:"bytes,1,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"`
- RequiresReplace []*AttributePath `protobuf:"bytes,2,rep,name=requires_replace,json=requiresReplace,proto3" json:"requires_replace,omitempty"`
- PlannedPrivate []byte `protobuf:"bytes,3,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"`
- Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
- // This may be set only by the helper/schema "SDK" in the main Terraform
- // repository, to request that Terraform Core >=0.12 permit additional
- // inconsistencies that can result from the legacy SDK type system
- // and its imprecise mapping to the >=0.12 type system.
- // The change in behavior implied by this flag makes sense only for the
- // specific details of the legacy SDK type system, and are not a general
- // mechanism to avoid proper type handling in providers.
- //
- // ==== DO NOT USE THIS ====
- // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
- // ==== DO NOT USE THIS ====
- LegacyTypeSystem bool `protobuf:"varint,5,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"`
- // deferred is set if the provider is deferring the change. If set the caller
- // needs to handle the deferral.
- Deferred *Deferred `protobuf:"bytes,6,opt,name=deferred,proto3" json:"deferred,omitempty"`
-}
-
-func (x *PlanResourceChange_Response) Reset() {
- *x = PlanResourceChange_Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[61]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *PlanResourceChange_Response) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*PlanResourceChange_Response) ProtoMessage() {}
-
-func (x *PlanResourceChange_Response) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[61]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use PlanResourceChange_Response.ProtoReflect.Descriptor instead.
-func (*PlanResourceChange_Response) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{19, 1}
-}
-
-func (x *PlanResourceChange_Response) GetPlannedState() *DynamicValue {
- if x != nil {
- return x.PlannedState
- }
- return nil
-}
-
-func (x *PlanResourceChange_Response) GetRequiresReplace() []*AttributePath {
- if x != nil {
- return x.RequiresReplace
- }
- return nil
-}
-
-func (x *PlanResourceChange_Response) GetPlannedPrivate() []byte {
- if x != nil {
- return x.PlannedPrivate
- }
- return nil
-}
-
-func (x *PlanResourceChange_Response) GetDiagnostics() []*Diagnostic {
- if x != nil {
- return x.Diagnostics
- }
- return nil
-}
-
-func (x *PlanResourceChange_Response) GetLegacyTypeSystem() bool {
- if x != nil {
- return x.LegacyTypeSystem
- }
- return false
-}
-
-func (x *PlanResourceChange_Response) GetDeferred() *Deferred {
- if x != nil {
- return x.Deferred
- }
- return nil
-}
-
-type ApplyResourceChange_Request struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
- PriorState *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"`
- PlannedState *DynamicValue `protobuf:"bytes,3,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"`
- Config *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"`
- PlannedPrivate []byte `protobuf:"bytes,5,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"`
- ProviderMeta *DynamicValue `protobuf:"bytes,6,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"`
-}
-
-func (x *ApplyResourceChange_Request) Reset() {
- *x = ApplyResourceChange_Request{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[62]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ApplyResourceChange_Request) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ApplyResourceChange_Request) ProtoMessage() {}
-
-func (x *ApplyResourceChange_Request) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[62]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ApplyResourceChange_Request.ProtoReflect.Descriptor instead.
-func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{20, 0}
-}
-
-func (x *ApplyResourceChange_Request) GetTypeName() string {
- if x != nil {
- return x.TypeName
- }
- return ""
-}
-
-func (x *ApplyResourceChange_Request) GetPriorState() *DynamicValue {
- if x != nil {
- return x.PriorState
- }
- return nil
-}
-
-func (x *ApplyResourceChange_Request) GetPlannedState() *DynamicValue {
- if x != nil {
- return x.PlannedState
- }
- return nil
-}
-
-func (x *ApplyResourceChange_Request) GetConfig() *DynamicValue {
- if x != nil {
- return x.Config
- }
- return nil
-}
-
-func (x *ApplyResourceChange_Request) GetPlannedPrivate() []byte {
- if x != nil {
- return x.PlannedPrivate
- }
- return nil
-}
-
-func (x *ApplyResourceChange_Request) GetProviderMeta() *DynamicValue {
- if x != nil {
- return x.ProviderMeta
- }
- return nil
-}
-
-type ApplyResourceChange_Response struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- NewState *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"`
- Private []byte `protobuf:"bytes,2,opt,name=private,proto3" json:"private,omitempty"`
- Diagnostics []*Diagnostic `protobuf:"bytes,3,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
- // This may be set only by the helper/schema "SDK" in the main Terraform
- // repository, to request that Terraform Core >=0.12 permit additional
- // inconsistencies that can result from the legacy SDK type system
- // and its imprecise mapping to the >=0.12 type system.
- // The change in behavior implied by this flag makes sense only for the
- // specific details of the legacy SDK type system, and are not a general
- // mechanism to avoid proper type handling in providers.
- //
- // ==== DO NOT USE THIS ====
- // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
- // ==== DO NOT USE THIS ====
- LegacyTypeSystem bool `protobuf:"varint,4,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"`
-}
-
-func (x *ApplyResourceChange_Response) Reset() {
- *x = ApplyResourceChange_Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[63]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ApplyResourceChange_Response) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ApplyResourceChange_Response) ProtoMessage() {}
-
-func (x *ApplyResourceChange_Response) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[63]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ApplyResourceChange_Response.ProtoReflect.Descriptor instead.
-func (*ApplyResourceChange_Response) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{20, 1}
-}
-
-func (x *ApplyResourceChange_Response) GetNewState() *DynamicValue {
- if x != nil {
- return x.NewState
- }
- return nil
-}
-
-func (x *ApplyResourceChange_Response) GetPrivate() []byte {
- if x != nil {
- return x.Private
- }
- return nil
-}
-
-func (x *ApplyResourceChange_Response) GetDiagnostics() []*Diagnostic {
- if x != nil {
- return x.Diagnostics
- }
- return nil
-}
-
-func (x *ApplyResourceChange_Response) GetLegacyTypeSystem() bool {
- if x != nil {
- return x.LegacyTypeSystem
- }
- return false
-}
-
-type ImportResourceState_Request struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
- Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
- ClientCapabilities *ClientCapabilities `protobuf:"bytes,3,opt,name=client_capabilities,json=clientCapabilities,proto3" json:"client_capabilities,omitempty"`
-}
-
-func (x *ImportResourceState_Request) Reset() {
- *x = ImportResourceState_Request{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[64]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ImportResourceState_Request) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ImportResourceState_Request) ProtoMessage() {}
-
-func (x *ImportResourceState_Request) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[64]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ImportResourceState_Request.ProtoReflect.Descriptor instead.
-func (*ImportResourceState_Request) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{21, 0}
-}
-
-func (x *ImportResourceState_Request) GetTypeName() string {
- if x != nil {
- return x.TypeName
- }
- return ""
-}
-
-func (x *ImportResourceState_Request) GetId() string {
- if x != nil {
- return x.Id
- }
- return ""
-}
-
-func (x *ImportResourceState_Request) GetClientCapabilities() *ClientCapabilities {
- if x != nil {
- return x.ClientCapabilities
- }
- return nil
-}
-
-type ImportResourceState_ImportedResource struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
- State *DynamicValue `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"`
- Private []byte `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"`
-}
-
-func (x *ImportResourceState_ImportedResource) Reset() {
- *x = ImportResourceState_ImportedResource{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[65]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ImportResourceState_ImportedResource) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ImportResourceState_ImportedResource) ProtoMessage() {}
-
-func (x *ImportResourceState_ImportedResource) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[65]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ImportResourceState_ImportedResource.ProtoReflect.Descriptor instead.
-func (*ImportResourceState_ImportedResource) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{21, 1}
-}
-
-func (x *ImportResourceState_ImportedResource) GetTypeName() string {
- if x != nil {
- return x.TypeName
- }
- return ""
-}
-
-func (x *ImportResourceState_ImportedResource) GetState() *DynamicValue {
- if x != nil {
- return x.State
- }
- return nil
-}
-
-func (x *ImportResourceState_ImportedResource) GetPrivate() []byte {
- if x != nil {
- return x.Private
- }
- return nil
-}
-
-type ImportResourceState_Response struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- ImportedResources []*ImportResourceState_ImportedResource `protobuf:"bytes,1,rep,name=imported_resources,json=importedResources,proto3" json:"imported_resources,omitempty"`
- Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
- // deferred is set if the provider is deferring the change. If set the caller
- // needs to handle the deferral.
- Deferred *Deferred `protobuf:"bytes,3,opt,name=deferred,proto3" json:"deferred,omitempty"`
-}
-
-func (x *ImportResourceState_Response) Reset() {
- *x = ImportResourceState_Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[66]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ImportResourceState_Response) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ImportResourceState_Response) ProtoMessage() {}
-
-func (x *ImportResourceState_Response) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[66]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ImportResourceState_Response.ProtoReflect.Descriptor instead.
-func (*ImportResourceState_Response) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{21, 2}
-}
-
-func (x *ImportResourceState_Response) GetImportedResources() []*ImportResourceState_ImportedResource {
- if x != nil {
- return x.ImportedResources
- }
- return nil
-}
-
-func (x *ImportResourceState_Response) GetDiagnostics() []*Diagnostic {
- if x != nil {
- return x.Diagnostics
- }
- return nil
-}
-
-func (x *ImportResourceState_Response) GetDeferred() *Deferred {
- if x != nil {
- return x.Deferred
- }
- return nil
-}
-
-type MoveResourceState_Request struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The address of the provider the resource is being moved from.
- SourceProviderAddress string `protobuf:"bytes,1,opt,name=source_provider_address,json=sourceProviderAddress,proto3" json:"source_provider_address,omitempty"`
- // The resource type that the resource is being moved from.
- SourceTypeName string `protobuf:"bytes,2,opt,name=source_type_name,json=sourceTypeName,proto3" json:"source_type_name,omitempty"`
- // The schema version of the resource type that the resource is being
- // moved from.
- SourceSchemaVersion int64 `protobuf:"varint,3,opt,name=source_schema_version,json=sourceSchemaVersion,proto3" json:"source_schema_version,omitempty"`
- // The raw state of the resource being moved. Only the json field is
- // populated, as there should be no legacy providers using the flatmap
- // format that support newly introduced RPCs.
- SourceState *RawState `protobuf:"bytes,4,opt,name=source_state,json=sourceState,proto3" json:"source_state,omitempty"`
- // The resource type that the resource is being moved to.
- TargetTypeName string `protobuf:"bytes,5,opt,name=target_type_name,json=targetTypeName,proto3" json:"target_type_name,omitempty"`
- // The private state of the resource being moved.
- SourcePrivate []byte `protobuf:"bytes,6,opt,name=source_private,json=sourcePrivate,proto3" json:"source_private,omitempty"`
-}
-
-func (x *MoveResourceState_Request) Reset() {
- *x = MoveResourceState_Request{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[67]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *MoveResourceState_Request) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*MoveResourceState_Request) ProtoMessage() {}
-
-func (x *MoveResourceState_Request) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[67]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use MoveResourceState_Request.ProtoReflect.Descriptor instead.
-func (*MoveResourceState_Request) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{22, 0}
-}
-
-func (x *MoveResourceState_Request) GetSourceProviderAddress() string {
- if x != nil {
- return x.SourceProviderAddress
- }
- return ""
-}
-
-func (x *MoveResourceState_Request) GetSourceTypeName() string {
- if x != nil {
- return x.SourceTypeName
- }
- return ""
-}
-
-func (x *MoveResourceState_Request) GetSourceSchemaVersion() int64 {
- if x != nil {
- return x.SourceSchemaVersion
- }
- return 0
-}
-
-func (x *MoveResourceState_Request) GetSourceState() *RawState {
- if x != nil {
- return x.SourceState
- }
- return nil
-}
-
-func (x *MoveResourceState_Request) GetTargetTypeName() string {
- if x != nil {
- return x.TargetTypeName
- }
- return ""
-}
-
-func (x *MoveResourceState_Request) GetSourcePrivate() []byte {
- if x != nil {
- return x.SourcePrivate
- }
- return nil
-}
-
-type MoveResourceState_Response struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The state of the resource after it has been moved.
- TargetState *DynamicValue `protobuf:"bytes,1,opt,name=target_state,json=targetState,proto3" json:"target_state,omitempty"`
- // Any diagnostics that occurred during the move.
- Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
- // The private state of the resource after it has been moved.
- TargetPrivate []byte `protobuf:"bytes,3,opt,name=target_private,json=targetPrivate,proto3" json:"target_private,omitempty"`
-}
-
-func (x *MoveResourceState_Response) Reset() {
- *x = MoveResourceState_Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[68]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *MoveResourceState_Response) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*MoveResourceState_Response) ProtoMessage() {}
-
-func (x *MoveResourceState_Response) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[68]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use MoveResourceState_Response.ProtoReflect.Descriptor instead.
-func (*MoveResourceState_Response) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{22, 1}
-}
-
-func (x *MoveResourceState_Response) GetTargetState() *DynamicValue {
- if x != nil {
- return x.TargetState
- }
- return nil
-}
-
-func (x *MoveResourceState_Response) GetDiagnostics() []*Diagnostic {
- if x != nil {
- return x.Diagnostics
- }
- return nil
-}
-
-func (x *MoveResourceState_Response) GetTargetPrivate() []byte {
- if x != nil {
- return x.TargetPrivate
- }
- return nil
-}
-
-type ReadDataSource_Request struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
- Config *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
- ProviderMeta *DynamicValue `protobuf:"bytes,3,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"`
- ClientCapabilities *ClientCapabilities `protobuf:"bytes,4,opt,name=client_capabilities,json=clientCapabilities,proto3" json:"client_capabilities,omitempty"`
-}
-
-func (x *ReadDataSource_Request) Reset() {
- *x = ReadDataSource_Request{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[69]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ReadDataSource_Request) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ReadDataSource_Request) ProtoMessage() {}
-
-func (x *ReadDataSource_Request) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[69]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ReadDataSource_Request.ProtoReflect.Descriptor instead.
-func (*ReadDataSource_Request) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{23, 0}
-}
-
-func (x *ReadDataSource_Request) GetTypeName() string {
- if x != nil {
- return x.TypeName
- }
- return ""
-}
-
-func (x *ReadDataSource_Request) GetConfig() *DynamicValue {
- if x != nil {
- return x.Config
- }
- return nil
-}
-
-func (x *ReadDataSource_Request) GetProviderMeta() *DynamicValue {
- if x != nil {
- return x.ProviderMeta
- }
- return nil
-}
-
-func (x *ReadDataSource_Request) GetClientCapabilities() *ClientCapabilities {
- if x != nil {
- return x.ClientCapabilities
- }
- return nil
-}
-
-type ReadDataSource_Response struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- State *DynamicValue `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"`
- Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
- // deferred is set if the provider is deferring the change. If set the caller
- // needs to handle the deferral.
- Deferred *Deferred `protobuf:"bytes,3,opt,name=deferred,proto3" json:"deferred,omitempty"`
-}
-
-func (x *ReadDataSource_Response) Reset() {
- *x = ReadDataSource_Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[70]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ReadDataSource_Response) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ReadDataSource_Response) ProtoMessage() {}
-
-func (x *ReadDataSource_Response) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[70]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ReadDataSource_Response.ProtoReflect.Descriptor instead.
-func (*ReadDataSource_Response) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{23, 1}
-}
-
-func (x *ReadDataSource_Response) GetState() *DynamicValue {
- if x != nil {
- return x.State
- }
- return nil
-}
-
-func (x *ReadDataSource_Response) GetDiagnostics() []*Diagnostic {
- if x != nil {
- return x.Diagnostics
- }
- return nil
-}
-
-func (x *ReadDataSource_Response) GetDeferred() *Deferred {
- if x != nil {
- return x.Deferred
- }
- return nil
-}
-
-type GetProvisionerSchema_Request struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *GetProvisionerSchema_Request) Reset() {
- *x = GetProvisionerSchema_Request{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[71]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetProvisionerSchema_Request) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetProvisionerSchema_Request) ProtoMessage() {}
-
-func (x *GetProvisionerSchema_Request) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[71]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetProvisionerSchema_Request.ProtoReflect.Descriptor instead.
-func (*GetProvisionerSchema_Request) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{24, 0}
-}
-
-type GetProvisionerSchema_Response struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Provisioner *Schema `protobuf:"bytes,1,opt,name=provisioner,proto3" json:"provisioner,omitempty"`
- Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
-}
-
-func (x *GetProvisionerSchema_Response) Reset() {
- *x = GetProvisionerSchema_Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[72]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetProvisionerSchema_Response) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetProvisionerSchema_Response) ProtoMessage() {}
-
-func (x *GetProvisionerSchema_Response) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[72]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetProvisionerSchema_Response.ProtoReflect.Descriptor instead.
-func (*GetProvisionerSchema_Response) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{24, 1}
-}
-
-func (x *GetProvisionerSchema_Response) GetProvisioner() *Schema {
- if x != nil {
- return x.Provisioner
- }
- return nil
-}
-
-func (x *GetProvisionerSchema_Response) GetDiagnostics() []*Diagnostic {
- if x != nil {
- return x.Diagnostics
- }
- return nil
-}
-
-type ValidateProvisionerConfig_Request struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
-}
-
-func (x *ValidateProvisionerConfig_Request) Reset() {
- *x = ValidateProvisionerConfig_Request{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[73]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ValidateProvisionerConfig_Request) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ValidateProvisionerConfig_Request) ProtoMessage() {}
-
-func (x *ValidateProvisionerConfig_Request) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[73]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ValidateProvisionerConfig_Request.ProtoReflect.Descriptor instead.
-func (*ValidateProvisionerConfig_Request) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{25, 0}
-}
-
-func (x *ValidateProvisionerConfig_Request) GetConfig() *DynamicValue {
- if x != nil {
- return x.Config
- }
- return nil
-}
-
-type ValidateProvisionerConfig_Response struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Diagnostics []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
-}
-
-func (x *ValidateProvisionerConfig_Response) Reset() {
- *x = ValidateProvisionerConfig_Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[74]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ValidateProvisionerConfig_Response) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ValidateProvisionerConfig_Response) ProtoMessage() {}
-
-func (x *ValidateProvisionerConfig_Response) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[74]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ValidateProvisionerConfig_Response.ProtoReflect.Descriptor instead.
-func (*ValidateProvisionerConfig_Response) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{25, 1}
-}
-
-func (x *ValidateProvisionerConfig_Response) GetDiagnostics() []*Diagnostic {
- if x != nil {
- return x.Diagnostics
- }
- return nil
-}
-
-type ProvisionResource_Request struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Config *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
- Connection *DynamicValue `protobuf:"bytes,2,opt,name=connection,proto3" json:"connection,omitempty"`
-}
-
-func (x *ProvisionResource_Request) Reset() {
- *x = ProvisionResource_Request{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[75]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ProvisionResource_Request) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ProvisionResource_Request) ProtoMessage() {}
-
-func (x *ProvisionResource_Request) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[75]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ProvisionResource_Request.ProtoReflect.Descriptor instead.
-func (*ProvisionResource_Request) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{26, 0}
-}
-
-func (x *ProvisionResource_Request) GetConfig() *DynamicValue {
- if x != nil {
- return x.Config
- }
- return nil
-}
-
-func (x *ProvisionResource_Request) GetConnection() *DynamicValue {
- if x != nil {
- return x.Connection
- }
- return nil
-}
-
-type ProvisionResource_Response struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Output string `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"`
- Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
-}
-
-func (x *ProvisionResource_Response) Reset() {
- *x = ProvisionResource_Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[76]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ProvisionResource_Response) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ProvisionResource_Response) ProtoMessage() {}
-
-func (x *ProvisionResource_Response) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[76]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ProvisionResource_Response.ProtoReflect.Descriptor instead.
-func (*ProvisionResource_Response) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{26, 1}
-}
-
-func (x *ProvisionResource_Response) GetOutput() string {
- if x != nil {
- return x.Output
- }
- return ""
-}
-
-func (x *ProvisionResource_Response) GetDiagnostics() []*Diagnostic {
- if x != nil {
- return x.Diagnostics
- }
- return nil
-}
-
-type GetFunctions_Request struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *GetFunctions_Request) Reset() {
- *x = GetFunctions_Request{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[77]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetFunctions_Request) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetFunctions_Request) ProtoMessage() {}
-
-func (x *GetFunctions_Request) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[77]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetFunctions_Request.ProtoReflect.Descriptor instead.
-func (*GetFunctions_Request) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{27, 0}
-}
-
-type GetFunctions_Response struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // functions is a mapping of function names to definitions.
- Functions map[string]*Function `protobuf:"bytes,1,rep,name=functions,proto3" json:"functions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- // diagnostics is any warnings or errors.
- Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
-}
-
-func (x *GetFunctions_Response) Reset() {
- *x = GetFunctions_Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[78]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetFunctions_Response) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetFunctions_Response) ProtoMessage() {}
-
-func (x *GetFunctions_Response) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[78]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetFunctions_Response.ProtoReflect.Descriptor instead.
-func (*GetFunctions_Response) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{27, 1}
-}
-
-func (x *GetFunctions_Response) GetFunctions() map[string]*Function {
- if x != nil {
- return x.Functions
- }
- return nil
-}
-
-func (x *GetFunctions_Response) GetDiagnostics() []*Diagnostic {
- if x != nil {
- return x.Diagnostics
- }
- return nil
-}
-
-type CallFunction_Request struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // name is the name of the function being called.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // arguments is the data of each function argument value.
- Arguments []*DynamicValue `protobuf:"bytes,2,rep,name=arguments,proto3" json:"arguments,omitempty"`
-}
-
-func (x *CallFunction_Request) Reset() {
- *x = CallFunction_Request{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[80]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CallFunction_Request) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CallFunction_Request) ProtoMessage() {}
-
-func (x *CallFunction_Request) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[80]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CallFunction_Request.ProtoReflect.Descriptor instead.
-func (*CallFunction_Request) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{28, 0}
-}
-
-func (x *CallFunction_Request) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *CallFunction_Request) GetArguments() []*DynamicValue {
- if x != nil {
- return x.Arguments
- }
- return nil
-}
-
-type CallFunction_Response struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // result is result value after running the function logic.
- Result *DynamicValue `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"`
- // error is any error from the function logic.
- Error *FunctionError `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
-}
-
-func (x *CallFunction_Response) Reset() {
- *x = CallFunction_Response{}
- if protoimpl.UnsafeEnabled {
- mi := &file_tfplugin5_proto_msgTypes[81]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CallFunction_Response) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CallFunction_Response) ProtoMessage() {}
-
-func (x *CallFunction_Response) ProtoReflect() protoreflect.Message {
- mi := &file_tfplugin5_proto_msgTypes[81]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CallFunction_Response.ProtoReflect.Descriptor instead.
-func (*CallFunction_Response) Descriptor() ([]byte, []int) {
- return file_tfplugin5_proto_rawDescGZIP(), []int{28, 1}
-}
-
-func (x *CallFunction_Response) GetResult() *DynamicValue {
- if x != nil {
- return x.Result
- }
- return nil
-}
-
-func (x *CallFunction_Response) GetError() *FunctionError {
- if x != nil {
- return x.Error
- }
- return nil
-}
-
-var File_tfplugin5_proto protoreflect.FileDescriptor
-
-var file_tfplugin5_proto_rawDesc = []byte{
- 0x0a, 0x0f, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x12, 0x09, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x22, 0x3c, 0x0a, 0x0c,
- 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x18, 0x0a, 0x07,
- 0x6d, 0x73, 0x67, 0x70, 0x61, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d,
- 0x73, 0x67, 0x70, 0x61, 0x63, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x22, 0xe3, 0x01, 0x0a, 0x0a, 0x44,
- 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x73, 0x65, 0x76,
- 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x74, 0x66,
- 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74,
- 0x69, 0x63, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x52, 0x08, 0x73, 0x65, 0x76,
- 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12,
- 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x36, 0x0a, 0x09, 0x61, 0x74, 0x74, 0x72, 0x69,
- 0x62, 0x75, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70,
- 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65,
- 0x50, 0x61, 0x74, 0x68, 0x52, 0x09, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x22,
- 0x2f, 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x0b, 0x0a, 0x07, 0x49,
- 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f,
- 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, 0x52, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02,
- 0x22, 0x6b, 0x0a, 0x0d, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f,
- 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x78, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x74, 0x65, 0x78, 0x74, 0x12, 0x30, 0x0a, 0x11, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03,
- 0x48, 0x00, 0x52, 0x10, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x72, 0x67, 0x75,
- 0x6d, 0x65, 0x6e, 0x74, 0x88, 0x01, 0x01, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x66, 0x75, 0x6e, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x22, 0xdc, 0x01,
- 0x0a, 0x0d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12,
- 0x33, 0x0a, 0x05, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d,
- 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69,
- 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x2e, 0x53, 0x74, 0x65, 0x70, 0x52, 0x05, 0x73,
- 0x74, 0x65, 0x70, 0x73, 0x1a, 0x95, 0x01, 0x0a, 0x04, 0x53, 0x74, 0x65, 0x70, 0x12, 0x27, 0x0a,
- 0x0e, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0d, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75,
- 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e,
- 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x48, 0x00, 0x52, 0x10, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79,
- 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x28, 0x0a, 0x0f, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e,
- 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48,
- 0x00, 0x52, 0x0d, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x49, 0x6e, 0x74,
- 0x42, 0x0a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, 0x33, 0x0a, 0x04,
- 0x53, 0x74, 0x6f, 0x70, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x20, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x45,
- 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f,
- 0x72, 0x22, 0x96, 0x01, 0x0a, 0x08, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x12,
- 0x0a, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6a, 0x73,
- 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x07, 0x66, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e,
- 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x46, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70,
- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x66, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, 0x1a, 0x3a,
- 0x0a, 0x0c, 0x46, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
- 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
- 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xcc, 0x07, 0x0a, 0x06, 0x53,
- 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
- 0x2d, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17,
- 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x1a, 0xa2,
- 0x02, 0x0a, 0x05, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73,
- 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
- 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69,
- 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62,
- 0x75, 0x74, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12,
- 0x3e, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35,
- 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x6c,
- 0x6f, 0x63, 0x6b, 0x52, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12,
- 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66,
- 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69,
- 0x6e, 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b,
- 0x69, 0x6e, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
- 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
- 0x74, 0x65, 0x64, 0x1a, 0xa9, 0x02, 0x0a, 0x09, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
- 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
- 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72,
- 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72,
- 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64, 0x12,
- 0x1c, 0x0a, 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x07, 0x20, 0x01,
- 0x28, 0x08, 0x52, 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x40, 0x0a,
- 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e,
- 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67,
- 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f,
- 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12,
- 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x09, 0x20,
- 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x1a,
- 0xa7, 0x02, 0x0a, 0x0b, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12,
- 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05,
- 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66,
- 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x42,
- 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x43, 0x0a, 0x07, 0x6e,
- 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x74,
- 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e,
- 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x4e, 0x65, 0x73, 0x74,
- 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x07, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67,
- 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a,
- 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03,
- 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x4d, 0x0a, 0x0b, 0x4e, 0x65,
- 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56,
- 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45,
- 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03,
- 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x4d, 0x41, 0x50, 0x10, 0x04, 0x12, 0x09,
- 0x0a, 0x05, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x05, 0x22, 0xa8, 0x01, 0x0a, 0x12, 0x53, 0x65,
- 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73,
- 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x6c, 0x61, 0x6e, 0x44, 0x65, 0x73, 0x74,
- 0x72, 0x6f, 0x79, 0x12, 0x3f, 0x0a, 0x1c, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69,
- 0x64, 0x65, 0x72, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x67, 0x65, 0x74, 0x50, 0x72,
- 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x2e, 0x0a, 0x13, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x08, 0x52, 0x11, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53,
- 0x74, 0x61, 0x74, 0x65, 0x22, 0x3f, 0x0a, 0x12, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x61,
- 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x65,
- 0x66, 0x65, 0x72, 0x72, 0x61, 0x6c, 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x61, 0x6c, 0x41, 0x6c,
- 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x22, 0x8e, 0x05, 0x0a, 0x08, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69,
- 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69,
- 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x61,
- 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72,
- 0x73, 0x12, 0x4c, 0x0a, 0x12, 0x76, 0x61, 0x72, 0x69, 0x61, 0x64, 0x69, 0x63, 0x5f, 0x70, 0x61,
- 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e,
- 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69,
- 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x11, 0x76, 0x61,
- 0x72, 0x69, 0x61, 0x64, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12,
- 0x32, 0x0a, 0x06, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x74,
- 0x75, 0x72, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a,
- 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12,
- 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b,
- 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c,
- 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64,
- 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e,
- 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12,
- 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61,
- 0x67, 0x65, 0x1a, 0xf3, 0x01, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72,
- 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x0c, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x61, 0x6c, 0x6c, 0x6f,
- 0x77, 0x5f, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x08, 0x52, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c,
- 0x75, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x75, 0x6e, 0x6b, 0x6e,
- 0x6f, 0x77, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x56, 0x61,
- 0x6c, 0x75, 0x65, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e,
- 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x72,
- 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x1a, 0x1c, 0x0a, 0x06, 0x52, 0x65, 0x74, 0x75,
- 0x72, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c,
- 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xa2, 0x01, 0x0a, 0x08, 0x44, 0x65, 0x66, 0x65, 0x72,
- 0x72, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e,
- 0x44, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52,
- 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x62, 0x0a, 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f,
- 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1b,
- 0x0a, 0x17, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49,
- 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x12, 0x1b, 0x0a, 0x17, 0x50,
- 0x52, 0x4f, 0x56, 0x49, 0x44, 0x45, 0x52, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x5f, 0x55,
- 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x42, 0x53, 0x45,
- 0x4e, 0x54, 0x5f, 0x50, 0x52, 0x45, 0x52, 0x45, 0x51, 0x10, 0x03, 0x22, 0x96, 0x04, 0x0a, 0x0b,
- 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xef, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x61,
- 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x65, 0x72,
- 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52,
- 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74,
- 0x69, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69,
- 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75,
- 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52,
- 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x4c, 0x0a, 0x0c,
- 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47,
- 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0b, 0x64,
- 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e,
- 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74,
- 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65,
- 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x73, 0x12, 0x45, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35,
- 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x75, 0x6e,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x09, 0x66,
- 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x26, 0x0a, 0x10, 0x46, 0x75, 0x6e, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x1a, 0x31, 0x0a, 0x12, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65,
- 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e,
- 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e,
- 0x61, 0x6d, 0x65, 0x1a, 0x2f, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d,
- 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f,
- 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65,
- 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xc7, 0x06, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76,
- 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xa6, 0x06, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35,
- 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65,
- 0x72, 0x12, 0x65, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63,
- 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, 0x66,
- 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69,
- 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x6c, 0x0a, 0x13, 0x64, 0x61, 0x74, 0x61,
- 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18,
- 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e,
- 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68,
- 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, 0x74,
- 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e,
- 0x74, 0x72, 0x79, 0x52, 0x11, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53,
- 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f,
- 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66,
- 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74,
- 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12,
- 0x36, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69,
- 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69,
- 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65,
- 0x72, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x06,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35,
- 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74,
- 0x69, 0x65, 0x73, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62,
- 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x74, 0x66, 0x70,
- 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64,
- 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
- 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x14, 0x52,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e,
- 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35,
- 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
- 0x38, 0x01, 0x1a, 0x57, 0x0a, 0x16, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
- 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27,
- 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e,
- 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x51, 0x0a, 0x0e, 0x46,
- 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
- 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
- 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13,
- 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xdb,
- 0x01, 0x0a, 0x15, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64,
- 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e,
- 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x85, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x40, 0x0a, 0x0f, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x63, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70,
- 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61,
- 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x64, 0x43, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69,
- 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75,
- 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52,
- 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x90, 0x02, 0x0a,
- 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x72, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a,
- 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07,
- 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x73,
- 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70,
- 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52,
- 0x08, 0x72, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x83, 0x01, 0x0a, 0x08, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0e, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64,
- 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17,
- 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d,
- 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65,
- 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f,
- 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66,
- 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74,
- 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22,
- 0xba, 0x01, 0x0a, 0x1a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57,
- 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70,
- 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79,
- 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69,
- 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
- 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69,
- 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75,
- 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52,
- 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xb8, 0x01, 0x0a,
- 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d,
- 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79,
- 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37,
- 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e,
- 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67,
- 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x8a, 0x02, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x75, 0x72, 0x65, 0x1a, 0xb7, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x76,
- 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x65,
- 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f,
- 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17,
- 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d,
- 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
- 0x4e, 0x0a, 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69,
- 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74,
- 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43,
- 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x63, 0x6c, 0x69,
- 0x65, 0x6e, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x1a,
- 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64,
- 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61,
- 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73,
- 0x74, 0x69, 0x63, 0x73, 0x22, 0xe4, 0x03, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x8c, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3c,
- 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e,
- 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c,
- 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07,
- 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70,
- 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64,
- 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e,
- 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69,
- 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72,
- 0x4d, 0x65, 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63,
- 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x6c,
- 0x69, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73,
- 0x52, 0x12, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69,
- 0x74, 0x69, 0x65, 0x73, 0x1a, 0xc4, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35,
- 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e,
- 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e,
- 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74,
- 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73,
- 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73,
- 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x08, 0x64, 0x65,
- 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74,
- 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65,
- 0x64, 0x52, 0x08, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x22, 0xf3, 0x05, 0x0a, 0x12,
- 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e,
- 0x67, 0x65, 0x1a, 0x8b, 0x03, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b,
- 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70,
- 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e,
- 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72,
- 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x45, 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65,
- 0x64, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79,
- 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x70,
- 0x6f, 0x73, 0x65, 0x64, 0x4e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06,
- 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74,
- 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63,
- 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a,
- 0x0d, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05,
- 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x69, 0x76, 0x61,
- 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d,
- 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c,
- 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c,
- 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61,
- 0x12, 0x4e, 0x0a, 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62,
- 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e,
- 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74,
- 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x63, 0x6c,
- 0x69, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73,
- 0x1a, 0xce, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a,
- 0x0d, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35,
- 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70,
- 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x72,
- 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18,
- 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e,
- 0x35, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52,
- 0x0f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65,
- 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76,
- 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e,
- 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61,
- 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15,
- 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e,
- 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69,
- 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70,
- 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10,
- 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d,
- 0x12, 0x2f, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44,
- 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x52, 0x08, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65,
- 0x64, 0x22, 0x92, 0x04, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xb6, 0x02, 0x0a, 0x07, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61,
- 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61,
- 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67,
- 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65,
- 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d,
- 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e,
- 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c,
- 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70,
- 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61,
- 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x70,
- 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05,
- 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69,
- 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72,
- 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66,
- 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56,
- 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65,
- 0x74, 0x61, 0x1a, 0xc1, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44,
- 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77,
- 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12,
- 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x03,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35,
- 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61,
- 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61,
- 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65,
- 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0xef, 0x03, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72,
- 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x86,
- 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79,
- 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74,
- 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x4e, 0x0a, 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e,
- 0x74, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35,
- 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74,
- 0x69, 0x65, 0x73, 0x52, 0x12, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x70, 0x61, 0x62,
- 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x1a, 0x78, 0x0a, 0x10, 0x49, 0x6d, 0x70, 0x6f, 0x72,
- 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74,
- 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67,
- 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65,
- 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61,
- 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74,
- 0x65, 0x1a, 0xd4, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e,
- 0x0a, 0x12, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70,
- 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72,
- 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x11, 0x69, 0x6d, 0x70,
- 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x37,
- 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e,
- 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67,
- 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2f, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x65, 0x72,
- 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c,
- 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x52, 0x08,
- 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x22, 0xe7, 0x03, 0x0a, 0x11, 0x4d, 0x6f, 0x76,
- 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0xa8,
- 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x17, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x64,
- 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x65,
- 0x73, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70,
- 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x15,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x76, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
- 0x12, 0x36, 0x0a, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69,
- 0x6e, 0x35, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0b, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x61, 0x72, 0x67,
- 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0e, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61,
- 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x72, 0x69,
- 0x76, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x1a, 0xa6, 0x01, 0x0a, 0x08, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74,
- 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74,
- 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63,
- 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x74, 0x61,
- 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63,
- 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67,
- 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b,
- 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x74,
- 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x50, 0x72, 0x69, 0x76, 0x61,
- 0x74, 0x65, 0x22, 0x9e, 0x03, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0xe5, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f,
- 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17,
- 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d,
- 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
- 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69,
- 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
- 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x4e, 0x0a,
- 0x13, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69,
- 0x74, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70,
- 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x70,
- 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x63, 0x6c, 0x69, 0x65, 0x6e,
- 0x74, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x1a, 0xa3, 0x01,
- 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74,
- 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c,
- 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c,
- 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61,
- 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15,
- 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e,
- 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69,
- 0x63, 0x73, 0x12, 0x2f, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35,
- 0x2e, 0x44, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x52, 0x08, 0x64, 0x65, 0x66, 0x65, 0x72,
- 0x72, 0x65, 0x64, 0x22, 0x9b, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69,
- 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x78, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e,
- 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75,
- 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0b, 0x70, 0x72, 0x6f,
- 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67,
- 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e,
- 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f,
- 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63,
- 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x19, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72,
- 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a,
- 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70,
- 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61,
- 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e,
- 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74,
- 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73,
- 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73,
- 0x22, 0xe5, 0x01, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x73, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79,
- 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x12, 0x37, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69,
- 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
- 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5b, 0x0a, 0x08, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75,
- 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12,
- 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35,
- 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61,
- 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x81, 0x02, 0x0a, 0x0c, 0x47, 0x65, 0x74,
- 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0xe5, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x12, 0x4d, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35,
- 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18,
- 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e,
- 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69,
- 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x1a, 0x51, 0x0a, 0x0e, 0x46, 0x75, 0x6e,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
- 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74,
- 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd1, 0x01, 0x0a,
- 0x0c, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x54, 0x0a,
- 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x09,
- 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61,
- 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65,
- 0x6e, 0x74, 0x73, 0x1a, 0x6b, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x2f, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61,
- 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74,
- 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72,
- 0x2a, 0x25, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x09,
- 0x0a, 0x05, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4d, 0x41, 0x52,
- 0x4b, 0x44, 0x4f, 0x57, 0x4e, 0x10, 0x01, 0x32, 0xef, 0x0b, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76,
- 0x69, 0x64, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64,
- 0x61, 0x74, 0x61, 0x12, 0x1e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e,
- 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e,
- 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d,
- 0x61, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65,
- 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67,
- 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53,
- 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c,
- 0x0a, 0x15, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65,
- 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67,
- 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69,
- 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72,
- 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x1a,
- 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2d, 0x2e, 0x74, 0x66, 0x70,
- 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x74, 0x66, 0x70, 0x6c,
- 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x18, 0x56, 0x61, 0x6c,
- 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2b, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e,
- 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56,
- 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x69, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75,
- 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x55, 0x70,
- 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61,
- 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x09, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x1c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75,
- 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x2e, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69,
- 0x6e, 0x35, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e,
- 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69,
- 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x12, 0x50, 0x6c, 0x61, 0x6e,
- 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x25,
- 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e,
- 0x35, 0x2e, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68,
- 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a,
- 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68,
- 0x61, 0x6e, 0x67, 0x65, 0x12, 0x26, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35,
- 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68,
- 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74,
- 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x26, 0x2e, 0x74,
- 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35,
- 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53,
- 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a,
- 0x11, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61,
- 0x74, 0x65, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x4d,
- 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65,
- 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75,
- 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x57, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x12, 0x21, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65,
- 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35,
- 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x46,
- 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75,
- 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c,
- 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x43,
- 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x74, 0x66,
- 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74,
- 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39,
- 0x0a, 0x04, 0x53, 0x74, 0x6f, 0x70, 0x12, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69,
- 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70,
- 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x86, 0x03, 0x0a, 0x0b, 0x50, 0x72,
- 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x5e, 0x0a, 0x09, 0x47, 0x65, 0x74,
- 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69,
- 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65,
- 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50,
- 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
- 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x19, 0x56, 0x61, 0x6c,
- 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69,
- 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69,
- 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35,
- 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69,
- 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x62, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e,
- 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75,
- 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25,
- 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69,
- 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x39, 0x0a, 0x04, 0x53, 0x74, 0x6f, 0x70, 0x12,
- 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70,
- 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75,
- 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x42, 0x47, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x74, 0x65, 0x72, 0x72, 0x61,
- 0x66, 0x6f, 0x72, 0x6d, 0x2d, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x74,
- 0x66, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x76, 0x35, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61,
- 0x6c, 0x2f, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x62, 0x06, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x33,
-}
-
-var (
- file_tfplugin5_proto_rawDescOnce sync.Once
- file_tfplugin5_proto_rawDescData = file_tfplugin5_proto_rawDesc
-)
-
-func file_tfplugin5_proto_rawDescGZIP() []byte {
- file_tfplugin5_proto_rawDescOnce.Do(func() {
- file_tfplugin5_proto_rawDescData = protoimpl.X.CompressGZIP(file_tfplugin5_proto_rawDescData)
- })
- return file_tfplugin5_proto_rawDescData
-}
-
-var file_tfplugin5_proto_enumTypes = make([]protoimpl.EnumInfo, 4)
-var file_tfplugin5_proto_msgTypes = make([]protoimpl.MessageInfo, 82)
-var file_tfplugin5_proto_goTypes = []interface{}{
- (StringKind)(0), // 0: tfplugin5.StringKind
- (Diagnostic_Severity)(0), // 1: tfplugin5.Diagnostic.Severity
- (Schema_NestedBlock_NestingMode)(0), // 2: tfplugin5.Schema.NestedBlock.NestingMode
- (Deferred_Reason)(0), // 3: tfplugin5.Deferred.Reason
- (*DynamicValue)(nil), // 4: tfplugin5.DynamicValue
- (*Diagnostic)(nil), // 5: tfplugin5.Diagnostic
- (*FunctionError)(nil), // 6: tfplugin5.FunctionError
- (*AttributePath)(nil), // 7: tfplugin5.AttributePath
- (*Stop)(nil), // 8: tfplugin5.Stop
- (*RawState)(nil), // 9: tfplugin5.RawState
- (*Schema)(nil), // 10: tfplugin5.Schema
- (*ServerCapabilities)(nil), // 11: tfplugin5.ServerCapabilities
- (*ClientCapabilities)(nil), // 12: tfplugin5.ClientCapabilities
- (*Function)(nil), // 13: tfplugin5.Function
- (*Deferred)(nil), // 14: tfplugin5.Deferred
- (*GetMetadata)(nil), // 15: tfplugin5.GetMetadata
- (*GetProviderSchema)(nil), // 16: tfplugin5.GetProviderSchema
- (*PrepareProviderConfig)(nil), // 17: tfplugin5.PrepareProviderConfig
- (*UpgradeResourceState)(nil), // 18: tfplugin5.UpgradeResourceState
- (*ValidateResourceTypeConfig)(nil), // 19: tfplugin5.ValidateResourceTypeConfig
- (*ValidateDataSourceConfig)(nil), // 20: tfplugin5.ValidateDataSourceConfig
- (*Configure)(nil), // 21: tfplugin5.Configure
- (*ReadResource)(nil), // 22: tfplugin5.ReadResource
- (*PlanResourceChange)(nil), // 23: tfplugin5.PlanResourceChange
- (*ApplyResourceChange)(nil), // 24: tfplugin5.ApplyResourceChange
- (*ImportResourceState)(nil), // 25: tfplugin5.ImportResourceState
- (*MoveResourceState)(nil), // 26: tfplugin5.MoveResourceState
- (*ReadDataSource)(nil), // 27: tfplugin5.ReadDataSource
- (*GetProvisionerSchema)(nil), // 28: tfplugin5.GetProvisionerSchema
- (*ValidateProvisionerConfig)(nil), // 29: tfplugin5.ValidateProvisionerConfig
- (*ProvisionResource)(nil), // 30: tfplugin5.ProvisionResource
- (*GetFunctions)(nil), // 31: tfplugin5.GetFunctions
- (*CallFunction)(nil), // 32: tfplugin5.CallFunction
- (*AttributePath_Step)(nil), // 33: tfplugin5.AttributePath.Step
- (*Stop_Request)(nil), // 34: tfplugin5.Stop.Request
- (*Stop_Response)(nil), // 35: tfplugin5.Stop.Response
- nil, // 36: tfplugin5.RawState.FlatmapEntry
- (*Schema_Block)(nil), // 37: tfplugin5.Schema.Block
- (*Schema_Attribute)(nil), // 38: tfplugin5.Schema.Attribute
- (*Schema_NestedBlock)(nil), // 39: tfplugin5.Schema.NestedBlock
- (*Function_Parameter)(nil), // 40: tfplugin5.Function.Parameter
- (*Function_Return)(nil), // 41: tfplugin5.Function.Return
- (*GetMetadata_Request)(nil), // 42: tfplugin5.GetMetadata.Request
- (*GetMetadata_Response)(nil), // 43: tfplugin5.GetMetadata.Response
- (*GetMetadata_FunctionMetadata)(nil), // 44: tfplugin5.GetMetadata.FunctionMetadata
- (*GetMetadata_DataSourceMetadata)(nil), // 45: tfplugin5.GetMetadata.DataSourceMetadata
- (*GetMetadata_ResourceMetadata)(nil), // 46: tfplugin5.GetMetadata.ResourceMetadata
- (*GetProviderSchema_Request)(nil), // 47: tfplugin5.GetProviderSchema.Request
- (*GetProviderSchema_Response)(nil), // 48: tfplugin5.GetProviderSchema.Response
- nil, // 49: tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry
- nil, // 50: tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry
- nil, // 51: tfplugin5.GetProviderSchema.Response.FunctionsEntry
- (*PrepareProviderConfig_Request)(nil), // 52: tfplugin5.PrepareProviderConfig.Request
- (*PrepareProviderConfig_Response)(nil), // 53: tfplugin5.PrepareProviderConfig.Response
- (*UpgradeResourceState_Request)(nil), // 54: tfplugin5.UpgradeResourceState.Request
- (*UpgradeResourceState_Response)(nil), // 55: tfplugin5.UpgradeResourceState.Response
- (*ValidateResourceTypeConfig_Request)(nil), // 56: tfplugin5.ValidateResourceTypeConfig.Request
- (*ValidateResourceTypeConfig_Response)(nil), // 57: tfplugin5.ValidateResourceTypeConfig.Response
- (*ValidateDataSourceConfig_Request)(nil), // 58: tfplugin5.ValidateDataSourceConfig.Request
- (*ValidateDataSourceConfig_Response)(nil), // 59: tfplugin5.ValidateDataSourceConfig.Response
- (*Configure_Request)(nil), // 60: tfplugin5.Configure.Request
- (*Configure_Response)(nil), // 61: tfplugin5.Configure.Response
- (*ReadResource_Request)(nil), // 62: tfplugin5.ReadResource.Request
- (*ReadResource_Response)(nil), // 63: tfplugin5.ReadResource.Response
- (*PlanResourceChange_Request)(nil), // 64: tfplugin5.PlanResourceChange.Request
- (*PlanResourceChange_Response)(nil), // 65: tfplugin5.PlanResourceChange.Response
- (*ApplyResourceChange_Request)(nil), // 66: tfplugin5.ApplyResourceChange.Request
- (*ApplyResourceChange_Response)(nil), // 67: tfplugin5.ApplyResourceChange.Response
- (*ImportResourceState_Request)(nil), // 68: tfplugin5.ImportResourceState.Request
- (*ImportResourceState_ImportedResource)(nil), // 69: tfplugin5.ImportResourceState.ImportedResource
- (*ImportResourceState_Response)(nil), // 70: tfplugin5.ImportResourceState.Response
- (*MoveResourceState_Request)(nil), // 71: tfplugin5.MoveResourceState.Request
- (*MoveResourceState_Response)(nil), // 72: tfplugin5.MoveResourceState.Response
- (*ReadDataSource_Request)(nil), // 73: tfplugin5.ReadDataSource.Request
- (*ReadDataSource_Response)(nil), // 74: tfplugin5.ReadDataSource.Response
- (*GetProvisionerSchema_Request)(nil), // 75: tfplugin5.GetProvisionerSchema.Request
- (*GetProvisionerSchema_Response)(nil), // 76: tfplugin5.GetProvisionerSchema.Response
- (*ValidateProvisionerConfig_Request)(nil), // 77: tfplugin5.ValidateProvisionerConfig.Request
- (*ValidateProvisionerConfig_Response)(nil), // 78: tfplugin5.ValidateProvisionerConfig.Response
- (*ProvisionResource_Request)(nil), // 79: tfplugin5.ProvisionResource.Request
- (*ProvisionResource_Response)(nil), // 80: tfplugin5.ProvisionResource.Response
- (*GetFunctions_Request)(nil), // 81: tfplugin5.GetFunctions.Request
- (*GetFunctions_Response)(nil), // 82: tfplugin5.GetFunctions.Response
- nil, // 83: tfplugin5.GetFunctions.Response.FunctionsEntry
- (*CallFunction_Request)(nil), // 84: tfplugin5.CallFunction.Request
- (*CallFunction_Response)(nil), // 85: tfplugin5.CallFunction.Response
-}
-var file_tfplugin5_proto_depIdxs = []int32{
- 1, // 0: tfplugin5.Diagnostic.severity:type_name -> tfplugin5.Diagnostic.Severity
- 7, // 1: tfplugin5.Diagnostic.attribute:type_name -> tfplugin5.AttributePath
- 33, // 2: tfplugin5.AttributePath.steps:type_name -> tfplugin5.AttributePath.Step
- 36, // 3: tfplugin5.RawState.flatmap:type_name -> tfplugin5.RawState.FlatmapEntry
- 37, // 4: tfplugin5.Schema.block:type_name -> tfplugin5.Schema.Block
- 40, // 5: tfplugin5.Function.parameters:type_name -> tfplugin5.Function.Parameter
- 40, // 6: tfplugin5.Function.variadic_parameter:type_name -> tfplugin5.Function.Parameter
- 41, // 7: tfplugin5.Function.return:type_name -> tfplugin5.Function.Return
- 0, // 8: tfplugin5.Function.description_kind:type_name -> tfplugin5.StringKind
- 3, // 9: tfplugin5.Deferred.reason:type_name -> tfplugin5.Deferred.Reason
- 38, // 10: tfplugin5.Schema.Block.attributes:type_name -> tfplugin5.Schema.Attribute
- 39, // 11: tfplugin5.Schema.Block.block_types:type_name -> tfplugin5.Schema.NestedBlock
- 0, // 12: tfplugin5.Schema.Block.description_kind:type_name -> tfplugin5.StringKind
- 0, // 13: tfplugin5.Schema.Attribute.description_kind:type_name -> tfplugin5.StringKind
- 37, // 14: tfplugin5.Schema.NestedBlock.block:type_name -> tfplugin5.Schema.Block
- 2, // 15: tfplugin5.Schema.NestedBlock.nesting:type_name -> tfplugin5.Schema.NestedBlock.NestingMode
- 0, // 16: tfplugin5.Function.Parameter.description_kind:type_name -> tfplugin5.StringKind
- 11, // 17: tfplugin5.GetMetadata.Response.server_capabilities:type_name -> tfplugin5.ServerCapabilities
- 5, // 18: tfplugin5.GetMetadata.Response.diagnostics:type_name -> tfplugin5.Diagnostic
- 45, // 19: tfplugin5.GetMetadata.Response.data_sources:type_name -> tfplugin5.GetMetadata.DataSourceMetadata
- 46, // 20: tfplugin5.GetMetadata.Response.resources:type_name -> tfplugin5.GetMetadata.ResourceMetadata
- 44, // 21: tfplugin5.GetMetadata.Response.functions:type_name -> tfplugin5.GetMetadata.FunctionMetadata
- 10, // 22: tfplugin5.GetProviderSchema.Response.provider:type_name -> tfplugin5.Schema
- 49, // 23: tfplugin5.GetProviderSchema.Response.resource_schemas:type_name -> tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry
- 50, // 24: tfplugin5.GetProviderSchema.Response.data_source_schemas:type_name -> tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry
- 5, // 25: tfplugin5.GetProviderSchema.Response.diagnostics:type_name -> tfplugin5.Diagnostic
- 10, // 26: tfplugin5.GetProviderSchema.Response.provider_meta:type_name -> tfplugin5.Schema
- 11, // 27: tfplugin5.GetProviderSchema.Response.server_capabilities:type_name -> tfplugin5.ServerCapabilities
- 51, // 28: tfplugin5.GetProviderSchema.Response.functions:type_name -> tfplugin5.GetProviderSchema.Response.FunctionsEntry
- 10, // 29: tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry.value:type_name -> tfplugin5.Schema
- 10, // 30: tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry.value:type_name -> tfplugin5.Schema
- 13, // 31: tfplugin5.GetProviderSchema.Response.FunctionsEntry.value:type_name -> tfplugin5.Function
- 4, // 32: tfplugin5.PrepareProviderConfig.Request.config:type_name -> tfplugin5.DynamicValue
- 4, // 33: tfplugin5.PrepareProviderConfig.Response.prepared_config:type_name -> tfplugin5.DynamicValue
- 5, // 34: tfplugin5.PrepareProviderConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic
- 9, // 35: tfplugin5.UpgradeResourceState.Request.raw_state:type_name -> tfplugin5.RawState
- 4, // 36: tfplugin5.UpgradeResourceState.Response.upgraded_state:type_name -> tfplugin5.DynamicValue
- 5, // 37: tfplugin5.UpgradeResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic
- 4, // 38: tfplugin5.ValidateResourceTypeConfig.Request.config:type_name -> tfplugin5.DynamicValue
- 5, // 39: tfplugin5.ValidateResourceTypeConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic
- 4, // 40: tfplugin5.ValidateDataSourceConfig.Request.config:type_name -> tfplugin5.DynamicValue
- 5, // 41: tfplugin5.ValidateDataSourceConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic
- 4, // 42: tfplugin5.Configure.Request.config:type_name -> tfplugin5.DynamicValue
- 12, // 43: tfplugin5.Configure.Request.client_capabilities:type_name -> tfplugin5.ClientCapabilities
- 5, // 44: tfplugin5.Configure.Response.diagnostics:type_name -> tfplugin5.Diagnostic
- 4, // 45: tfplugin5.ReadResource.Request.current_state:type_name -> tfplugin5.DynamicValue
- 4, // 46: tfplugin5.ReadResource.Request.provider_meta:type_name -> tfplugin5.DynamicValue
- 12, // 47: tfplugin5.ReadResource.Request.client_capabilities:type_name -> tfplugin5.ClientCapabilities
- 4, // 48: tfplugin5.ReadResource.Response.new_state:type_name -> tfplugin5.DynamicValue
- 5, // 49: tfplugin5.ReadResource.Response.diagnostics:type_name -> tfplugin5.Diagnostic
- 14, // 50: tfplugin5.ReadResource.Response.deferred:type_name -> tfplugin5.Deferred
- 4, // 51: tfplugin5.PlanResourceChange.Request.prior_state:type_name -> tfplugin5.DynamicValue
- 4, // 52: tfplugin5.PlanResourceChange.Request.proposed_new_state:type_name -> tfplugin5.DynamicValue
- 4, // 53: tfplugin5.PlanResourceChange.Request.config:type_name -> tfplugin5.DynamicValue
- 4, // 54: tfplugin5.PlanResourceChange.Request.provider_meta:type_name -> tfplugin5.DynamicValue
- 12, // 55: tfplugin5.PlanResourceChange.Request.client_capabilities:type_name -> tfplugin5.ClientCapabilities
- 4, // 56: tfplugin5.PlanResourceChange.Response.planned_state:type_name -> tfplugin5.DynamicValue
- 7, // 57: tfplugin5.PlanResourceChange.Response.requires_replace:type_name -> tfplugin5.AttributePath
- 5, // 58: tfplugin5.PlanResourceChange.Response.diagnostics:type_name -> tfplugin5.Diagnostic
- 14, // 59: tfplugin5.PlanResourceChange.Response.deferred:type_name -> tfplugin5.Deferred
- 4, // 60: tfplugin5.ApplyResourceChange.Request.prior_state:type_name -> tfplugin5.DynamicValue
- 4, // 61: tfplugin5.ApplyResourceChange.Request.planned_state:type_name -> tfplugin5.DynamicValue
- 4, // 62: tfplugin5.ApplyResourceChange.Request.config:type_name -> tfplugin5.DynamicValue
- 4, // 63: tfplugin5.ApplyResourceChange.Request.provider_meta:type_name -> tfplugin5.DynamicValue
- 4, // 64: tfplugin5.ApplyResourceChange.Response.new_state:type_name -> tfplugin5.DynamicValue
- 5, // 65: tfplugin5.ApplyResourceChange.Response.diagnostics:type_name -> tfplugin5.Diagnostic
- 12, // 66: tfplugin5.ImportResourceState.Request.client_capabilities:type_name -> tfplugin5.ClientCapabilities
- 4, // 67: tfplugin5.ImportResourceState.ImportedResource.state:type_name -> tfplugin5.DynamicValue
- 69, // 68: tfplugin5.ImportResourceState.Response.imported_resources:type_name -> tfplugin5.ImportResourceState.ImportedResource
- 5, // 69: tfplugin5.ImportResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic
- 14, // 70: tfplugin5.ImportResourceState.Response.deferred:type_name -> tfplugin5.Deferred
- 9, // 71: tfplugin5.MoveResourceState.Request.source_state:type_name -> tfplugin5.RawState
- 4, // 72: tfplugin5.MoveResourceState.Response.target_state:type_name -> tfplugin5.DynamicValue
- 5, // 73: tfplugin5.MoveResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic
- 4, // 74: tfplugin5.ReadDataSource.Request.config:type_name -> tfplugin5.DynamicValue
- 4, // 75: tfplugin5.ReadDataSource.Request.provider_meta:type_name -> tfplugin5.DynamicValue
- 12, // 76: tfplugin5.ReadDataSource.Request.client_capabilities:type_name -> tfplugin5.ClientCapabilities
- 4, // 77: tfplugin5.ReadDataSource.Response.state:type_name -> tfplugin5.DynamicValue
- 5, // 78: tfplugin5.ReadDataSource.Response.diagnostics:type_name -> tfplugin5.Diagnostic
- 14, // 79: tfplugin5.ReadDataSource.Response.deferred:type_name -> tfplugin5.Deferred
- 10, // 80: tfplugin5.GetProvisionerSchema.Response.provisioner:type_name -> tfplugin5.Schema
- 5, // 81: tfplugin5.GetProvisionerSchema.Response.diagnostics:type_name -> tfplugin5.Diagnostic
- 4, // 82: tfplugin5.ValidateProvisionerConfig.Request.config:type_name -> tfplugin5.DynamicValue
- 5, // 83: tfplugin5.ValidateProvisionerConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic
- 4, // 84: tfplugin5.ProvisionResource.Request.config:type_name -> tfplugin5.DynamicValue
- 4, // 85: tfplugin5.ProvisionResource.Request.connection:type_name -> tfplugin5.DynamicValue
- 5, // 86: tfplugin5.ProvisionResource.Response.diagnostics:type_name -> tfplugin5.Diagnostic
- 83, // 87: tfplugin5.GetFunctions.Response.functions:type_name -> tfplugin5.GetFunctions.Response.FunctionsEntry
- 5, // 88: tfplugin5.GetFunctions.Response.diagnostics:type_name -> tfplugin5.Diagnostic
- 13, // 89: tfplugin5.GetFunctions.Response.FunctionsEntry.value:type_name -> tfplugin5.Function
- 4, // 90: tfplugin5.CallFunction.Request.arguments:type_name -> tfplugin5.DynamicValue
- 4, // 91: tfplugin5.CallFunction.Response.result:type_name -> tfplugin5.DynamicValue
- 6, // 92: tfplugin5.CallFunction.Response.error:type_name -> tfplugin5.FunctionError
- 42, // 93: tfplugin5.Provider.GetMetadata:input_type -> tfplugin5.GetMetadata.Request
- 47, // 94: tfplugin5.Provider.GetSchema:input_type -> tfplugin5.GetProviderSchema.Request
- 52, // 95: tfplugin5.Provider.PrepareProviderConfig:input_type -> tfplugin5.PrepareProviderConfig.Request
- 56, // 96: tfplugin5.Provider.ValidateResourceTypeConfig:input_type -> tfplugin5.ValidateResourceTypeConfig.Request
- 58, // 97: tfplugin5.Provider.ValidateDataSourceConfig:input_type -> tfplugin5.ValidateDataSourceConfig.Request
- 54, // 98: tfplugin5.Provider.UpgradeResourceState:input_type -> tfplugin5.UpgradeResourceState.Request
- 60, // 99: tfplugin5.Provider.Configure:input_type -> tfplugin5.Configure.Request
- 62, // 100: tfplugin5.Provider.ReadResource:input_type -> tfplugin5.ReadResource.Request
- 64, // 101: tfplugin5.Provider.PlanResourceChange:input_type -> tfplugin5.PlanResourceChange.Request
- 66, // 102: tfplugin5.Provider.ApplyResourceChange:input_type -> tfplugin5.ApplyResourceChange.Request
- 68, // 103: tfplugin5.Provider.ImportResourceState:input_type -> tfplugin5.ImportResourceState.Request
- 71, // 104: tfplugin5.Provider.MoveResourceState:input_type -> tfplugin5.MoveResourceState.Request
- 73, // 105: tfplugin5.Provider.ReadDataSource:input_type -> tfplugin5.ReadDataSource.Request
- 81, // 106: tfplugin5.Provider.GetFunctions:input_type -> tfplugin5.GetFunctions.Request
- 84, // 107: tfplugin5.Provider.CallFunction:input_type -> tfplugin5.CallFunction.Request
- 34, // 108: tfplugin5.Provider.Stop:input_type -> tfplugin5.Stop.Request
- 75, // 109: tfplugin5.Provisioner.GetSchema:input_type -> tfplugin5.GetProvisionerSchema.Request
- 77, // 110: tfplugin5.Provisioner.ValidateProvisionerConfig:input_type -> tfplugin5.ValidateProvisionerConfig.Request
- 79, // 111: tfplugin5.Provisioner.ProvisionResource:input_type -> tfplugin5.ProvisionResource.Request
- 34, // 112: tfplugin5.Provisioner.Stop:input_type -> tfplugin5.Stop.Request
- 43, // 113: tfplugin5.Provider.GetMetadata:output_type -> tfplugin5.GetMetadata.Response
- 48, // 114: tfplugin5.Provider.GetSchema:output_type -> tfplugin5.GetProviderSchema.Response
- 53, // 115: tfplugin5.Provider.PrepareProviderConfig:output_type -> tfplugin5.PrepareProviderConfig.Response
- 57, // 116: tfplugin5.Provider.ValidateResourceTypeConfig:output_type -> tfplugin5.ValidateResourceTypeConfig.Response
- 59, // 117: tfplugin5.Provider.ValidateDataSourceConfig:output_type -> tfplugin5.ValidateDataSourceConfig.Response
- 55, // 118: tfplugin5.Provider.UpgradeResourceState:output_type -> tfplugin5.UpgradeResourceState.Response
- 61, // 119: tfplugin5.Provider.Configure:output_type -> tfplugin5.Configure.Response
- 63, // 120: tfplugin5.Provider.ReadResource:output_type -> tfplugin5.ReadResource.Response
- 65, // 121: tfplugin5.Provider.PlanResourceChange:output_type -> tfplugin5.PlanResourceChange.Response
- 67, // 122: tfplugin5.Provider.ApplyResourceChange:output_type -> tfplugin5.ApplyResourceChange.Response
- 70, // 123: tfplugin5.Provider.ImportResourceState:output_type -> tfplugin5.ImportResourceState.Response
- 72, // 124: tfplugin5.Provider.MoveResourceState:output_type -> tfplugin5.MoveResourceState.Response
- 74, // 125: tfplugin5.Provider.ReadDataSource:output_type -> tfplugin5.ReadDataSource.Response
- 82, // 126: tfplugin5.Provider.GetFunctions:output_type -> tfplugin5.GetFunctions.Response
- 85, // 127: tfplugin5.Provider.CallFunction:output_type -> tfplugin5.CallFunction.Response
- 35, // 128: tfplugin5.Provider.Stop:output_type -> tfplugin5.Stop.Response
- 76, // 129: tfplugin5.Provisioner.GetSchema:output_type -> tfplugin5.GetProvisionerSchema.Response
- 78, // 130: tfplugin5.Provisioner.ValidateProvisionerConfig:output_type -> tfplugin5.ValidateProvisionerConfig.Response
- 80, // 131: tfplugin5.Provisioner.ProvisionResource:output_type -> tfplugin5.ProvisionResource.Response
- 35, // 132: tfplugin5.Provisioner.Stop:output_type -> tfplugin5.Stop.Response
- 113, // [113:133] is the sub-list for method output_type
- 93, // [93:113] is the sub-list for method input_type
- 93, // [93:93] is the sub-list for extension type_name
- 93, // [93:93] is the sub-list for extension extendee
- 0, // [0:93] is the sub-list for field type_name
-}
-
-func init() { file_tfplugin5_proto_init() }
-func file_tfplugin5_proto_init() {
- if File_tfplugin5_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_tfplugin5_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DynamicValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Diagnostic); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FunctionError); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AttributePath); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Stop); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RawState); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Schema); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ServerCapabilities); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ClientCapabilities); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Function); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Deferred); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetProviderSchema); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PrepareProviderConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UpgradeResourceState); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateResourceTypeConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateDataSourceConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Configure); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReadResource); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PlanResourceChange); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ApplyResourceChange); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ImportResourceState); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MoveResourceState); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReadDataSource); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetProvisionerSchema); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateProvisionerConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ProvisionResource); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetFunctions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CallFunction); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AttributePath_Step); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Stop_Request); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Stop_Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Schema_Block); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Schema_Attribute); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Schema_NestedBlock); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Function_Parameter); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Function_Return); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetMetadata_Request); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetMetadata_Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetMetadata_FunctionMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetMetadata_DataSourceMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetMetadata_ResourceMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetProviderSchema_Request); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetProviderSchema_Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PrepareProviderConfig_Request); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PrepareProviderConfig_Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UpgradeResourceState_Request); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UpgradeResourceState_Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateResourceTypeConfig_Request); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateResourceTypeConfig_Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateDataSourceConfig_Request); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateDataSourceConfig_Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Configure_Request); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Configure_Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReadResource_Request); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReadResource_Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PlanResourceChange_Request); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PlanResourceChange_Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ApplyResourceChange_Request); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ApplyResourceChange_Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ImportResourceState_Request); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ImportResourceState_ImportedResource); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ImportResourceState_Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MoveResourceState_Request); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MoveResourceState_Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReadDataSource_Request); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReadDataSource_Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetProvisionerSchema_Request); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetProvisionerSchema_Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateProvisionerConfig_Request); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ValidateProvisionerConfig_Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ProvisionResource_Request); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ProvisionResource_Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetFunctions_Request); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetFunctions_Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CallFunction_Request); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_tfplugin5_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CallFunction_Response); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_tfplugin5_proto_msgTypes[2].OneofWrappers = []interface{}{}
- file_tfplugin5_proto_msgTypes[29].OneofWrappers = []interface{}{
- (*AttributePath_Step_AttributeName)(nil),
- (*AttributePath_Step_ElementKeyString)(nil),
- (*AttributePath_Step_ElementKeyInt)(nil),
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_tfplugin5_proto_rawDesc,
- NumEnums: 4,
- NumMessages: 82,
- NumExtensions: 0,
- NumServices: 2,
- },
- GoTypes: file_tfplugin5_proto_goTypes,
- DependencyIndexes: file_tfplugin5_proto_depIdxs,
- EnumInfos: file_tfplugin5_proto_enumTypes,
- MessageInfos: file_tfplugin5_proto_msgTypes,
- }.Build()
- File_tfplugin5_proto = out.File
- file_tfplugin5_proto_rawDesc = nil
- file_tfplugin5_proto_goTypes = nil
- file_tfplugin5_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.proto b/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.proto
deleted file mode 100644
index 3c2fa84a..00000000
--- a/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.proto
+++ /dev/null
@@ -1,635 +0,0 @@
-// Copyright (c) HashiCorp, Inc.
-// SPDX-License-Identifier: MPL-2.0
-
-// Terraform Plugin RPC protocol version 5.6
-//
-// This file defines version 5.6 of the RPC protocol. To implement a plugin
-// against this protocol, copy this definition into your own codebase and
-// use protoc to generate stubs for your target language.
-//
-// This file will not be updated. Any minor versions of protocol 5 to follow
-// should copy this file and modify the copy while maintaing backwards
-// compatibility. Breaking changes, if any are required, will come
-// in a subsequent major version with its own separate proto definition.
-//
-// Note that only the proto files included in a release tag of Terraform are
-// official protocol releases. Proto files taken from other commits may include
-// incomplete changes or features that did not make it into a final release.
-// In all reasonable cases, plugin developers should take the proto file from
-// the tag of the most recent release of Terraform, and not from the main
-// branch or any other development branch.
-//
-syntax = "proto3";
-option go_package = "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5";
-
-package tfplugin5;
-
-// DynamicValue is an opaque encoding of terraform data, with the field name
-// indicating the encoding scheme used.
-message DynamicValue {
- bytes msgpack = 1;
- bytes json = 2;
-}
-
-message Diagnostic {
- enum Severity {
- INVALID = 0;
- ERROR = 1;
- WARNING = 2;
- }
- Severity severity = 1;
- string summary = 2;
- string detail = 3;
- AttributePath attribute = 4;
-}
-
-message FunctionError {
- string text = 1;
- // The optional function_argument records the index position of the
- // argument which caused the error.
- optional int64 function_argument = 2;
-}
-
-message AttributePath {
- message Step {
- oneof selector {
- // Set "attribute_name" to represent looking up an attribute
- // in the current object value.
- string attribute_name = 1;
- // Set "element_key_*" to represent looking up an element in
- // an indexable collection type.
- string element_key_string = 2;
- int64 element_key_int = 3;
- }
- }
- repeated Step steps = 1;
-}
-
-message Stop {
- message Request {
- }
- message Response {
- string Error = 1;
- }
-}
-
-// RawState holds the stored state for a resource to be upgraded by the
-// provider. It can be in one of two formats, the current json encoded format
-// in bytes, or the legacy flatmap format as a map of strings.
-message RawState {
- bytes json = 1;
- map flatmap = 2;
-}
-
-enum StringKind {
- PLAIN = 0;
- MARKDOWN = 1;
-}
-
-// Schema is the configuration schema for a Resource, Provider, or Provisioner.
-message Schema {
- message Block {
- int64 version = 1;
- repeated Attribute attributes = 2;
- repeated NestedBlock block_types = 3;
- string description = 4;
- StringKind description_kind = 5;
- bool deprecated = 6;
- }
-
- message Attribute {
- string name = 1;
- bytes type = 2;
- string description = 3;
- bool required = 4;
- bool optional = 5;
- bool computed = 6;
- bool sensitive = 7;
- StringKind description_kind = 8;
- bool deprecated = 9;
- }
-
- message NestedBlock {
- enum NestingMode {
- INVALID = 0;
- SINGLE = 1;
- LIST = 2;
- SET = 3;
- MAP = 4;
- GROUP = 5;
- }
-
- string type_name = 1;
- Block block = 2;
- NestingMode nesting = 3;
- int64 min_items = 4;
- int64 max_items = 5;
- }
-
- // The version of the schema.
- // Schemas are versioned, so that providers can upgrade a saved resource
- // state when the schema is changed.
- int64 version = 1;
-
- // Block is the top level configuration block for this schema.
- Block block = 2;
-}
-
-// ServerCapabilities allows providers to communicate extra information
-// regarding supported protocol features. This is used to indicate
-// availability of certain forward-compatible changes which may be optional
-// in a major protocol version, but cannot be tested for directly.
-message ServerCapabilities {
- // The plan_destroy capability signals that a provider expects a call
- // to PlanResourceChange when a resource is going to be destroyed.
- bool plan_destroy = 1;
-
- // The get_provider_schema_optional capability indicates that this
- // provider does not require calling GetProviderSchema to operate
- // normally, and the caller can used a cached copy of the provider's
- // schema.
- bool get_provider_schema_optional = 2;
-
- // The move_resource_state capability signals that a provider supports the
- // MoveResourceState RPC.
- bool move_resource_state = 3;
-}
-
-// ClientCapabilities allows Terraform to publish information regarding
-// supported protocol features. This is used to indicate availability of
-// certain forward-compatible changes which may be optional in a major
-// protocol version, but cannot be tested for directly.
-message ClientCapabilities {
- // The deferral_allowed capability signals that the client is able to
- // handle deferred responses from the provider.
- bool deferral_allowed = 1;
-}
-
-message Function {
- // parameters is the ordered list of positional function parameters.
- repeated Parameter parameters = 1;
-
- // variadic_parameter is an optional final parameter which accepts
- // zero or more argument values, in which Terraform will send an
- // ordered list of the parameter type.
- Parameter variadic_parameter = 2;
-
- // return is the function result.
- Return return = 3;
-
- // summary is the human-readable shortened documentation for the function.
- string summary = 4;
-
- // description is human-readable documentation for the function.
- string description = 5;
-
- // description_kind is the formatting of the description.
- StringKind description_kind = 6;
-
- // deprecation_message is human-readable documentation if the
- // function is deprecated.
- string deprecation_message = 7;
-
- message Parameter {
- // name is the human-readable display name for the parameter.
- string name = 1;
-
- // type is the type constraint for the parameter.
- bytes type = 2;
-
- // allow_null_value when enabled denotes that a null argument value can
- // be passed to the provider. When disabled, Terraform returns an error
- // if the argument value is null.
- bool allow_null_value = 3;
-
- // allow_unknown_values when enabled denotes that only wholly known
- // argument values will be passed to the provider. When disabled,
- // Terraform skips the function call entirely and assumes an unknown
- // value result from the function.
- bool allow_unknown_values = 4;
-
- // description is human-readable documentation for the parameter.
- string description = 5;
-
- // description_kind is the formatting of the description.
- StringKind description_kind = 6;
- }
-
- message Return {
- // type is the type constraint for the function result.
- bytes type = 1;
- }
-}
-
-// Deferred is a message that indicates that change is deferred for a reason.
-message Deferred {
- // Reason is the reason for deferring the change.
- enum Reason {
- // UNKNOWN is the default value, and should not be used.
- UNKNOWN = 0;
- // RESOURCE_CONFIG_UNKNOWN is used when the config is partially unknown and the real
- // values need to be known before the change can be planned.
- RESOURCE_CONFIG_UNKNOWN = 1;
- // PROVIDER_CONFIG_UNKNOWN is used when parts of the provider configuration
- // are unknown, e.g. the provider configuration is only known after the apply is done.
- PROVIDER_CONFIG_UNKNOWN = 2;
- // ABSENT_PREREQ is used when a hard dependency has not been satisfied.
- ABSENT_PREREQ = 3;
- }
- // reason is the reason for deferring the change.
- Reason reason = 1;
-}
-
-service Provider {
- //////// Information about what a provider supports/expects
-
- // GetMetadata returns upfront information about server capabilities and
- // supported resource types without requiring the server to instantiate all
- // schema information, which may be memory intensive. This RPC is optional,
- // where clients may receive an unimplemented RPC error. Clients should
- // ignore the error and call the GetSchema RPC as a fallback.
- rpc GetMetadata(GetMetadata.Request) returns (GetMetadata.Response);
-
- // GetSchema returns schema information for the provider, data resources,
- // and managed resources.
- rpc GetSchema(GetProviderSchema.Request) returns (GetProviderSchema.Response);
- rpc PrepareProviderConfig(PrepareProviderConfig.Request) returns (PrepareProviderConfig.Response);
- rpc ValidateResourceTypeConfig(ValidateResourceTypeConfig.Request) returns (ValidateResourceTypeConfig.Response);
- rpc ValidateDataSourceConfig(ValidateDataSourceConfig.Request) returns (ValidateDataSourceConfig.Response);
- rpc UpgradeResourceState(UpgradeResourceState.Request) returns (UpgradeResourceState.Response);
-
- //////// One-time initialization, called before other functions below
- rpc Configure(Configure.Request) returns (Configure.Response);
-
- //////// Managed Resource Lifecycle
- rpc ReadResource(ReadResource.Request) returns (ReadResource.Response);
- rpc PlanResourceChange(PlanResourceChange.Request) returns (PlanResourceChange.Response);
- rpc ApplyResourceChange(ApplyResourceChange.Request) returns (ApplyResourceChange.Response);
- rpc ImportResourceState(ImportResourceState.Request) returns (ImportResourceState.Response);
- rpc MoveResourceState(MoveResourceState.Request) returns (MoveResourceState.Response);
- rpc ReadDataSource(ReadDataSource.Request) returns (ReadDataSource.Response);
-
- // Functions
-
- // GetFunctions returns the definitions of all functions.
- rpc GetFunctions(GetFunctions.Request) returns (GetFunctions.Response);
-
- // CallFunction runs the provider-defined function logic and returns
- // the result with any diagnostics.
- rpc CallFunction(CallFunction.Request) returns (CallFunction.Response);
-
- //////// Graceful Shutdown
- rpc Stop(Stop.Request) returns (Stop.Response);
-}
-
-message GetMetadata {
- message Request {
- }
-
- message Response {
- ServerCapabilities server_capabilities = 1;
- repeated Diagnostic diagnostics = 2;
- repeated DataSourceMetadata data_sources = 3;
- repeated ResourceMetadata resources = 4;
-
- // functions returns metadata for any functions.
- repeated FunctionMetadata functions = 5;
- }
-
- message FunctionMetadata {
- // name is the function name.
- string name = 1;
- }
-
- message DataSourceMetadata {
- string type_name = 1;
- }
-
- message ResourceMetadata {
- string type_name = 1;
- }
-}
-
-message GetProviderSchema {
- message Request {
- }
- message Response {
- Schema provider = 1;
- map